Merge branch 'development_k3s' into development_hetzner

* development_k3s:
  feat: Add K3s installer with complete lifecycle management
  feat: Add K3s installer with complete lifecycle management
  fixing startupcmd
  fix actions
  feat(k3s-installer)
This commit is contained in:
2025-11-30 15:58:41 +01:00
11 changed files with 1324 additions and 354 deletions

View File

@@ -20,6 +20,7 @@ import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
import incubaid.herolib.installers.virt.podman
import incubaid.herolib.installers.virt.kubernetes_installer
import incubaid.herolib.installers.infra.gitea
import incubaid.herolib.builder
@@ -80,6 +81,7 @@ pub fn run(args_ PlayArgs) ! {
herolib.play(mut plbook)!
vlang.play(mut plbook)!
podman.play(mut plbook)!
kubernetes_installer.play(mut plbook)!
gitea.play(mut plbook)!
giteaclient.play(mut plbook)!

View File

@@ -1,217 +0,0 @@
The following bootstrap script needs to be installed:
#!/bin/bash
# K3s bootstrap script: installs dependencies, derives the Mycelium IPv6
# address, assembles CLI arguments and execs k3s as server or agent.
set -euo pipefail
EXTRA_ARGS=""  # accumulated k3s CLI flags, filled in by prepare_args()
# Emit an informational line on stdout, prefixed with the [INFO] tag.
log_info() {
    echo "[INFO] " "$@"
}
# Emit an error line on stderr and abort the whole script with status 1.
log_fatal() {
    echo "[ERROR] " "$@" 1>&2
    exit 1
}
# Load KEY=VALUE pairs from the given env file, exporting every variable
# (set -a). Aborts when the file is missing.
source_env_file() {
    local env_file="${1:-}"
    [ -f "$env_file" ] || log_fatal "Environment file not found: $env_file"
    set -a
    # shellcheck disable=SC1090
    source "$env_file"
    set +a
}
# Abort unless the script runs as root (EUID 0); k3s needs root privileges.
check_root() {
    [ "$EUID" -eq 0 ] || log_fatal "This script must be run as root"
}
# Install runtime dependencies: curl, iproute2, and the k3s/kubectl binaries.
# Binaries are now downloaded for the host architecture (amd64 or arm64);
# the original hard-coded amd64 and silently installed wrong binaries on arm.
install_deps() {
    log_info "Updating package lists..."
    if ! apt-get update -qq > /dev/null 2>&1; then
        log_fatal "Failed to update package lists"
    fi
    if ! command -v curl &> /dev/null; then
        log_info "Installing curl..."
        apt-get install -y -qq curl > /dev/null 2>&1 || log_fatal "Failed to install curl"
    fi
    if ! command -v ip &> /dev/null; then
        log_info "Installing iproute2 for ip command..."
        apt-get install -y -qq iproute2 > /dev/null 2>&1 || log_fatal "Failed to install iproute2"
    fi
    # Map the kernel machine name to the release-asset naming scheme.
    local arch
    case "$(uname -m)" in
        x86_64) arch="amd64" ;;
        aarch64|arm64) arch="arm64" ;;
        *) log_fatal "Unsupported architecture: $(uname -m)" ;;
    esac
    if ! command -v k3s &> /dev/null; then
        log_info "Installing k3s..."
        # k3s publishes the arm64 asset as "k3s-arm64"; amd64 is plain "k3s".
        local k3s_asset="k3s"
        [ "$arch" = "arm64" ] && k3s_asset="k3s-arm64"
        if ! curl -fsSL -o /usr/local/bin/k3s "https://github.com/k3s-io/k3s/releases/download/v1.33.1+k3s1/${k3s_asset}" 2>/dev/null; then
            log_fatal "Failed to download k3s"
        fi
        chmod +x /usr/local/bin/k3s
    fi
    if ! command -v kubectl &> /dev/null; then
        log_info "Installing kubectl..."
        if ! curl -fsSL -o /usr/local/bin/kubectl "https://dl.k8s.io/release/v1.33.1/bin/linux/${arch}/kubectl" 2>/dev/null; then
            log_fatal "Failed to download kubectl"
        fi
        chmod +x /usr/local/bin/kubectl
    fi
}
# Print the global IPv6 address on interface $1 that shares the first four
# hextets with the next-hop of the Mycelium 400::/7 route over that interface.
get_iface_ipv6() {
    local iface="$1"
    # Step 1: Find the next-hop for 400::/7. Take only the FIRST matching
    # route line — multiple 400::/7 routes would otherwise corrupt parsing.
    local route_line
    route_line=$(ip -6 route | grep "^400::/7.*dev ${iface}" | head -n 1 || true)
    if [ -z "$route_line" ]; then
        log_fatal "No 400::/7 route found via interface ${iface}"
    fi
    # Extract next-hop IPv6
    local nexthop
    nexthop=$(echo "$route_line" | awk '{for(i=1;i<=NF;i++) if ($i=="via") print $(i+1)}')
    # An on-link route has no "via"; an empty next-hop would yield an empty
    # prefix and bogus matching below, so fail loudly instead.
    if [ -z "$nexthop" ]; then
        log_fatal "Route for 400::/7 via ${iface} has no next-hop: ${route_line}"
    fi
    local prefix
    prefix=$(echo "$nexthop" | cut -d':' -f1-4)
    # Step 3: Get global IPv6 addresses and match subnet
    local ipv6_list
    ipv6_list=$(ip -6 addr show dev "$iface" scope global | awk '/inet6/ {print $2}' | cut -d'/' -f1)
    local ip ip_prefix
    for ip in $ipv6_list; do
        ip_prefix=$(echo "$ip" | cut -d':' -f1-4)
        if [ "$ip_prefix" = "$prefix" ]; then
            echo "$ip"
            return 0
        fi
    done
    log_fatal "No global IPv6 address found on ${iface} matching prefix ${prefix}"
}
# Assemble EXTRA_ARGS for k3s from the environment:
# - node IP derived from the Mycelium interface (K3S_FLANNEL_IFACE, required)
# - optional relocated data dir (K3S_DATA_DIR), migrating any existing state
# - IPv6 cluster/service CIDRs on masters
# - TLS SANs and optional --cluster-init when bootstrapping a server
prepare_args() {
    log_info "Preparing k3s arguments..."
    if [ -z "${K3S_FLANNEL_IFACE:-}" ]; then
        log_fatal "K3S_FLANNEL_IFACE not set, it should be your mycelium interface"
    else
        local ipv6
        ipv6=$(get_iface_ipv6 "$K3S_FLANNEL_IFACE")
        EXTRA_ARGS="$EXTRA_ARGS --node-ip=$ipv6"
    fi
    if [ -n "${K3S_DATA_DIR:-}" ]; then
        log_info "k3s data-dir set to: $K3S_DATA_DIR"
        # Fix: ensure the destination exists and quote the path so a value
        # containing spaces cannot word-split (the original was unquoted).
        mkdir -p "$K3S_DATA_DIR"
        if [ -d "/var/lib/rancher/k3s" ] && [ -n "$(ls -A /var/lib/rancher/k3s 2>/dev/null)" ]; then
            cp -r /var/lib/rancher/k3s/* "$K3S_DATA_DIR" && rm -rf /var/lib/rancher/k3s
        fi
        EXTRA_ARGS="$EXTRA_ARGS --data-dir $K3S_DATA_DIR --kubelet-arg=root-dir=$K3S_DATA_DIR/kubelet"
    fi
    if [[ "${MASTER:-}" = "true" ]]; then
        # IPv6-only pod/service networking over flannel.
        EXTRA_ARGS="$EXTRA_ARGS --cluster-cidr=2001:cafe:42::/56"
        EXTRA_ARGS="$EXTRA_ARGS --service-cidr=2001:cafe:43::/112"
        EXTRA_ARGS="$EXTRA_ARGS --flannel-ipv6-masq"
    fi
    if [ -z "${K3S_URL:-}" ]; then
        # Add additional SANs for planetary network IP, public IPv4, and public IPv6
        # https://github.com/threefoldtech/tf-images/issues/98
        local ifaces=( "tun0" "eth1" "eth2" )
        for iface in "${ifaces[@]}"; do
            # Check if interface exists before querying
            if ! ip addr show "$iface" &>/dev/null; then
                continue
            fi
            local addrs
            addrs=$(ip addr show "$iface" 2>/dev/null | grep -E "inet |inet6 " | grep "global" | cut -d '/' -f1 | awk '{print $2}' || true)
            local addr
            for addr in $addrs; do
                # Validate the IP address by trying to route to it
                if ip route get "$addr" &>/dev/null; then
                    EXTRA_ARGS="$EXTRA_ARGS --tls-san $addr"
                fi
            done
        done
        if [ "${HA:-}" = "true" ]; then
            EXTRA_ARGS="$EXTRA_ARGS --cluster-init"
        fi
    else
        if [ -z "${K3S_TOKEN:-}" ]; then
            log_fatal "K3S_TOKEN must be set when K3S_URL is specified (joining a cluster)"
        fi
    fi
}
# Patch (server) or remove (agent) the tfgw-crd manifest that k3s auto-applies
# from <data-dir>/server/manifests.
patch_manifests() {
    log_info "Patching manifests..."
    # NOTE(review): dir/manifest are not declared `local`, so they leak into
    # the global scope — confirm nothing downstream relies on that.
    dir="${K3S_DATA_DIR:-/var/lib/rancher/k3s}"
    manifest="$dir/server/manifests/tfgw-crd.yaml"
    # If K3S_URL found, remove manifest and exit. it is an agent node
    # NOTE(review): `exit 0` terminates the WHOLE script here, so run_node is
    # never reached for agent nodes — confirm this is intentional.
    if [[ -n "${K3S_URL:-}" ]]; then
        rm -f "$manifest"
        log_info "Agent node detected, removed manifest: $manifest"
        exit 0
    fi
    # If K3S_URL not found, patch the manifest. it is a server node
    [[ ! -f "$manifest" ]] && echo "Manifest not found: $manifest" >&2 && exit 1
    # Substitute the templated secrets/settings in place.
    sed -i \
        -e "s|\${MNEMONIC}|${MNEMONIC:-}|g" \
        -e "s|\${NETWORK}|${NETWORK:-}|g" \
        -e "s|\${TOKEN}|${TOKEN:-}|g" \
        "$manifest"
}
# Exec into k3s with the role derived from the environment:
#   no K3S_URL              -> new server (initialise cluster)
#   K3S_URL + MASTER=true   -> server joining an existing cluster
#   K3S_URL only            -> agent (worker)
# `exec` replaces this shell so k3s becomes the process of record.
run_node() {
    if [ -z "${K3S_URL:-}" ]; then
        log_info "Starting k3s server (initializing new cluster)..."
        log_info "Command: k3s server --flannel-iface $K3S_FLANNEL_IFACE $EXTRA_ARGS"
        # $EXTRA_ARGS is intentionally unquoted: it carries multiple flags
        # that must undergo word splitting.
        exec k3s server --flannel-iface "$K3S_FLANNEL_IFACE" $EXTRA_ARGS 2>&1
    elif [ "${MASTER:-}" = "true" ]; then
        log_info "Starting k3s server (joining existing cluster as master)..."
        log_info "Command: k3s server --server $K3S_URL --flannel-iface $K3S_FLANNEL_IFACE $EXTRA_ARGS"
        exec k3s server --server "$K3S_URL" --flannel-iface "$K3S_FLANNEL_IFACE" $EXTRA_ARGS 2>&1
    else
        log_info "Starting k3s agent (joining existing cluster as worker)..."
        log_info "Command: k3s agent --server $K3S_URL --flannel-iface $K3S_FLANNEL_IFACE $EXTRA_ARGS"
        exec k3s agent --server "$K3S_URL" --flannel-iface "$K3S_FLANNEL_IFACE" $EXTRA_ARGS 2>&1
    fi
}
# Entry point: $1 is the path to the environment file holding the K3S_*
# settings. Order matters: env first, then privileges, deps, args, manifests,
# and finally the (exec'ing) node launch.
main() {
    source_env_file "${1:-}"
    check_root
    install_deps
    prepare_args
    patch_manifests
    run_node
}
main "$@"
INSTRUCTIONS: use herolib as much as possible (e.g. the SAL layer).

View File

@@ -2,119 +2,537 @@ module kubernetes_installer
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.texttools
import incubaid.herolib.core
import incubaid.herolib.core.pathlib
import incubaid.herolib.installers.ulist
import incubaid.herolib.osal.startupmanager
import os
//////////////////// following actions are not specific to instance of the object
//////////////////// STARTUP COMMAND ////////////////////
// checks if kubectl is installed and meets minimum version requirement
fn installed() !bool {
if !osal.cmd_exists('kubectl') {
return false
fn (self &KubernetesInstaller) startupcmd() ![]startupmanager.ZProcessNewArgs {
mut res := []startupmanager.ZProcessNewArgs{}
// Get Mycelium IPv6 address
ipv6 := self.get_mycelium_ipv6()!
// Build K3s command based on node type
mut cmd := ''
mut extra_args := '--node-ip=${ipv6} --flannel-iface ${self.mycelium_interface}'
// Add data directory if specified
if self.data_dir != '' {
extra_args += ' --data-dir ${self.data_dir} --kubelet-arg=root-dir=${self.data_dir}/kubelet'
}
res := os.execute('${osal.profile_path_source_and()!} kubectl version --client --output=json')
if res.exit_code != 0 {
// Try older kubectl version command format
res2 := os.execute('${osal.profile_path_source_and()!} kubectl version --client --short')
if res2.exit_code != 0 {
return false
}
// Parse version from output like "Client Version: v1.31.0"
lines := res2.output.split_into_lines().filter(it.contains('Client Version'))
if lines.len == 0 {
return false
}
version_str := lines[0].all_after('v').trim_space()
if texttools.version(version) <= texttools.version(version_str) {
return true
}
return false
// Add token
if self.token != '' {
extra_args += ' --token ${self.token}'
}
// For newer kubectl versions with JSON output
// Just check if kubectl exists and runs - version checking is optional
return true
if self.is_master {
// Master node configuration
extra_args += ' --cluster-cidr=2001:cafe:42::/56 --service-cidr=2001:cafe:43::/112 --flannel-ipv6-masq'
if self.is_first_master {
// First master: initialize cluster
cmd = 'k3s server --cluster-init ${extra_args}'
} else {
// Additional master: join existing cluster
if self.master_url == '' {
return error('master_url is required for joining as additional master')
}
cmd = 'k3s server --server ${self.master_url} ${extra_args}'
}
} else {
// Worker node: join as agent
if self.master_url == '' {
return error('master_url is required for worker nodes')
}
cmd = 'k3s agent --server ${self.master_url} ${extra_args}'
}
res << startupmanager.ZProcessNewArgs{
name: 'k3s_${self.name}'
startuptype: .systemd
cmd: cmd
env: {
'HOME': os.home_dir()
}
}
return res
}
// get the Upload List of the files
//////////////////// RUNNING CHECK ////////////////////
// A live `k3s server` or `k3s agent` process counts as "running".
// kubectl connectivity is deliberately not probed: the API server may not be
// ready right after launch, and a bad kubeconfig could make the check hang.
fn running() !bool {
	res := osal.exec(cmd: 'pgrep -f "k3s (server|agent)"', stdout: false, raise_error: false)!
	return res.exit_code == 0
}
//////////////////// OS CHECK ////////////////////
// Verify the host is an Ubuntu Linux machine; errors otherwise.
fn check_ubuntu() ! {
	// Anything that is not Linux is rejected outright.
	if !core.is_linux()! {
		return error('K3s installer requires Linux. Current OS is not supported.')
	}
	// The distro is identified via /etc/os-release.
	content := os.read_file('/etc/os-release') or {
		return error('Could not read /etc/os-release. Is this Ubuntu?')
	}
	is_ubuntu := content.contains('Ubuntu') || content.contains('ubuntu')
	if !is_ubuntu {
		return error('This installer requires Ubuntu. Current OS is not Ubuntu.')
	}
	console.print_debug('OS check passed: Running on Ubuntu')
}
//////////////////// DEPENDENCY INSTALLATION ////////////////////
// Install curl, iproute2 and the k3s/kubectl binaries for the host platform.
// k3s_version is a tag such as 'v1.33.1' ('+k3s1' is appended for the k3s
// release asset). Downloads are now architecture-aware; the original
// hard-coded amd64 even though `core.is_linux_arm` is available (and is used
// by the sibling kubectl `install()` in this file).
fn install_deps(k3s_version string) ! {
	console.print_header('Installing dependencies...')
	// Check and install curl
	if !osal.cmd_exists('curl') {
		console.print_header('Installing curl...')
		osal.package_install('curl')!
	}
	// Check and install iproute2 (for ip command)
	if !osal.cmd_exists('ip') {
		console.print_header('Installing iproute2...')
		osal.package_install('iproute2')!
	}
	// Pick release-asset names for this architecture.
	// k3s publishes the arm64 binary as 'k3s-arm64', amd64 as plain 'k3s'.
	is_arm := core.is_linux_arm()!
	k3s_asset := if is_arm { 'k3s-arm64' } else { 'k3s' }
	kubectl_arch := if is_arm { 'arm64' } else { 'amd64' }
	// Install K3s binary
	if !osal.cmd_exists('k3s') {
		console.print_header('Installing K3s ${k3s_version}...')
		k3s_url := 'https://github.com/k3s-io/k3s/releases/download/${k3s_version}+k3s1/${k3s_asset}'
		osal.download(
			url:  k3s_url
			dest: '/tmp/k3s'
		)!
		// Make it executable and move to /usr/local/bin
		osal.exec(cmd: 'chmod +x /tmp/k3s')!
		osal.cmd_add(
			cmdname: 'k3s'
			source:  '/tmp/k3s'
		)!
	}
	// Install kubectl (same version tag as k3s, e.g. v1.33.1)
	if !osal.cmd_exists('kubectl') {
		console.print_header('Installing kubectl...')
		kubectl_url := 'https://dl.k8s.io/release/${k3s_version}/bin/linux/${kubectl_arch}/kubectl'
		osal.download(
			url:  kubectl_url
			dest: '/tmp/kubectl'
		)!
		osal.exec(cmd: 'chmod +x /tmp/kubectl')!
		osal.cmd_add(
			cmdname: 'kubectl'
			source:  '/tmp/kubectl'
		)!
	}
	console.print_header('All dependencies installed successfully')
}
//////////////////// INSTALLATION ACTIONS ////////////////////
// Both binaries must be on PATH for the installation to count as present.
fn installed() !bool {
	if !osal.cmd_exists('k3s') {
		return false
	}
	return osal.cmd_exists('kubectl')
}
// Install the first master node: marks this instance as the
// cluster-initialising control plane, installs dependencies, persists the
// configuration and prints the token plus a ready-made join script.
pub fn (mut self KubernetesInstaller) install_master() ! {
	console.print_header('Installing K3s as first master node')
	check_ubuntu()! // abort early on unsupported OS
	// Mark this node as the cluster-initialising control plane.
	self.is_first_master = true
	self.is_master = true
	install_deps(self.k3s_version)!
	osal.dir_ensure(self.data_dir)!
	// Persist the (possibly auto-completed) configuration.
	set(self)!
	console.print_header('K3s first master installation completed')
	console.print_header('Token: ${self.token}')
	console.print_header('To start K3s, run: kubernetes_installer.start')
	// Hand the operator a heroscript template for joining further nodes.
	join_script := self.generate_join_script()!
	console.print_header('Join script generated. Save this for other nodes:\n${join_script}')
}
// Join an existing cluster as an additional (HA) master node.
// Requires both the shared cluster token and the first master's API URL.
pub fn (mut self KubernetesInstaller) join_master() ! {
	console.print_header('Joining K3s cluster as additional master')
	check_ubuntu()!
	// Joining needs both the shared secret and the API endpoint.
	if self.token.len == 0 {
		return error('token is required to join cluster')
	}
	if self.master_url.len == 0 {
		return error('master_url is required to join cluster')
	}
	self.is_master = true
	self.is_first_master = false
	install_deps(self.k3s_version)!
	osal.dir_ensure(self.data_dir)!
	// Persist the configuration for later start/stop/destroy actions.
	set(self)!
	console.print_header('K3s additional master installation completed')
	console.print_header('To start K3s, run: kubernetes_installer.start')
}
// Install this host as a worker (agent) node joining an existing cluster.
// Requires both the shared cluster token and the master's API URL.
pub fn (mut self KubernetesInstaller) install_worker() ! {
	console.print_header('Installing K3s as worker node')
	check_ubuntu()!
	// A worker can only join an existing cluster, so both are mandatory.
	if self.token.len == 0 {
		return error('token is required to join cluster')
	}
	if self.master_url.len == 0 {
		return error('master_url is required to join cluster')
	}
	self.is_first_master = false
	self.is_master = false
	install_deps(self.k3s_version)!
	osal.dir_ensure(self.data_dir)!
	// Persist the configuration for later start/stop/destroy actions.
	set(self)!
	console.print_header('K3s worker installation completed')
	console.print_header('To start K3s, run: kubernetes_installer.start')
}
//////////////////// UTILITY FUNCTIONS ////////////////////
// Get kubeconfig content.
// Reads the admin kubeconfig K3s writes below the data directory; errors
// when the file is absent (i.e. K3s has not started yet).
pub fn (self &KubernetesInstaller) get_kubeconfig() !string {
	kubeconfig_path := self.kubeconfig_path()
	// One explicit existence check replaces the original's redundant double
	// error path (pathlib.get_file `or` clause + exists() re-check).
	if !os.exists(kubeconfig_path) {
		return error('Kubeconfig not found at ${kubeconfig_path}. Is K3s running?')
	}
	return os.read_file(kubeconfig_path)!
}
// Generate join script for other nodes.
// Builds a heroscript template — with join-as-master and join-as-worker
// sections commented out — embedding this master's URL, token and settings.
// Only valid on the first master, since that is where the token originates.
pub fn (self &KubernetesInstaller) generate_join_script() !string {
	if !self.is_first_master {
		return error('Can only generate join script from first master node')
	}
	// Get Mycelium IPv6 of this master; the API endpoint is served on :6443.
	master_ipv6 := self.get_mycelium_ipv6()!
	master_url := 'https://[${master_ipv6}]:6443'
	// The raw string below is emitted verbatim (after interpolation); its
	// lines stay flush-left so the generated script carries no indentation.
	mut script := '#!/usr/bin/env hero
// ============================================================================
// K3s Cluster Join Script
// Generated from master node: ${self.node_name}
// ============================================================================
// Section 1: Join as Additional Master (HA)
// Uncomment to join as additional master node
/*
!!kubernetes_installer.configure
name:\'k3s_master_2\'
k3s_version:\'${self.k3s_version}\'
data_dir:\'${self.data_dir}\'
node_name:\'master-2\'
mycelium_interface:\'${self.mycelium_interface}\'
token:\'${self.token}\'
master_url:\'${master_url}\'
!!kubernetes_installer.join_master name:\'k3s_master_2\'
!!kubernetes_installer.start name:\'k3s_master_2\'
*/
// Section 2: Join as Worker Node
// Uncomment to join as worker node
/*
!!kubernetes_installer.configure
name:\'k3s_worker_1\'
k3s_version:\'${self.k3s_version}\'
data_dir:\'${self.data_dir}\'
node_name:\'worker-1\'
mycelium_interface:\'${self.mycelium_interface}\'
token:\'${self.token}\'
master_url:\'${master_url}\'
!!kubernetes_installer.install_worker name:\'k3s_worker_1\'
!!kubernetes_installer.start name:\'k3s_worker_1\'
*/
'
	return script
}
//////////////////// CLEANUP ////////////////////
// Tear down every trace of K3s on this host, in a deliberate order:
// systemd units first (so nothing auto-restarts), then processes, mounts,
// network interfaces, data directories, CNI state and iptables rules.
fn destroy() ! {
	console.print_header('Destroying K3s installation')
	// Get configuration to find data directory
	// Try to get from current configuration, otherwise use common paths
	mut data_dirs := []string{}
	if cfg := get() {
		data_dirs << cfg.data_dir
		console.print_debug('Found configured data directory: ${cfg.data_dir}')
	} else {
		console.print_debug('No configuration found, will clean up common K3s paths')
	}
	// Always add common K3s directories to ensure complete cleanup
	data_dirs << '/var/lib/rancher/k3s'
	data_dirs << '/root/hero/var/k3s'
	// CRITICAL: Complete systemd service deletion FIRST before any other cleanup
	// This prevents the service from auto-restarting during cleanup
	// Step 1: Stop and delete ALL k3s systemd services using startupmanager
	console.print_header('Stopping and removing systemd services...')
	// Get systemd startup manager
	mut sm := startupmanager_get(.systemd) or {
		console.print_debug('Failed to get systemd manager: ${err}')
		return error('Could not get systemd manager: ${err}')
	}
	// List all k3s services (a listing failure degrades to an empty list)
	all_services := sm.list() or {
		console.print_debug('Failed to list services: ${err}')
		[]string{}
	}
	// Filter and delete k3s services — every instance is named 'k3s_<name>'
	// (see startupcmd), so the prefix match catches all of them.
	for service_name in all_services {
		if service_name.starts_with('k3s_') {
			console.print_debug('Deleting systemd service: ${service_name}')
			// Use startupmanager.delete() which properly stops, disables, and removes the service
			sm.delete(service_name) or {
				console.print_debug('Failed to delete service ${service_name}: ${err}')
			}
		}
	}
	console.print_header(' Systemd services removed')
	// Step 2: Kill any remaining K3s processes
	console.print_header('Killing any remaining K3s processes...')
	osal.exec(cmd: 'killall -9 k3s 2>/dev/null || true', stdout: false, raise_error: false) or {
		console.print_debug('No k3s processes to kill or killall failed')
	}
	// Wait for processes to fully terminate
	osal.exec(cmd: 'sleep 2', stdout: false) or {}
	// Step 3: Unmount kubelet mounts (before network cleanup)
	cleanup_mounts()!
	// Step 4: Clean up network interfaces (after processes are stopped)
	cleanup_network()!
	// Step 5: Remove data directories
	console.print_header('Removing data directories...')
	// Remove all K3s data directories (deduplicated)
	mut cleaned_dirs := map[string]bool{}
	for data_dir in data_dirs {
		if data_dir != '' && data_dir !in cleaned_dirs {
			cleaned_dirs[data_dir] = true
			console.print_debug('Removing data directory: ${data_dir}')
			osal.exec(cmd: 'rm -rf ${data_dir}', stdout: false, raise_error: false) or {
				console.print_debug('Failed to remove ${data_dir}: ${err}')
			}
		}
	}
	// Also remove /etc/rancher which K3s creates
	console.print_debug('Removing /etc/rancher')
	osal.exec(cmd: 'rm -rf /etc/rancher', stdout: false, raise_error: false) or {}
	// Step 6: Clean up CNI
	console.print_header('Cleaning up CNI directories...')
	osal.exec(cmd: 'rm -rf /var/lib/cni/', stdout: false, raise_error: false) or {}
	// Step 7: Clean up iptables rules — strip KUBE-/CNI-/flannel entries,
	// preserving all unrelated rules.
	console.print_header('Cleaning up iptables rules')
	osal.exec(
		cmd: 'iptables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | iptables-restore'
		stdout: false
		raise_error: false
	) or {}
	osal.exec(
		cmd: 'ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | ip6tables-restore'
		stdout: false
		raise_error: false
	) or {}
	console.print_header('K3s destruction completed')
}
// Remove CNI/flannel network artifacts: veth slaves of the cni0 bridge, the
// CNI and flannel interfaces themselves, and lingering cni-* namespaces.
// Every deletion is best-effort — failures are logged, never fatal.
fn cleanup_network() ! {
	console.print_header('Cleaning up network interfaces')
	// Remove interfaces that are slaves of cni0
	// Get the list first, then delete one by one
	if veth_result := osal.exec(
		cmd: 'ip link show | grep "master cni0" | awk -F: \'{print $2}\' | xargs'
		stdout: false
		raise_error: false
	) {
		if veth_result.output.trim_space() != '' {
			veth_interfaces := veth_result.output.trim_space().split(' ')
			for veth in veth_interfaces {
				veth_trimmed := veth.trim_space()
				if veth_trimmed != '' {
					console.print_debug('Deleting veth interface: ${veth_trimmed}')
					osal.exec(cmd: 'ip link delete ${veth_trimmed}', stdout: false, raise_error: false) or {
						console.print_debug('Failed to delete ${veth_trimmed}, continuing...')
					}
				}
			}
		}
	} else {
		console.print_debug('No veth interfaces found or error getting list')
	}
	// Remove CNI-related interfaces
	interfaces := ['cni0', 'flannel.1', 'flannel-v6.1', 'kube-ipvs0', 'flannel-wg', 'flannel-wg-v6']
	for iface in interfaces {
		console.print_debug('Deleting interface: ${iface}')
		// Use timeout to prevent hanging, and redirect stderr to avoid blocking
		osal.exec(cmd: 'timeout 5 ip link delete ${iface} 2>/dev/null || true', stdout: false, raise_error: false) or {
			console.print_debug('Interface ${iface} not found or already deleted')
		}
	}
	// Remove CNI namespaces
	if ns_result := osal.exec(
		cmd: 'ip netns show | grep cni- | xargs'
		stdout: false
		raise_error: false
	) {
		if ns_result.output.trim_space() != '' {
			namespaces := ns_result.output.trim_space().split(' ')
			for ns in namespaces {
				ns_trimmed := ns.trim_space()
				if ns_trimmed != '' {
					console.print_debug('Deleting namespace: ${ns_trimmed}')
					osal.exec(cmd: 'ip netns delete ${ns_trimmed}', stdout: false, raise_error: false) or {
						console.print_debug('Failed to delete namespace ${ns_trimmed}')
					}
				}
			}
		}
	} else {
		console.print_debug('No CNI namespaces found')
	}
}
// Unmount kubelet/CNI mount points (deepest path first) and remove the
// underlying directories. All failures are logged and ignored.
fn cleanup_mounts() ! {
	console.print_header('Cleaning up mounts')
	// Unmount and remove kubelet directories
	// NOTE(review): '/run/netns/cni-' works as a substring match for mounts
	// below, but the final `rm -rf` does not glob — confirm 'cni-*' was meant.
	paths := ['/run/k3s', '/var/lib/kubelet/pods', '/var/lib/kubelet/plugins', '/run/netns/cni-']
	for path in paths {
		// Find all mounts under this path and unmount them
		// (`sort -r` yields deepest-first so children unmount before parents)
		if mount_result := osal.exec(
			cmd: 'mount | grep "${path}" | awk \'{print $3}\' | sort -r'
			stdout: false
			raise_error: false
		) {
			if mount_result.output.trim_space() != '' {
				mount_points := mount_result.output.split_into_lines()
				for mount_point in mount_points {
					mp_trimmed := mount_point.trim_space()
					if mp_trimmed != '' {
						console.print_debug('Unmounting: ${mp_trimmed}')
						osal.exec(cmd: 'umount -f ${mp_trimmed}', stdout: false, raise_error: false) or {
							console.print_debug('Failed to unmount ${mp_trimmed}')
						}
					}
				}
			}
		} else {
			console.print_debug('No mounts found for ${path}')
		}
		// Remove the directory
		console.print_debug('Removing directory: ${path}')
		osal.exec(cmd: 'rm -rf ${path}', stdout: false, raise_error: false) or {}
	}
}
//////////////////// GENERIC INSTALLER FUNCTIONS ////////////////////
// No upload list applies to this installer; return an empty UList.
fn ulist_get() !ulist.UList {
	return ulist.UList{}
}
// uploads to S3 server if configured
fn upload() ! {
	// Intentionally a no-op: K3s produces no build artifacts to publish.
}
// Legacy generic install: installs kubectl only for the current platform
// (K3s itself is handled by install_master/join_master/install_worker).
fn install() ! {
	console.print_header('install kubectl')
	mut url := ''
	mut dest_path := '/tmp/kubectl'
	// Determine download URL based on platform
	if core.is_linux_arm()! {
		url = 'https://dl.k8s.io/release/v${version}/bin/linux/arm64/kubectl'
	} else if core.is_linux_intel()! {
		url = 'https://dl.k8s.io/release/v${version}/bin/linux/amd64/kubectl'
	} else if core.is_osx_arm()! {
		url = 'https://dl.k8s.io/release/v${version}/bin/darwin/arm64/kubectl'
	} else if core.is_osx_intel()! {
		url = 'https://dl.k8s.io/release/v${version}/bin/darwin/amd64/kubectl'
	} else {
		return error('unsupported platform for kubectl installation')
	}
	// NOTE(review): the module const `version` is 'v1.33.1' in this revision,
	// so 'v${version}' yields a doubled 'v' prefix — confirm against the const.
	console.print_header('downloading kubectl from ${url}')
	// Download kubectl binary
	osal.download(
		url: url
		// minsize_kb: 40000 // kubectl is ~45MB
		dest: dest_path
	)!
	// Make it executable
	os.chmod(dest_path, 0o755)!
	// Install to system
	osal.cmd_add(
		cmdname: 'kubectl'
		source: dest_path
	)!
	// Create .kube directory with proper permissions
	kube_dir := os.join_path(os.home_dir(), '.kube')
	if !os.exists(kube_dir) {
		console.print_header('creating ${kube_dir} directory')
		os.mkdir_all(kube_dir)!
		os.chmod(kube_dir, 0o700)! // read/write/execute for owner only
		console.print_header('${kube_dir} directory created with permissions 0700')
	} else {
		// Ensure correct permissions even if directory exists
		os.chmod(kube_dir, 0o700)!
		console.print_header('${kube_dir} directory permissions set to 0700')
	}
	console.print_header('kubectl installed successfully')
}
fn destroy() ! {
console.print_header('destroy kubectl')
if !installed()! {
console.print_header('kubectl is not installed')
return
}
// Remove kubectl command
osal.cmd_delete('kubectl')!
// Clean up any temporary files
osal.rm('/tmp/kubectl')!
console.print_header('kubectl destruction completed')
return error('Use install_master, join_master, or install_worker instead of generic install')
}

View File

@@ -4,6 +4,9 @@ import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.osal.core as osal
import time
__global (
kubernetes_installer_global map[string]&KubernetesInstaller
@@ -125,22 +128,70 @@ pub fn play(mut plbook PlayBook) ! {
}
mut install_actions := plbook.find(filter: 'kubernetes_installer.configure')!
if install_actions.len > 0 {
return error("can't configure kubernetes_installer, because no configuration allowed for this installer.")
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'kubernetes_installer.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install'] {
mut p := other_action.params
reset := p.get_default_false('reset')
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut k8s_obj := get(name: name, create: true)!
console.print_debug('action object:\n${k8s_obj}')
if other_action.name in ['destroy', 'install', 'build'] {
if other_action.name == 'destroy' || reset {
console.print_debug('install action kubernetes_installer.destroy')
destroy()!
k8s_obj.destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action kubernetes_installer.install')
install()!
k8s_obj.install(reset: reset)!
}
}
if other_action.name in ['start', 'stop', 'restart'] {
if other_action.name == 'start' {
console.print_debug('install action kubernetes_installer.${other_action.name}')
k8s_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action kubernetes_installer.${other_action.name}')
k8s_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action kubernetes_installer.${other_action.name}')
k8s_obj.restart()!
}
}
// K3s-specific actions
if other_action.name in ['install_master', 'join_master', 'install_worker'] {
if other_action.name == 'install_master' {
console.print_debug('install action kubernetes_installer.install_master')
k8s_obj.install_master()!
}
if other_action.name == 'join_master' {
console.print_debug('install action kubernetes_installer.join_master')
k8s_obj.join_master()!
}
if other_action.name == 'install_worker' {
console.print_debug('install action kubernetes_installer.install_worker')
k8s_obj.install_worker()!
}
}
if other_action.name == 'get_kubeconfig' {
console.print_debug('install action kubernetes_installer.get_kubeconfig')
kubeconfig := k8s_obj.get_kubeconfig()!
console.print_header('Kubeconfig:\n${kubeconfig}')
}
if other_action.name == 'generate_join_script' {
console.print_debug('install action kubernetes_installer.generate_join_script')
script := k8s_obj.generate_join_script()!
console.print_header('Join Script:\n${script}')
}
other_action.done = true
}
}
@@ -149,12 +200,107 @@ pub fn play(mut plbook PlayBook) ! {
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// Resolve the requested startup-manager backend (screen/zinit/systemd),
// falling back to auto-detection for any other category; logs the choice.
fn startupmanager_get(cat startupmanager.StartupManagerType) !startupmanager.StartupManager {
	match cat {
		.screen {
			console.print_debug("installer: kubernetes_installer' startupmanager get screen")
			return startupmanager.get(.screen)!
		}
		.zinit {
			console.print_debug("installer: kubernetes_installer' startupmanager get zinit")
			return startupmanager.get(.zinit)!
		}
		.systemd {
			console.print_debug("installer: kubernetes_installer' startupmanager get systemd")
			return startupmanager.get(.systemd)!
		}
		else {
			console.print_debug("installer: kubernetes_installer' startupmanager get auto")
			return startupmanager.get(.auto)!
		}
	}
}
// load from disk and make sure it is properly initialized
// (re-runs obj_init so defaults and auto-detection are applied again)
pub fn (mut self KubernetesInstaller) reload() ! {
	switch(self.name)
	self = obj_init(self)!
}
// Start the configured K3s node through its startup manager.
// Idempotent: returns immediately when already running; errors when the
// binaries are missing or the process fails to come up within ~5 seconds.
pub fn (mut self KubernetesInstaller) start() ! {
	switch(self.name)
	if self.running()! {
		return
	}
	console.print_header('installer: kubernetes_installer start')
	if !installed()! {
		return error('K3s is not installed. Please run install_master, join_master, or install_worker first.')
	}
	// Ensure data directory exists
	osal.dir_ensure(self.data_dir)!
	// Create manifests directory for auto-apply
	manifests_dir := '${self.data_dir}/server/manifests'
	osal.dir_ensure(manifests_dir)!
	// Register and launch every declared process with its startup manager.
	for zprocess in self.startupcmd()! {
		mut sm := startupmanager_get(zprocess.startuptype)!
		console.print_debug('installer: kubernetes_installer starting with ${zprocess.startuptype}...')
		sm.new(zprocess)!
		sm.start(zprocess.name)!
	}
	// Poll for up to 50 x 100ms until the node reports running.
	for _ in 0 .. 50 {
		if self.running()! {
			return
		}
		time.sleep(100 * time.millisecond)
	}
	return error('kubernetes_installer did not start properly.')
}
// Convenience wrapper: install (honouring args such as reset) and then start.
pub fn (mut self KubernetesInstaller) install_start(args InstallArgs) ! {
	switch(self.name)
	self.install(args)!
	self.start()!
}
// Stop every process this instance declared via its startup manager.
pub fn (mut self KubernetesInstaller) stop() ! {
	switch(self.name)
	for proc in self.startupcmd()! {
		mut mgr := startupmanager_get(proc.startuptype)!
		mgr.stop(proc.name)!
	}
}
// Restart is a plain stop + start sequence.
pub fn (mut self KubernetesInstaller) restart() ! {
	switch(self.name)
	self.stop()!
	self.start()!
}
// Report whether every declared process is up according to its startup
// manager, then confirm via the module-level process check (pgrep on k3s).
pub fn (mut self KubernetesInstaller) running() !bool {
	switch(self.name)
	// walk over the generic processes, if not running return
	// NOTE(review): .screen processes are skipped here — presumably screen
	// cannot report status reliably; confirm that assumption.
	for zprocess in self.startupcmd()! {
		if zprocess.startuptype != .screen {
			mut sm := startupmanager_get(zprocess.startuptype)!
			r := sm.running(zprocess.name)!
			if r == false {
				return false
			}
		}
	}
	return running()!
}
@[params]
pub struct InstallArgs {
pub mut:
@@ -170,6 +316,7 @@ pub fn (mut self KubernetesInstaller) install(args InstallArgs) ! {
// Best-effort stop first (errors ignored), then full module-level cleanup.
pub fn (mut self KubernetesInstaller) destroy() ! {
	switch(self.name)
	self.stop() or {}
	destroy()!
}

View File

@@ -1,27 +1,203 @@
module kubernetes_installer
import incubaid.herolib.data.encoderhero
import incubaid.herolib.osal.core as osal
import os
import rand
pub const version = '1.31.0'
pub const version = 'v1.33.1'
const singleton = true
const default = true
// K3s installer - handles K3s cluster installation with Mycelium IPv6 networking
@[heap]
pub struct KubernetesInstaller {
pub mut:
	// Instance name: configuration key and systemd unit suffix ('k3s_<name>').
	// NOTE(review): the field appears twice below — this looks like merge/diff
	// residue; a struct cannot declare the same field twice. Keep only one.
	name string = 'default'
	name string = 'default'
	// K3s version to install
	k3s_version string = version
	// Data directory for K3s (default: ~/hero/var/k3s)
	data_dir string
	// Unique node name/identifier
	node_name string
	// Mycelium interface name (auto-detected if not specified)
	mycelium_interface string
	// Cluster token for authentication (auto-generated if empty)
	token string
	// Master URL for joining cluster (e.g., 'https://[ipv6]:6443')
	master_url string
	// Node IPv6 address (auto-detected from Mycelium if empty)
	node_ip string
	// Is this a master/control-plane node?
	is_master bool
	// Is this the first master (uses --cluster-init)?
	is_first_master bool
}
// Fill in defaults (data dir, node name, Mycelium interface, token) for a
// freshly loaded or newly configured KubernetesInstaller.
fn obj_init(mycfg_ KubernetesInstaller) !KubernetesInstaller {
	mut mycfg := mycfg_
	// Set default data directory if not provided
	if mycfg.data_dir == '' {
		mycfg.data_dir = os.join_path(os.home_dir(), 'hero/var/k3s')
	}
	// Expand home directory in data_dir if it contains ~
	if mycfg.data_dir.starts_with('~') {
		mycfg.data_dir = mycfg.data_dir.replace_once('~', os.home_dir())
	}
	// Set default node name if not provided
	if mycfg.node_name == '' {
		// os.hostname() reports failure explicitly, unlike the original
		// os.execute('hostname'), which trusted whatever landed on stdout
		// without checking the exit code.
		hostname := os.hostname() or { '' }
		mycfg.node_name = if hostname != '' { hostname } else { 'k3s-node-${rand.hex(4)}' }
	}
	// Auto-detect Mycelium interface if not provided
	if mycfg.mycelium_interface == '' {
		mycfg.mycelium_interface = detect_mycelium_interface()!
	}
	// Generate token if not provided and this is the first master
	if mycfg.token == '' && mycfg.is_first_master {
		// Generate a secure random token
		mycfg.token = rand.hex(32)
	}
	// Note: Validation of token/master_url is done in the specific action functions
	// (join_master, install_worker) where the context is clear
	return mycfg
}
// Get path to kubeconfig file
// Absolute path of the admin kubeconfig written by the K3s server
// under this instance's data directory.
pub fn (self &KubernetesInstaller) kubeconfig_path() string {
	return self.data_dir + '/server/cred/admin.kubeconfig'
}
// Get Mycelium IPv6 address from interface
// Node IPv6 to bind K3s to: an explicitly configured node_ip wins,
// otherwise the address is detected from the Mycelium interface.
pub fn (self &KubernetesInstaller) get_mycelium_ipv6() !string {
	if self.node_ip.len > 0 {
		return self.node_ip
	}
	return get_mycelium_ipv6_from_interface(self.mycelium_interface)!
}
// Auto-detect Mycelium interface by finding 400::/7 route
// Find the network interface carrying the Mycelium 400::/7 route.
fn detect_mycelium_interface() !string {
	res := osal.exec(
		cmd:         'ip -6 route | grep "^400::/7"'
		stdout:      false
		raise_error: false
	)!
	line := res.output.trim_space()
	if res.exit_code != 0 || line == '' {
		return error('No Mycelium interface found (no 400::/7 route detected). Please ensure Mycelium is installed and running.')
	}
	// Route lines look like: "400::/7 dev <interface> ...".
	// The interface is the token immediately after the 'dev' keyword.
	tokens := line.split(' ')
	for i in 0 .. tokens.len - 1 {
		if tokens[i] == 'dev' {
			return tokens[i + 1]
		}
	}
	return error('Could not parse Mycelium interface from route output: ${line}')
}
// Helper function to detect Mycelium IPv6 from interface
fn get_mycelium_ipv6_from_interface(iface string) !string {
// Step 1: Find the 400::/7 route via the interface
route_result := osal.exec(
cmd: 'ip -6 route | grep "^400::/7.*dev ${iface}"'
stdout: false
) or { return error('No 400::/7 route found via interface ${iface}') }
route_line := route_result.output.trim_space()
if route_line == '' {
return error('No 400::/7 route found via interface ${iface}')
}
// Step 2: Get all global IPv6 addresses on the interface
addr_result := osal.exec(
cmd: 'ip -6 addr show dev ${iface} scope global | grep inet6 | awk \'{print $2}\' | cut -d/ -f1'
stdout: false
)!
ipv6_list := addr_result.output.split_into_lines()
// Check if route has a next-hop (via keyword)
parts := route_line.split(' ')
mut nexthop := ''
for i, part in parts {
if part == 'via' && i + 1 < parts.len {
nexthop = parts[i + 1]
break
}
}
if nexthop != '' {
// Route has a next-hop: match by prefix (first 4 segments)
prefix_parts := nexthop.split(':')
if prefix_parts.len < 4 {
return error('Invalid IPv6 next-hop format: ${nexthop}')
}
prefix := prefix_parts[0..4].join(':')
// Step 3: Match the one with the same prefix
for ip in ipv6_list {
ip_trimmed := ip.trim_space()
if ip_trimmed == '' {
continue
}
ip_parts := ip_trimmed.split(':')
if ip_parts.len >= 4 {
ip_prefix := ip_parts[0..4].join(':')
if ip_prefix == prefix {
return ip_trimmed
}
}
}
return error('No global IPv6 address found on ${iface} matching prefix ${prefix}')
} else {
// Direct route (no via): return the first IPv6 address in 400::/7 range
for ip in ipv6_list {
ip_trimmed := ip.trim_space()
if ip_trimmed == '' {
continue
}
// Check if IP is in 400::/7 range (starts with 4 or 5)
if ip_trimmed.starts_with('4') || ip_trimmed.starts_with('5') {
return ip_trimmed
}
}
return error('No global IPv6 address found on ${iface} in 400::/7 range')
}
}
// called before start if done
// Prepare the on-disk layout before K3s is started.
fn configure() ! {
	mut cfg := get()!
	// Root directory for all K3s state (server data, credentials, ...).
	osal.dir_ensure(cfg.data_dir)!
	// Manifests dropped into this directory are auto-applied by the K3s server.
	osal.dir_ensure('${cfg.data_dir}/server/manifests')!
}
/////////////NORMALLY NO NEED TO TOUCH

View File

@@ -1,3 +0,0 @@
https://github.com/codescalers/kubecloud/blob/master/k3s/native_guide/k3s_killall.sh
still need to implement this

View File

@@ -1,44 +1,224 @@
# K3s Installer
Complete K3s cluster installer with multi-master HA support, worker nodes, and Mycelium IPv6 networking.
## Features
- **Multi-Master HA**: Install multiple master nodes with `--cluster-init`
- **Worker Nodes**: Add worker nodes to the cluster
- **Mycelium IPv6**: Automatic detection of Mycelium IPv6 addresses from the 400::/7 range
- **Lifecycle Management**: Start, stop, restart K3s via startupmanager (systemd/zinit/screen)
- **Join Scripts**: Auto-generate heroscripts for joining additional nodes
- **Complete Cleanup**: Destroy removes all K3s components, network interfaces, and data
## Quick Start
### Install First Master
```v
import incubaid.herolib.installers.virt.kubernetes_installer
heroscript := "
!!kubernetes_installer.configure
name:'k3s_master_1'
k3s_version:'v1.33.1'
node_name:'master-1'
mycelium_interface:'mycelium0'
!!kubernetes_installer.install_master name:'k3s_master_1'
!!kubernetes_installer.start name:'k3s_master_1'
"

kubernetes_installer.play(heroscript: heroscript)!
```
### Join Additional Master (HA)

```v
heroscript := "
!!kubernetes_installer.configure
name:'k3s_master_2'
node_name:'master-2'
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
!!kubernetes_installer.join_master name:'k3s_master_2'
!!kubernetes_installer.start name:'k3s_master_2'
"
kubernetes_installer.play(heroscript: heroscript)!
```
### Install Worker Node
```v
heroscript := "
!!kubernetes_installer.configure
name:'k3s_worker_1'
node_name:'worker-1'
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
!!kubernetes_installer.install_worker name:'k3s_worker_1'
!!kubernetes_installer.start name:'k3s_worker_1'
"
kubernetes_installer.play(heroscript: heroscript)!
```
## Configuration Options
| Field | Type | Default | Description |
|-------|------|---------|-------------|
| `name` | string | 'default' | Instance name |
| `k3s_version` | string | 'v1.33.1' | K3s version to install |
| `data_dir` | string | '~/hero/var/k3s' | Data directory for K3s |
| `node_name` | string | hostname | Unique node identifier |
| `mycelium_interface` | string | auto-detected | Mycelium interface name (auto-detected from 400::/7 route) |
| `token` | string | auto-generated | Cluster authentication token |
| `master_url` | string | - | Master URL for joining (e.g., 'https://[ipv6]:6443') |
| `node_ip` | string | auto-detected | Node IPv6 (auto-detected from Mycelium) |
## Actions
### Installation Actions
- `install_master` - Install first master node (generates token, uses --cluster-init)
- `join_master` - Join as additional master (requires token + master_url)
- `install_worker` - Install worker node (requires token + master_url)
### Lifecycle Actions
- `start` - Start K3s via startupmanager
- `stop` - Stop K3s
- `restart` - Restart K3s
- `destroy` - Complete cleanup (removes all K3s components)
### Utility Actions
- `get_kubeconfig` - Get kubeconfig content
- `generate_join_script` - Generate heroscript for joining nodes
## Requirements
- **OS**: Ubuntu (installer checks and fails on non-Ubuntu systems)
- **Mycelium**: Must be installed and running with interface in 400::/7 range
- **Root Access**: Required for installing system packages and managing network
## How It Works
### Mycelium IPv6 Detection
The installer automatically detects your Mycelium IPv6 address by:
1. Finding the 400::/7 route via the Mycelium interface
2. Extracting the next-hop IPv6 and getting the prefix (first 4 segments)
3. Matching global IPv6 addresses on the interface with the same prefix
4. Using the matched IPv6 for K3s `--node-ip`
This ensures K3s binds to the correct Mycelium IPv6 even if the server has other IPv6 addresses.
### Cluster Setup
**First Master:**
- Uses `--cluster-init` flag
- Auto-generates secure token
- Configures IPv6 CIDRs: cluster=2001:cafe:42::/56, service=2001:cafe:43::/112
- Generates join script for other nodes
**Additional Masters:**
- Joins with `--server <master_url>`
- Requires token and master_url from first master
- Provides HA for control plane
**Workers:**
- Joins as agent with `--server <master_url>`
- Requires token and master_url from first master
### Cleanup
The `destroy` action performs complete cleanup:
- Stops K3s process
- Removes network interfaces (cni0, flannel.*, etc.)
- Unmounts kubelet mounts
- Removes data directory
- Cleans up iptables/ip6tables rules
- Removes CNI namespaces
## Example Workflow
1. **Install first master on server1:**
```bash
hero run templates/examples.heroscript
# Note the token and IPv6 address displayed
```
2. **Join additional master on server2:**
```bash
# Edit examples.heroscript Section 2 with token and master_url
hero run templates/examples.heroscript
```
3. **Add worker on server3:**
```bash
# Edit examples.heroscript Section 3 with token and master_url
hero run templates/examples.heroscript
```
4. **Verify cluster:**
```bash
kubectl get nodes
kubectl get pods --all-namespaces
```
## Kubeconfig
The kubeconfig is located at: `<data_dir>/server/cred/admin.kubeconfig`
To use kubectl:
```bash
export KUBECONFIG=~/hero/var/k3s/server/cred/admin.kubeconfig
kubectl get nodes
```
Or copy to default location:
```bash
mkdir -p ~/.kube
cp ~/hero/var/k3s/server/cred/admin.kubeconfig ~/.kube/config
```
## Troubleshooting
**K3s won't start:**
- Check if Mycelium is running: `ip -6 addr show mycelium0`
- Verify 400::/7 route exists: `ip -6 route | grep 400::/7`
- Check logs: `journalctl -u k3s_* -f`
**Can't join cluster:**
- Verify token matches first master
- Ensure master_url uses correct IPv6 in brackets: `https://[ipv6]:6443`
- Check network connectivity over Mycelium: `ping6 <master_ipv6>`
**Cleanup issues:**
- Run destroy with sudo if needed
- Manually check for remaining processes: `pgrep -f k3s`
- Check for remaining mounts: `mount | grep k3s`
## See Also
- [K3s Documentation](https://docs.k3s.io/)
- [Mycelium Documentation](https://github.com/threefoldtech/mycelium)
- [Example Heroscript](templates/examples.heroscript)

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env hero
// ============================================================================
// K3s Cluster Installation Examples
// ============================================================================
//
// This file contains examples for installing K3s clusters with Mycelium IPv6
// networking. Choose the appropriate section based on your node type.
//
// Prerequisites:
// - Ubuntu OS
// - Mycelium installed and running
// - Mycelium interface (default: mycelium0)
// ============================================================================
// ============================================================================
// SECTION 1: Install First Master Node
// ============================================================================
// This creates the initial master node and initializes the cluster.
// The token will be auto-generated and displayed for use with other nodes.
!!kubernetes_installer.configure
name:'k3s_master_1'
k3s_version:'v1.33.1'
data_dir:'~/hero/var/k3s'
node_name:'master-1'
// mycelium_interface:'mycelium0' // Optional: auto-detected if not specified
// Install as first master (will generate token and use --cluster-init)
!!kubernetes_installer.install_master name:'k3s_master_1'
// Start K3s
!!kubernetes_installer.start name:'k3s_master_1'
// Get kubeconfig (optional - to verify installation)
// !!kubernetes_installer.get_kubeconfig name:'k3s_master_1'
// Generate join script for other nodes (optional)
// !!kubernetes_installer.generate_join_script name:'k3s_master_1'
// ============================================================================
// SECTION 2: Join as Additional Master (HA Setup)
// ============================================================================
// Use this to add more master nodes for high availability.
// You MUST have the token and master_url from the first master.
/*
!!kubernetes_installer.configure
name:'k3s_master_2'
k3s_version:'v1.33.1'
data_dir:'~/hero/var/k3s'
node_name:'master-2'
// mycelium_interface:'mycelium0' // Optional: auto-detected if not specified
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
// Join as additional master
!!kubernetes_installer.join_master name:'k3s_master_2'
// Start K3s
!!kubernetes_installer.start name:'k3s_master_2'
*/
// ============================================================================
// SECTION 3: Install Worker Node
// ============================================================================
// Use this to add worker nodes to the cluster.
// You MUST have the token and master_url from the first master.
/*
!!kubernetes_installer.configure
name:'k3s_worker_1'
k3s_version:'v1.33.1'
data_dir:'~/hero/var/k3s'
node_name:'worker-1'
// mycelium_interface:'mycelium0' // Optional: auto-detected if not specified
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
// Install as worker
!!kubernetes_installer.install_worker name:'k3s_worker_1'
// Start K3s
!!kubernetes_installer.start name:'k3s_worker_1'
*/
// ============================================================================
// SECTION 4: Lifecycle Management
// ============================================================================
// Common operations for managing K3s
// Stop K3s
// !!kubernetes_installer.stop name:'k3s_master_1'
// Restart K3s
// !!kubernetes_installer.restart name:'k3s_master_1'
// Get kubeconfig
// !!kubernetes_installer.get_kubeconfig name:'k3s_master_1'
// Destroy K3s (complete cleanup)
// !!kubernetes_installer.destroy name:'k3s_master_1'
// ============================================================================
// NOTES:
// ============================================================================
// 1. Replace <TOKEN_FROM_FIRST_MASTER> with the actual token displayed after
// installing the first master
// 2. Replace <MASTER_IPV6> with the Mycelium IPv6 address of the first master
// 3. The data_dir defaults to ~/hero/var/k3s if not specified
// 4. The mycelium_interface is auto-detected (from the 400::/7 route) if not specified
// 5. The k3s_version defaults to 'v1.33.1' if not specified
// 6. After installation, use kubectl to manage your cluster:
// - kubectl get nodes
// - kubectl get pods --all-namespaces
// 7. The kubeconfig is located at: <data_dir>/server/cred/admin.kubeconfig

View File

@@ -0,0 +1,54 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.playcmds
import incubaid.herolib.ui.console

// ============================================================================
// K3s Join Additional Master (HA Setup)
// ============================================================================
// Joins an extra master node to an already-running K3s cluster so the
// control plane becomes highly available.
//
// Prerequisites:
//   1. The first master is up and running
//   2. The cluster token taken from the first master
//   3. The master URL (IPv6 address and port)
// ============================================================================

banner := '='.repeat(80)
console.print_header(banner)
console.print_header('K3s Join Additional Master Node')
console.print_header(banner)

// IMPORTANT: fill in the real cluster details below. Both values can be read
// from the first master's join script, or produced via:
//   !!kubernetes_installer.generate_join_script name:"k3s_master_1"
master_token := 'YOUR_CLUSTER_TOKEN_HERE' // token minted by the first master
master_url := 'https://[YOUR_MASTER_IPV6]:6443' // first master's IPv6 address

join_master_script := '
!!kubernetes_installer.configure
name:"k3s_master_2"
k3s_version:"v1.33.1"
data_dir:"~/hero/var/k3s"
node_name:"master-2"
mycelium_interface:"mycelium"
token:"${master_token}"
master_url:"${master_url}"
!!kubernetes_installer.join_master name:"k3s_master_2"
!!kubernetes_installer.start name:"k3s_master_2"
'

console.print_header('⚠️ Before running, make sure to:')
console.print_header(' 1. Update master_token with your cluster token')
console.print_header(' 2. Update master_url with your first master IPv6')
console.print_header(' 3. Ensure first master is running')
console.print_header('')

// Uncomment to actually perform the join:
// playcmds.run(heroscript: join_master_script)!

console.print_header('✅ Script ready. Uncomment playcmds.run() to execute.')
console.print_header(banner)

View File

@@ -0,0 +1,53 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.playcmds
import incubaid.herolib.ui.console

// ============================================================================
// K3s Join Worker Node
// ============================================================================
// Joins a worker (agent) node to an existing K3s cluster.
//
// Prerequisites:
//   1. At least one master is up and running
//   2. The cluster token taken from the master
//   3. The master URL (IPv6 address and port)
// ============================================================================

banner := '='.repeat(80)
console.print_header(banner)
console.print_header('K3s Join Worker Node')
console.print_header(banner)

// IMPORTANT: fill in the real cluster details below. Both values can be read
// from the master's join script, or produced via:
//   !!kubernetes_installer.generate_join_script name:"k3s_master_1"
master_token := 'YOUR_CLUSTER_TOKEN_HERE' // token minted by the master
master_url := 'https://[YOUR_MASTER_IPV6]:6443' // master's IPv6 address

join_worker_script := '
!!kubernetes_installer.configure
name:"k3s_worker_1"
k3s_version:"v1.33.1"
data_dir:"~/hero/var/k3s"
node_name:"worker-1"
mycelium_interface:"mycelium"
token:"${master_token}"
master_url:"${master_url}"
!!kubernetes_installer.install_worker name:"k3s_worker_1"
!!kubernetes_installer.start name:"k3s_worker_1"
'

console.print_header('⚠️ Before running, make sure to:')
console.print_header(' 1. Update master_token with your cluster token')
console.print_header(' 2. Update master_url with your master IPv6')
console.print_header(' 3. Ensure master is running')
console.print_header('')

// Uncomment to actually perform the join:
// playcmds.run(heroscript: join_worker_script)!

console.print_header('✅ Script ready. Uncomment playcmds.run() to execute.')
console.print_header(banner)

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.playcmds
import incubaid.herolib.ui.console

// K3s full-lifecycle smoke test: install a single master node, start it,
// then tear everything down again via destroy.

// FIX: original used "'='*.repeat(80)" which is not valid V; the string
// method call is "'='.repeat(80)".
console.print_header('='.repeat(80))
console.print_header('K3s Install/Uninstall Lifecycle Test')
console.print_header('='.repeat(80))

// ============================================================================
// PHASE 1: Install Master
// ============================================================================
console.print_header('\n📦 PHASE 1: Installing K3s Master')

install_script := '
!!kubernetes_installer.configure
name:"k3s_test"
node_name:"test-master"
!!kubernetes_installer.install_master name:"k3s_test"
!!kubernetes_installer.start name:"k3s_test"
'

playcmds.run(heroscript: install_script)!
console.print_header('✅ Installation completed!')

// ============================================================================
// PHASE 2: Uninstall
// ============================================================================
console.print_header('\n🧹 PHASE 2: Uninstalling K3s')

uninstall_script := '
!!kubernetes_installer.configure
name:"k3s_test"
!!kubernetes_installer.destroy name:"k3s_test"
'

playcmds.run(heroscript: uninstall_script)!
console.print_header('✅ Uninstallation completed!')

console.print_header('\n' + '='.repeat(80))
console.print_header('✅ FULL LIFECYCLE TEST COMPLETED!')
console.print_header('='.repeat(80))