feat(k3s-installer)

This commit is contained in:
peternashaat
2025-11-26 11:55:57 +00:00
parent aa434fddee
commit 3f09aad045
5 changed files with 1005 additions and 134 deletions

View File

@@ -2,119 +2,431 @@ module kubernetes_installer
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.texttools
import incubaid.herolib.core
import incubaid.herolib.core.pathlib
import incubaid.herolib.installers.ulist
import incubaid.herolib.osal.startupmanager
import os
//////////////////// following actions are not specific to instance of the object
//////////////////// STARTUP COMMAND ////////////////////
// Builds the startupmanager process definition(s) used to run K3s.
// Chooses between `k3s server` (first master / joining master) and
// `k3s agent` (worker) based on the instance flags.
// NOTE: reconstructed — the previous text interleaved the old installed()
// kubectl version check with this function (stray diff residue).
fn (self &KubernetesInstaller) startupcmd() ![]startupmanager.ZProcessNewArgs {
	mut res := []startupmanager.ZProcessNewArgs{}
	// Get Mycelium IPv6 address, used as the node IP
	ipv6 := self.get_mycelium_ipv6()!
	// Build K3s command based on node type
	mut cmd := ''
	mut extra_args := '--node-ip=${ipv6} --flannel-iface ${self.mycelium_interface}'
	// Add data directory if specified
	if self.data_dir != '' {
		extra_args += ' --data-dir ${self.data_dir} --kubelet-arg=root-dir=${self.data_dir}/kubelet'
	}
	// Add token
	if self.token != '' {
		extra_args += ' --token ${self.token}'
	}
	if self.is_master {
		// Master node configuration: IPv6 cluster/service CIDRs + IPv6 NAT for flannel
		extra_args += ' --cluster-cidr=2001:cafe:42::/56 --service-cidr=2001:cafe:43::/112 --flannel-ipv6-masq'
		if self.is_first_master {
			// First master: initialize cluster
			cmd = 'k3s server --cluster-init ${extra_args}'
		} else {
			// Additional master: join existing cluster
			if self.master_url == '' {
				return error('master_url is required for joining as additional master')
			}
			cmd = 'k3s server --server ${self.master_url} ${extra_args}'
		}
	} else {
		// Worker node: join as agent
		if self.master_url == '' {
			return error('master_url is required for worker nodes')
		}
		cmd = 'k3s agent --server ${self.master_url} ${extra_args}'
	}
	res << startupmanager.ZProcessNewArgs{
		name: 'k3s_${self.name}'
		cmd:  cmd
		env:  {
			'HOME': os.home_dir()
		}
	}
	return res
}
//////////////////// RUNNING CHECK ////////////////////
// Reports whether K3s is up: the k3s process must exist AND kubectl must be
// able to reach the API server.
fn running() !bool {
	// No k3s server/agent process at all -> definitely not running
	proc_check := osal.exec(cmd: 'pgrep -f "k3s (server|agent)"', stdout: false, raise_error: false)!
	if proc_check.exit_code != 0 {
		return false
	}
	// Process exists; also verify kubectl can talk to the API server
	api_check := osal.exec(
		cmd:         'kubectl get nodes'
		stdout:      false
		raise_error: false
	)!
	return api_check.exit_code == 0
}
//////////////////// OS CHECK ////////////////////
// Verifies the host is Ubuntu Linux; errors out on anything else.
fn check_ubuntu() ! {
	// Must be Linux at all
	if !core.is_linux()! {
		return error('K3s installer requires Linux. Current OS is not supported.')
	}
	// Inspect /etc/os-release to confirm the distribution
	mut os_release := pathlib.get_file(path: '/etc/os-release') or {
		return error('Could not read /etc/os-release. Is this Ubuntu?')
	}
	content := os_release.read()!
	is_ubuntu := content.contains('Ubuntu') || content.contains('ubuntu')
	if !is_ubuntu {
		return error('This installer requires Ubuntu. Current OS is not Ubuntu.')
	}
	console.print_debug('OS check passed: Running on Ubuntu')
}
//////////////////// DEPENDENCY INSTALLATION ////////////////////
// Installs everything K3s needs on this host: curl, iproute2, the k3s binary
// and a kubectl matching the K3s version. Picks binaries per CPU architecture
// (amd64/arm64), consistent with the platform handling in install().
fn install_deps(k3s_version string) ! {
	console.print_header('Installing dependencies...')
	// curl is needed for generic downloads
	if !osal.cmd_exists('curl') {
		console.print_header('Installing curl...')
		osal.package_install('curl')!
	}
	// iproute2 provides the `ip` command used for Mycelium IPv6 detection
	if !osal.cmd_exists('ip') {
		console.print_header('Installing iproute2...')
		osal.package_install('iproute2')!
	}
	// Fix: the previous code hard-coded amd64 assets, breaking ARM hosts
	is_arm := core.is_linux_arm()!
	// Install K3s binary
	if !osal.cmd_exists('k3s') {
		console.print_header('Installing K3s ${k3s_version}...')
		// upstream publishes `k3s` for amd64 and `k3s-arm64` for arm64
		k3s_asset := if is_arm { 'k3s-arm64' } else { 'k3s' }
		k3s_url := 'https://github.com/k3s-io/k3s/releases/download/${k3s_version}+k3s1/${k3s_asset}'
		osal.download(
			url:  k3s_url
			dest: '/tmp/k3s'
		)!
		// Make it executable and register it as a system command
		osal.exec(cmd: 'chmod +x /tmp/k3s')!
		osal.cmd_add(
			cmdname: 'k3s'
			source:  '/tmp/k3s'
		)!
	}
	// Install kubectl matching the K3s version (e.g. v1.33.1)
	if !osal.cmd_exists('kubectl') {
		console.print_header('Installing kubectl...')
		kubectl_version := k3s_version
		arch := if is_arm { 'arm64' } else { 'amd64' }
		kubectl_url := 'https://dl.k8s.io/release/${kubectl_version}/bin/linux/${arch}/kubectl'
		osal.download(
			url:  kubectl_url
			dest: '/tmp/kubectl'
		)!
		osal.exec(cmd: 'chmod +x /tmp/kubectl')!
		osal.cmd_add(
			cmdname: 'kubectl'
			source:  '/tmp/kubectl'
		)!
	}
	console.print_header('All dependencies installed successfully')
}
//////////////////// INSTALLATION ACTIONS ////////////////////
// True when both the k3s and kubectl commands are available on this host.
fn installed() !bool {
	if !osal.cmd_exists('k3s') {
		return false
	}
	return osal.cmd_exists('kubectl')
}
// Install first master node
// Bootstraps the very first control-plane node of a new cluster: checks the
// OS, installs dependencies, prepares the data directory, persists the
// configuration and prints the token + generated join script.
// Does NOT start K3s; call start() afterwards.
pub fn (mut self KubernetesInstaller) install_master() ! {
	console.print_header('Installing K3s as first master node')
	// Check OS (Ubuntu-only installer)
	check_ubuntu()!
	// Mark this instance as the cluster-initializing master
	self.is_master = true
	self.is_first_master = true
	// Install dependencies (curl, iproute2, k3s, kubectl)
	install_deps(self.k3s_version)!
	// Ensure data directory exists
	osal.dir_ensure(self.data_dir)!
	// Save configuration
	// NOTE(review): token auto-generation lives in obj_init; presumably set()
	// triggers it before the token is printed below — confirm.
	set(self)!
	console.print_header('K3s first master installation completed')
	console.print_header('Token: ${self.token}')
	console.print_header('To start K3s, run: kubernetes_installer.start')
	// Generate join script for adding more masters/workers later
	join_script := self.generate_join_script()!
	console.print_header('Join script generated. Save this for other nodes:\n${join_script}')
}
// Join as additional master
// Adds this host as an extra control-plane node of an existing cluster (HA).
// Requires token and master_url obtained from the first master.
// Does NOT start K3s; call start() afterwards.
pub fn (mut self KubernetesInstaller) join_master() ! {
	console.print_header('Joining K3s cluster as additional master')
	// Check OS (Ubuntu-only installer)
	check_ubuntu()!
	// Validate required fields before touching the system
	if self.token == '' {
		return error('token is required to join cluster')
	}
	if self.master_url == '' {
		return error('master_url is required to join cluster')
	}
	// Set flags: master, but NOT the cluster initializer
	self.is_master = true
	self.is_first_master = false
	// Install dependencies
	install_deps(self.k3s_version)!
	// Ensure data directory exists
	osal.dir_ensure(self.data_dir)!
	// Save configuration
	set(self)!
	console.print_header('K3s additional master installation completed')
	console.print_header('To start K3s, run: kubernetes_installer.start')
}
// Install worker node
// Adds this host to the cluster as an agent (no control-plane role).
// Requires token and master_url obtained from the first master.
// Does NOT start K3s; call start() afterwards.
pub fn (mut self KubernetesInstaller) install_worker() ! {
	console.print_header('Installing K3s as worker node')
	// Check OS (Ubuntu-only installer)
	check_ubuntu()!
	// Validate required fields before touching the system
	if self.token == '' {
		return error('token is required to join cluster')
	}
	if self.master_url == '' {
		return error('master_url is required to join cluster')
	}
	// Set flags: plain worker
	self.is_master = false
	self.is_first_master = false
	// Install dependencies
	install_deps(self.k3s_version)!
	// Ensure data directory exists
	osal.dir_ensure(self.data_dir)!
	// Save configuration
	set(self)!
	console.print_header('K3s worker installation completed')
	console.print_header('To start K3s, run: kubernetes_installer.start')
}
//////////////////// UTILITY FUNCTIONS ////////////////////
// Reads the admin kubeconfig from the K3s data directory and returns its
// content. Errors when the file is missing (i.e. K3s has not run yet).
pub fn (self &KubernetesInstaller) get_kubeconfig() !string {
	cfg_path := self.kubeconfig_path()
	missing_msg := 'Kubeconfig not found at ${cfg_path}. Is K3s running?'
	mut cfg_file := pathlib.get_file(path: cfg_path) or { return error(missing_msg) }
	// get_file may succeed without the file existing on disk; double-check
	if !cfg_file.exists() {
		return error(missing_msg)
	}
	return cfg_file.read()!
}
// Generate join script for other nodes
// Produces a heroscript template (with commented-out sections for an extra
// master and a worker) embedding this cluster's token and master URL.
// Only valid on the first master, which owns the canonical token.
pub fn (self &KubernetesInstaller) generate_join_script() !string {
	if !self.is_first_master {
		return error('Can only generate join script from first master node')
	}
	// Get Mycelium IPv6 of this master; used to build the join URL
	master_ipv6 := self.get_mycelium_ipv6()!
	master_url := 'https://[${master_ipv6}]:6443'
	// Everything up to the closing quote is the literal script body
	mut script := '#!/usr/bin/env hero
// ============================================================================
// K3s Cluster Join Script
// Generated from master node: ${self.node_name}
// ============================================================================
// Section 1: Join as Additional Master (HA)
// Uncomment to join as additional master node
/*
!!kubernetes_installer.configure
name:\'k3s_master_2\'
k3s_version:\'${self.k3s_version}\'
data_dir:\'${self.data_dir}\'
node_name:\'master-2\'
mycelium_interface:\'${self.mycelium_interface}\'
token:\'${self.token}\'
master_url:\'${master_url}\'
!!kubernetes_installer.join_master name:\'k3s_master_2\'
!!kubernetes_installer.start name:\'k3s_master_2\'
*/
// Section 2: Join as Worker Node
// Uncomment to join as worker node
/*
!!kubernetes_installer.configure
name:\'k3s_worker_1\'
k3s_version:\'${self.k3s_version}\'
data_dir:\'${self.data_dir}\'
node_name:\'worker-1\'
mycelium_interface:\'${self.mycelium_interface}\'
token:\'${self.token}\'
master_url:\'${master_url}\'
!!kubernetes_installer.install_worker name:\'k3s_worker_1\'
!!kubernetes_installer.start name:\'k3s_worker_1\'
*/
'
	return script
}
//////////////////// CLEANUP ////////////////////
// Full teardown of a K3s installation: kills the process tree, removes
// network interfaces, mounts, the data directory, CNI state and the
// kube-related iptables/ip6tables rules. Later steps are best-effort.
fn destroy() ! {
	console.print_header('Destroying K3s installation')
	// Stop K3s if running (kills the whole process tree)
	osal.process_kill_recursive(name: 'k3s')!
	// Get configuration to find data directory; fall back to a fresh default
	mut cfg := get() or {
		console.print_debug('No configuration found, using default paths')
		KubernetesInstaller{}
	}
	data_dir := if cfg.data_dir != '' { cfg.data_dir } else { '/var/lib/rancher/k3s' }
	// Clean up network interfaces
	cleanup_network()!
	// Unmount kubelet mounts
	cleanup_mounts()!
	// Remove data directory (always non-empty here because of the fallback above)
	if data_dir != '' {
		console.print_header('Removing data directory: ${data_dir}')
		osal.rm(data_dir)!
	}
	// Clean up CNI state (best-effort)
	osal.exec(cmd: 'rm -rf /var/lib/cni/', stdout: false) or {}
	// Clean up iptables rules: keep everything except KUBE-/CNI-/flannel entries
	console.print_header('Cleaning up iptables rules')
	osal.exec(
		cmd: 'iptables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | iptables-restore'
		stdout: false
		raise_error: false
	) or {}
	osal.exec(
		cmd: 'ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | ip6tables-restore'
		stdout: false
		raise_error: false
	) or {}
	console.print_header('K3s destruction completed')
}
// Removes K3s/CNI network artifacts: interfaces enslaved to cni0, the
// well-known flannel/CNI interfaces, and cni-* network namespaces.
// Every step is best-effort (errors ignored) so destroy can proceed.
fn cleanup_network() ! {
	console.print_header('Cleaning up network interfaces')
	// Remove interfaces that are slaves of cni0
	osal.exec(
		cmd: 'ip link show | grep "master cni0" | awk -F: \'{print $2}\' | xargs -r -n1 ip link delete'
		stdout: false
		raise_error: false
	) or {}
	// Remove CNI-related interfaces by their conventional names
	interfaces := ['cni0', 'flannel.1', 'flannel-v6.1', 'kube-ipvs0', 'flannel-wg', 'flannel-wg-v6']
	for iface in interfaces {
		osal.exec(cmd: 'ip link delete ${iface}', stdout: false, raise_error: false) or {}
	}
	// Remove CNI network namespaces (names starting with cni-)
	osal.exec(
		cmd: 'ip netns show | grep cni- | xargs -r -n1 ip netns delete'
		stdout: false
		raise_error: false
	) or {}
}
// Unmounts kubelet/k3s mounts under known paths and removes the directories.
// Every step is best-effort (errors ignored).
// NOTE(review): '/run/netns/cni-' looks like a path PREFIX, not a directory;
// `rm -rf /run/netns/cni-` only removes a literal entry of that exact name —
// confirm whether a glob was intended.
fn cleanup_mounts() ! {
	console.print_header('Cleaning up mounts')
	// Unmount and remove kubelet directories
	paths := ['/run/k3s', '/var/lib/kubelet/pods', '/var/lib/kubelet/plugins', '/run/netns/cni-']
	for path in paths {
		// Find all mounts under this path and unmount them (deepest first via sort -r)
		osal.exec(
			cmd: 'mount | grep "${path}" | awk \'{print $3}\' | sort -r | xargs -r -n1 umount -f'
			stdout: false
			raise_error: false
		) or {}
		// Remove the directory itself
		osal.exec(cmd: 'rm -rf ${path}', stdout: false, raise_error: false) or {}
	}
}
//////////////////// GENERIC INSTALLER FUNCTIONS ////////////////////
// Returns the (empty) upload list — nothing to upload for this installer.
fn ulist_get() !ulist.UList {
	return ulist.UList{}
}
// uploads to S3 server if configured
// Intentional no-op: K3s has no artifacts to upload.
fn upload() ! {
	// Not applicable for kubectl
	// Not applicable for K3s
}
// NOTE(review): legacy kubectl-only install path — the K3s flow now uses
// install_master/join_master/install_worker + install_deps; confirm this
// generic install() is still meant to be reachable.
// Downloads a platform-specific kubectl binary, registers it as a system
// command, and prepares ~/.kube with owner-only (0700) permissions.
fn install() ! {
	console.print_header('install kubectl')
	mut url := ''
	mut dest_path := '/tmp/kubectl'
	// Determine download URL based on platform (linux/darwin x arm64/amd64)
	if core.is_linux_arm()! {
		url = 'https://dl.k8s.io/release/v${version}/bin/linux/arm64/kubectl'
	} else if core.is_linux_intel()! {
		url = 'https://dl.k8s.io/release/v${version}/bin/linux/amd64/kubectl'
	} else if core.is_osx_arm()! {
		url = 'https://dl.k8s.io/release/v${version}/bin/darwin/arm64/kubectl'
	} else if core.is_osx_intel()! {
		url = 'https://dl.k8s.io/release/v${version}/bin/darwin/amd64/kubectl'
	} else {
		return error('unsupported platform for kubectl installation')
	}
	// NOTE(review): if the `version` const is already v-prefixed ('v1.33.1'),
	// 'v${version}' yields a double prefix ('vv1.33.1') — verify the const.
	console.print_header('downloading kubectl from ${url}')
	// Download kubectl binary
	osal.download(
		url: url
		// minsize_kb: 40000 // kubectl is ~45MB
		dest: dest_path
	)!
	// Make it executable
	os.chmod(dest_path, 0o755)!
	// Install to system
	osal.cmd_add(
		cmdname: 'kubectl'
		source: dest_path
	)!
	// Create .kube directory with proper permissions
	kube_dir := os.join_path(os.home_dir(), '.kube')
	if !os.exists(kube_dir) {
		console.print_header('creating ${kube_dir} directory')
		os.mkdir_all(kube_dir)!
		os.chmod(kube_dir, 0o700)! // read/write/execute for owner only
		console.print_header('${kube_dir} directory created with permissions 0700')
	} else {
		// Ensure correct permissions even if directory exists
		os.chmod(kube_dir, 0o700)!
		console.print_header('${kube_dir} directory permissions set to 0700')
	}
	console.print_header('kubectl installed successfully')
}
// NOTE(review): this file already defines destroy() above (K3s cleanup); a
// second fn destroy() here is a duplicate definition left over from the old
// kubectl-only installer and will not compile as-is — confirm which body is
// intended and remove the other.
fn destroy() ! {
	console.print_header('destroy kubectl')
	if !installed()! {
		console.print_header('kubectl is not installed')
		return
	}
	// Remove kubectl command
	osal.cmd_delete('kubectl')!
	// Clean up any temporary files
	osal.rm('/tmp/kubectl')!
	console.print_header('kubectl destruction completed')
	// NOTE(review): this return executes after a successful cleanup, so the
	// function ALWAYS errors — looks like merge residue; verify intent.
	return error('Use install_master, join_master, or install_worker instead of generic install')
}

View File

@@ -4,6 +4,8 @@ import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
import incubaid.herolib.osal.startupmanager
import time
__global (
kubernetes_installer_global map[string]&KubernetesInstaller
@@ -125,22 +127,70 @@ pub fn play(mut plbook PlayBook) ! {
}
mut install_actions := plbook.find(filter: 'kubernetes_installer.configure')!
if install_actions.len > 0 {
return error("can't configure kubernetes_installer, because no configuration allowed for this installer.")
for mut install_action in install_actions {
heroscript := install_action.heroscript()
mut obj2 := heroscript_loads(heroscript)!
set(obj2)!
install_action.done = true
}
}
mut other_actions := plbook.find(filter: 'kubernetes_installer.')!
for mut other_action in other_actions {
if other_action.name in ['destroy', 'install'] {
mut p := other_action.params
reset := p.get_default_false('reset')
mut p := other_action.params
name := p.get_default('name', 'default')!
reset := p.get_default_false('reset')
mut k8s_obj := get(name: name, create: true)!
console.print_debug('action object:\n${k8s_obj}')
if other_action.name in ['destroy', 'install', 'build'] {
if other_action.name == 'destroy' || reset {
console.print_debug('install action kubernetes_installer.destroy')
destroy()!
k8s_obj.destroy()!
}
if other_action.name == 'install' {
console.print_debug('install action kubernetes_installer.install')
install()!
k8s_obj.install(reset: reset)!
}
}
if other_action.name in ['start', 'stop', 'restart'] {
if other_action.name == 'start' {
console.print_debug('install action kubernetes_installer.${other_action.name}')
k8s_obj.start()!
}
if other_action.name == 'stop' {
console.print_debug('install action kubernetes_installer.${other_action.name}')
k8s_obj.stop()!
}
if other_action.name == 'restart' {
console.print_debug('install action kubernetes_installer.${other_action.name}')
k8s_obj.restart()!
}
}
// K3s-specific actions
if other_action.name in ['install_master', 'join_master', 'install_worker'] {
if other_action.name == 'install_master' {
console.print_debug('install action kubernetes_installer.install_master')
k8s_obj.install_master()!
}
if other_action.name == 'join_master' {
console.print_debug('install action kubernetes_installer.join_master')
k8s_obj.join_master()!
}
if other_action.name == 'install_worker' {
console.print_debug('install action kubernetes_installer.install_worker')
k8s_obj.install_worker()!
}
}
if other_action.name == 'get_kubeconfig' {
console.print_debug('install action kubernetes_installer.get_kubeconfig')
kubeconfig := k8s_obj.get_kubeconfig()!
console.print_header('Kubeconfig:\n${kubeconfig}')
}
if other_action.name == 'generate_join_script' {
console.print_debug('install action kubernetes_installer.generate_join_script')
script := k8s_obj.generate_join_script()!
console.print_header('Join Script:\n${script}')
}
other_action.done = true
}
}
@@ -149,12 +199,102 @@ pub fn play(mut plbook PlayBook) ! {
//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// Resolves the startup-manager backend for the requested category;
// anything other than screen/zinit/systemd falls through to auto-detection.
// Fix: the debug messages carried a stray trailing apostrophe after the
// module name ("kubernetes_installer'").
fn startupmanager_get(cat startupmanager.StartupManagerType) !startupmanager.StartupManager {
	match cat {
		.screen {
			console.print_debug('installer: kubernetes_installer startupmanager get screen')
			return startupmanager.get(.screen)!
		}
		.zinit {
			console.print_debug('installer: kubernetes_installer startupmanager get zinit')
			return startupmanager.get(.zinit)!
		}
		.systemd {
			console.print_debug('installer: kubernetes_installer startupmanager get systemd')
			return startupmanager.get(.systemd)!
		}
		else {
			console.print_debug('installer: kubernetes_installer startupmanager get auto')
			return startupmanager.get(.auto)!
		}
	}
}
// load from disk and make sure is properly initialized
// Re-runs obj_init on this instance so defaults and validation are reapplied.
pub fn (mut self KubernetesInstaller) reload() ! {
	switch(self.name)
	self = obj_init(self)!
}
// Start K3s for this instance via the configured startup manager.
// No-op when already running; errors when K3s is not installed or when it
// does not come up within ~5 seconds.
pub fn (mut self KubernetesInstaller) start() ! {
	switch(self.name)
	// Already running -> nothing to do
	if self.running()! {
		return
	}
	console.print_header('installer: kubernetes_installer start')
	if !installed()! {
		return error('K3s is not installed. Please run install_master, join_master, or install_worker first.')
	}
	// Make sure expected directories exist before launching
	configure()!
	// Register and start every process definition with its startup manager
	for zprocess in self.startupcmd()! {
		mut sm := startupmanager_get(zprocess.startuptype)!
		console.print_debug('installer: kubernetes_installer starting with ${zprocess.startuptype}...')
		sm.new(zprocess)!
		sm.start(zprocess.name)!
	}
	// Poll up to ~5s (50 x 100ms) for the service to report running
	for _ in 0 .. 50 {
		if self.running()! {
			return
		}
		time.sleep(100 * time.millisecond)
	}
	return error('kubernetes_installer did not start properly.')
}
// Convenience wrapper: install (honouring args) then start in one call.
pub fn (mut self KubernetesInstaller) install_start(args InstallArgs) ! {
	switch(self.name)
	self.install(args)!
	self.start()!
}
// Stop every K3s process of this instance via its startup manager.
pub fn (mut self KubernetesInstaller) stop() ! {
	switch(self.name)
	for zprocess in self.startupcmd()! {
		mut sm := startupmanager_get(zprocess.startuptype)!
		sm.stop(zprocess.name)!
	}
}
// Stop then start this instance again.
pub fn (mut self KubernetesInstaller) restart() ! {
	switch(self.name)
	self.stop()!
	self.start()!
}
// True when every managed zprocess reports running AND the module-level
// running() check (k3s process + kubectl connectivity) passes.
pub fn (mut self KubernetesInstaller) running() !bool {
	switch(self.name)
	// walk over the generic processes, if not running return
	for zprocess in self.startupcmd()! {
		// screen-managed processes are skipped here — presumably screen cannot
		// reliably report status; TODO confirm
		if zprocess.startuptype != .screen {
			mut sm := startupmanager_get(zprocess.startuptype)!
			r := sm.running(zprocess.name)!
			if r == false {
				return false
			}
		}
	}
	return running()!
}
@[params]
pub struct InstallArgs {
pub mut:
@@ -170,6 +310,7 @@ pub fn (mut self KubernetesInstaller) install(args InstallArgs) ! {
// Stop this instance (best-effort, errors ignored) and run the module-level
// destroy cleanup.
pub fn (mut self KubernetesInstaller) destroy() ! {
	switch(self.name)
	self.stop() or {}
	destroy()!
}

View File

@@ -1,27 +1,161 @@
module kubernetes_installer
import incubaid.herolib.data.encoderhero
import incubaid.herolib.osal.core as osal
import os
import rand
// Default K3s release installed (tag without the '+k3s1' suffix).
// Fix: `version` was declared twice (old '1.31.0' residue) — duplicate
// const declarations do not compile.
pub const version = 'v1.33.1'
const singleton = true
const default = true
// K3s installer - handles K3s cluster installation with Mycelium IPv6 networking
// Fix: the `name` field was declared twice (old-line residue) — duplicate
// struct fields do not compile; stale kubectl-era comment removed.
@[heap]
pub struct KubernetesInstaller {
pub mut:
	// Instance name used for config storage and process naming
	name string = 'default'
	// K3s version to install
	k3s_version string = version
	// Data directory for K3s (default: ~/hero/var/k3s)
	data_dir string
	// Unique node name/identifier
	node_name string
	// Mycelium interface name (default: mycelium0)
	mycelium_interface string = 'mycelium0'
	// Cluster token for authentication (auto-generated if empty)
	token string
	// Master URL for joining cluster (e.g., 'https://[ipv6]:6443')
	master_url string
	// Node IPv6 address (auto-detected from Mycelium if empty)
	node_ip string
	// Is this a master/control-plane node?
	is_master bool
	// Is this the first master (uses --cluster-init)?
	is_first_master bool
}
// your checking & initialization code if needed
// Normalizes a freshly-loaded/created config: fills defaults (data_dir,
// node_name), expands '~', auto-generates the cluster token for the first
// master, and validates join parameters.
fn obj_init(mycfg_ KubernetesInstaller) !KubernetesInstaller {
	mut mycfg := mycfg_
	// Set default data directory if not provided
	if mycfg.data_dir == '' {
		mycfg.data_dir = os.join_path(os.home_dir(), 'hero/var/k3s')
	}
	// Expand home directory in data_dir if it contains ~
	if mycfg.data_dir.starts_with('~') {
		mycfg.data_dir = mycfg.data_dir.replace_once('~', os.home_dir())
	}
	// Set default node name if not provided (hostname, else random suffix)
	if mycfg.node_name == '' {
		hostname := os.execute('hostname').output.trim_space()
		mycfg.node_name = if hostname != '' { hostname } else { 'k3s-node-${rand.hex(4)}' }
	}
	// Generate token if not provided and this is the first master
	if mycfg.token == '' && mycfg.is_first_master {
		// Generate a secure random token
		mycfg.token = rand.hex(32)
	}
	// Validate: join operations require token and master_url
	// NOTE(review): this also fires for a pristine default instance
	// (is_first_master=false, empty token/master_url) created before configure
	// runs — confirm get(create:true) callers are not broken by it.
	if !mycfg.is_first_master && (mycfg.token == '' || mycfg.master_url == '') {
		return error('Joining a cluster requires both token and master_url to be set')
	}
	return mycfg
}
// Get path to kubeconfig file
// Location of the admin kubeconfig inside the configured data directory.
pub fn (self &KubernetesInstaller) kubeconfig_path() string {
	base := self.data_dir
	return '${base}/server/cred/admin.kubeconfig'
}
// Get Mycelium IPv6 address from interface
// Returns the explicitly configured node_ip when set; otherwise detects the
// address from the configured Mycelium interface.
pub fn (self &KubernetesInstaller) get_mycelium_ipv6() !string {
	if self.node_ip.len > 0 {
		return self.node_ip
	}
	return get_mycelium_ipv6_from_interface(self.mycelium_interface)!
}
// Helper function to detect Mycelium IPv6 from interface
// Finds the global IPv6 on `iface` whose first 4 hextets match the next-hop
// of the 400::/7 route (the Mycelium overlay range).
// NOTE(review): prefix comparison splits the textual address on ':' —
// addresses using '::' compression yield fewer segments and may be rejected
// or mis-matched; confirm Mycelium address formatting makes this safe.
fn get_mycelium_ipv6_from_interface(iface string) !string {
	// Step 1: Find the 400::/7 route via the interface
	route_result := osal.exec(
		cmd: 'ip -6 route | grep "^400::/7.*dev ${iface}"'
		stdout: false
	) or { return error('No 400::/7 route found via interface ${iface}') }
	route_line := route_result.output.trim_space()
	if route_line == '' {
		return error('No 400::/7 route found via interface ${iface}')
	}
	// Step 2: Extract next-hop IPv6 and get prefix (first 4 segments)
	// Parse: "400::/7 via <nexthop> dev <iface> ..."
	parts := route_line.split(' ')
	mut nexthop := ''
	for i, part in parts {
		if part == 'via' && i + 1 < parts.len {
			nexthop = parts[i + 1]
			break
		}
	}
	if nexthop == '' {
		return error('Could not extract next-hop from route: ${route_line}')
	}
	// Get first 4 segments of IPv6 address (prefix)
	prefix_parts := nexthop.split(':')
	if prefix_parts.len < 4 {
		return error('Invalid IPv6 next-hop format: ${nexthop}')
	}
	prefix := prefix_parts[0..4].join(':')
	// Step 3: Get all global IPv6 addresses on the interface
	addr_result := osal.exec(
		cmd: 'ip -6 addr show dev ${iface} scope global | grep inet6 | awk \'{print $2}\' | cut -d/ -f1'
		stdout: false
	)!
	ipv6_list := addr_result.output.split_into_lines()
	// Step 4: Match the one with the same prefix
	for ip in ipv6_list {
		ip_trimmed := ip.trim_space()
		if ip_trimmed == '' {
			continue
		}
		ip_parts := ip_trimmed.split(':')
		if ip_parts.len >= 4 {
			ip_prefix := ip_parts[0..4].join(':')
			if ip_prefix == prefix {
				return ip_trimmed
			}
		}
	}
	return error('No global IPv6 address found on ${iface} matching prefix ${prefix}')
}
// called before start if done
// Ensures the directories K3s expects are present before launch: the data
// directory itself and server/manifests (used by K3s for auto-apply).
fn configure() ! {
	mut cfg := get()!
	osal.dir_ensure(cfg.data_dir)!
	// K3s auto-applies any manifests dropped into server/manifests
	osal.dir_ensure('${cfg.data_dir}/server/manifests')!
}
/////////////NORMALLY NO NEED TO TOUCH

View File

@@ -1,44 +1,212 @@
# K3s Installer
Complete K3s cluster installer with multi-master HA support, worker nodes, and Mycelium IPv6 networking.
## Features
- **Multi-Master HA**: Install multiple master nodes with `--cluster-init`
- **Worker Nodes**: Add worker nodes to the cluster
- **Mycelium IPv6**: Automatic detection of Mycelium IPv6 addresses from the 400::/7 range
- **Lifecycle Management**: Start, stop, restart K3s via startupmanager (systemd/zinit/screen)
- **Join Scripts**: Auto-generate heroscripts for joining additional nodes
- **Complete Cleanup**: Destroy removes all K3s components, network interfaces, and data
## Quick Start
### Install First Master
```v
import incubaid.herolib.installers.virt.kubernetes_installer
heroscript := "
!!kubernetes_installer.configure
    name:'k3s_master_1'
    k3s_version:'v1.33.1'
    node_name:'master-1'
    mycelium_interface:'mycelium0'

!!kubernetes_installer.install_master name:'k3s_master_1'
!!kubernetes_installer.start name:'k3s_master_1'
"
kubernetes_installer.play(heroscript: heroscript)!
```
### Join Additional Master (HA)

```v
heroscript := "
!!kubernetes_installer.configure
    name:'k3s_master_2'
    node_name:'master-2'
    token:'<TOKEN_FROM_FIRST_MASTER>'
    master_url:'https://[<MASTER_IPV6>]:6443'

!!kubernetes_installer.join_master name:'k3s_master_2'
!!kubernetes_installer.start name:'k3s_master_2'
"
kubernetes_installer.play(heroscript: heroscript)!
```
### Install Worker Node
```v
heroscript := "
!!kubernetes_installer.configure
name:'k3s_worker_1'
node_name:'worker-1'
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
!!kubernetes_installer.install_worker name:'k3s_worker_1'
!!kubernetes_installer.start name:'k3s_worker_1'
"
kubernetes_installer.play(heroscript: heroscript)!
```
## Configuration Options
| Field | Type | Default | Description |
|-------|------|---------|-------------|
| `name` | string | 'default' | Instance name |
| `k3s_version` | string | 'v1.33.1' | K3s version to install |
| `data_dir` | string | '~/hero/var/k3s' | Data directory for K3s |
| `node_name` | string | hostname | Unique node identifier |
| `mycelium_interface` | string | 'mycelium0' | Mycelium interface name |
| `token` | string | auto-generated | Cluster authentication token |
| `master_url` | string | - | Master URL for joining (e.g., 'https://[ipv6]:6443') |
| `node_ip` | string | auto-detected | Node IPv6 (auto-detected from Mycelium) |
## Actions
### Installation Actions
- `install_master` - Install first master node (generates token, uses --cluster-init)
- `join_master` - Join as additional master (requires token + master_url)
- `install_worker` - Install worker node (requires token + master_url)
### Lifecycle Actions
- `start` - Start K3s via startupmanager
- `stop` - Stop K3s
- `restart` - Restart K3s
- `destroy` - Complete cleanup (removes all K3s components)
### Utility Actions
- `get_kubeconfig` - Get kubeconfig content
- `generate_join_script` - Generate heroscript for joining nodes
## Requirements
- **OS**: Ubuntu (installer checks and fails on non-Ubuntu systems)
- **Mycelium**: Must be installed and running with interface in 400::/7 range
- **Root Access**: Required for installing system packages and managing network
## How It Works
### Mycelium IPv6 Detection
The installer automatically detects your Mycelium IPv6 address by:
1. Finding the 400::/7 route via the Mycelium interface
2. Extracting the next-hop IPv6 and getting the prefix (first 4 segments)
3. Matching global IPv6 addresses on the interface with the same prefix
4. Using the matched IPv6 for K3s `--node-ip`
This ensures K3s binds to the correct Mycelium IPv6 even if the server has other IPv6 addresses.
### Cluster Setup
**First Master:**
- Uses `--cluster-init` flag
- Auto-generates secure token
- Configures IPv6 CIDRs: cluster=2001:cafe:42::/56, service=2001:cafe:43::/112
- Generates join script for other nodes
**Additional Masters:**
- Joins with `--server <master_url>`
- Requires token and master_url from first master
- Provides HA for control plane
**Workers:**
- Joins as agent with `--server <master_url>`
- Requires token and master_url from first master
### Cleanup
The `destroy` action performs complete cleanup:
- Stops K3s process
- Removes network interfaces (cni0, flannel.*, etc.)
- Unmounts kubelet mounts
- Removes data directory
- Cleans up iptables/ip6tables rules
- Removes CNI namespaces
## Example Workflow
1. **Install first master on server1:**
```bash
hero run templates/examples.heroscript
# Note the token and IPv6 address displayed
```
2. **Join additional master on server2:**
```bash
# Edit examples.heroscript Section 2 with token and master_url
hero run templates/examples.heroscript
```
3. **Add worker on server3:**
```bash
# Edit examples.heroscript Section 3 with token and master_url
hero run templates/examples.heroscript
```
4. **Verify cluster:**
```bash
kubectl get nodes
kubectl get pods --all-namespaces
```
## Kubeconfig
The kubeconfig is located at: `<data_dir>/server/cred/admin.kubeconfig`
To use kubectl:
```bash
export KUBECONFIG=~/hero/var/k3s/server/cred/admin.kubeconfig
kubectl get nodes
```
Or copy to default location:
```bash
mkdir -p ~/.kube
cp ~/hero/var/k3s/server/cred/admin.kubeconfig ~/.kube/config
```
## Troubleshooting
**K3s won't start:**
- Check if Mycelium is running: `ip -6 addr show mycelium0`
- Verify 400::/7 route exists: `ip -6 route | grep 400::/7`
- Check logs: `journalctl -u k3s_* -f`
**Can't join cluster:**
- Verify token matches first master
- Ensure master_url uses correct IPv6 in brackets: `https://[ipv6]:6443`
- Check network connectivity over Mycelium: `ping6 <master_ipv6>`
**Cleanup issues:**
- Run destroy with sudo if needed
- Manually check for remaining processes: `pgrep -f k3s`
- Check for remaining mounts: `mount | grep k3s`
## See Also
- [K3s Documentation](https://docs.k3s.io/)
- [Mycelium Documentation](https://github.com/threefoldtech/mycelium)
- [Example Heroscript](templates/examples.heroscript)

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env hero
// ============================================================================
// K3s Cluster Installation Examples
// ============================================================================
//
// This file contains examples for installing K3s clusters with Mycelium IPv6
// networking. Choose the appropriate section based on your node type.
//
// Prerequisites:
// - Ubuntu OS
// - Mycelium installed and running
// - Mycelium interface (default: mycelium0)
// ============================================================================
// ============================================================================
// SECTION 1: Install First Master Node
// ============================================================================
// This creates the initial master node and initializes the cluster.
// The token will be auto-generated and displayed for use with other nodes.
!!kubernetes_installer.configure
name:'k3s_master_1'
k3s_version:'v1.33.1'
data_dir:'~/hero/var/k3s'
node_name:'master-1'
mycelium_interface:'mycelium0'
// Install as first master (will generate token and use --cluster-init)
!!kubernetes_installer.install_master name:'k3s_master_1'
// Start K3s
!!kubernetes_installer.start name:'k3s_master_1'
// Get kubeconfig (optional - to verify installation)
// !!kubernetes_installer.get_kubeconfig name:'k3s_master_1'
// Generate join script for other nodes (optional)
// !!kubernetes_installer.generate_join_script name:'k3s_master_1'
// ============================================================================
// SECTION 2: Join as Additional Master (HA Setup)
// ============================================================================
// Use this to add more master nodes for high availability.
// You MUST have the token and master_url from the first master.
/*
!!kubernetes_installer.configure
name:'k3s_master_2'
k3s_version:'v1.33.1'
data_dir:'~/hero/var/k3s'
node_name:'master-2'
mycelium_interface:'mycelium0'
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
// Join as additional master
!!kubernetes_installer.join_master name:'k3s_master_2'
// Start K3s
!!kubernetes_installer.start name:'k3s_master_2'
*/
// ============================================================================
// SECTION 3: Install Worker Node
// ============================================================================
// Use this to add worker nodes to the cluster.
// You MUST have the token and master_url from the first master.
/*
!!kubernetes_installer.configure
name:'k3s_worker_1'
k3s_version:'v1.33.1'
data_dir:'~/hero/var/k3s'
node_name:'worker-1'
mycelium_interface:'mycelium0'
token:'<TOKEN_FROM_FIRST_MASTER>'
master_url:'https://[<MASTER_IPV6>]:6443'
// Install as worker
!!kubernetes_installer.install_worker name:'k3s_worker_1'
// Start K3s
!!kubernetes_installer.start name:'k3s_worker_1'
*/
// ============================================================================
// SECTION 4: Lifecycle Management
// ============================================================================
// Common operations for managing K3s
// Stop K3s
// !!kubernetes_installer.stop name:'k3s_master_1'
// Restart K3s
// !!kubernetes_installer.restart name:'k3s_master_1'
// Get kubeconfig
// !!kubernetes_installer.get_kubeconfig name:'k3s_master_1'
// Destroy K3s (complete cleanup)
// !!kubernetes_installer.destroy name:'k3s_master_1'
// ============================================================================
// NOTES:
// ============================================================================
// 1. Replace <TOKEN_FROM_FIRST_MASTER> with the actual token displayed after
// installing the first master
// 2. Replace <MASTER_IPV6> with the Mycelium IPv6 address of the first master
// 3. The data_dir defaults to ~/hero/var/k3s if not specified
// 4. The mycelium_interface defaults to 'mycelium0' if not specified
// 5. The k3s_version defaults to 'v1.33.1' if not specified
// 6. After installation, use kubectl to manage your cluster:
// - kubectl get nodes
// - kubectl get pods --all-namespaces
// 7. The kubeconfig is located at: <data_dir>/server/cred/admin.kubeconfig