refactor: Refactor Kubernetes client and CryptPad installer

- Replace kubectl exec calls with Kubernetes client methods
- Improve error handling and logging in Kubernetes client
- Enhance node information retrieval and parsing
- Add comprehensive unit tests for Kubernetes client and Node structs
- Refine YAML validation to allow custom resource definitions
- Update CryptPad installer to use the refactored Kubernetes client
This commit is contained in:
Mahmoud-Emad
2025-10-30 17:58:03 +03:00
parent 82d37374d8
commit 80108d4b36
10 changed files with 817 additions and 74 deletions

1
examples/installers/k8s/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
cryptpad

View File

@@ -6,7 +6,7 @@ import incubaid.herolib.installers.k8s.cryptpad
// 1. Create a new installer instance with a specific hostname.
// Replace 'mycryptpad' with your desired hostname.
mut installer := cryptpad.new(hostname: 'mycryptpadtes2222tt')!
mut installer := cryptpad.new(hostname: 'omda')!
// 2. Install CryptPad.
// This will generate the necessary Kubernetes YAML files and apply them to your cluster.
@@ -16,4 +16,4 @@ println('CryptPad installation started.')
println('You can access it at https://${installer.hostname}.gent01.grid.tf')
// 3. To destroy the deployment, you can run the following:
//installer.destroy()!
// installer.destroy()!

View File

@@ -2,7 +2,6 @@ module zinit
import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json
__global (

View File

@@ -1,8 +1,6 @@
module zinit
import incubaid.herolib.data.encoderhero
import incubaid.herolib.schemas.jsonrpc
import os
pub const version = '0.0.0'
const singleton = true

View File

@@ -2,10 +2,9 @@ module cryptpad
import incubaid.herolib.osal.core as osal
import incubaid.herolib.ui.console
import incubaid.herolib.core.texttools
import incubaid.herolib.core
import incubaid.herolib.osal.startupmanager
import incubaid.herolib.installers.ulist
import incubaid.herolib.virt.kubernetes
import os
import strings
import time
@@ -17,23 +16,36 @@ fn startupcmd() ![]startupmanager.ZProcessNewArgs {
}
fn kubectl_installed() ! {
// Check if kubectl command exists
if !osal.cmd_exists('kubectl') {
return error('kubectl is not installed. Please install it to continue.')
}
// Check if kubectl is configured to connect to a cluster
res := osal.exec(cmd: 'kubectl cluster-info', ignore_error: true)!
if res.exit_code != 0 {
mut k8s := kubernetes.get()!
if !k8s.test_connection()! {
return error('kubectl is not configured to connect to a Kubernetes cluster. Please check your kubeconfig.')
}
}
fn running() !bool {
installer := get()!
res := osal.exec(
cmd: 'kubectl get deployment cryptpad -n ${installer.namespace}'
ignore_error: true
)!
return res.exit_code == 0
mut k8s := kubernetes.get()!
// Try to get the cryptpad deployment
deployments := k8s.get_deployments(installer.namespace) or {
// If we can't get deployments, it's not running
return false
}
// Check if cryptpad deployment exists
for deployment in deployments {
if deployment.name == 'cryptpad' {
return true
}
}
return false
}
fn start_pre() ! {
@@ -66,17 +78,21 @@ fn upload() ! {
fn get_master_node_ips() ![]string {
mut master_ips := []string{}
res := osal.exec(
cmd: 'kubectl get nodes -o jsonpath="{.items[*].status.addresses[?(@.type==\'InternalIP\')].address}" | tr \' \' \'\\n\' | grep \':\''
)!
if res.exit_code != 0 {
return error('Failed to get master node IPs: ${res.output}')
}
for ip in res.output.split('\n') {
if ip.len > 0 {
master_ips << ip
mut k8s := kubernetes.get()!
// Get all nodes using the kubernetes client
nodes := k8s.get_nodes()!
// Extract IPv6 internal IPs from all nodes (dual-stack support)
for node in nodes {
// Check all internal IPs (not just the first one) for IPv6 addresses
for ip in node.internal_ips {
if ip.len > 0 && ip.contains(':') {
master_ips << ip
}
}
}
return master_ips
}
@@ -90,6 +106,16 @@ pub mut:
fn install() ! {
console.print_header('Installing CryptPad...')
// Get installer config to access namespace
installer := get()!
if installer.hostname == '' {
return error('hostname is empty')
}
// Configure kubernetes client with the correct namespace
mut k8s := kubernetes.get()!
k8s.config.namespace = installer.namespace
// 1. Check for dependencies.
console.print_info('Checking for kubectl...')
kubectl_installed()!
@@ -102,10 +128,6 @@ fn install() ! {
// 3. Generate YAML files from templates.
console.print_info('Generating YAML files from templates...')
installer := get()!
if installer.hostname == '' {
return error('hostname is empty')
}
mut backends_str_builder := strings.new_builder(100)
for ip in master_ips {
@@ -126,11 +148,11 @@ fn install() ! {
os.write_file('/tmp/cryptpad.yaml', temp2)!
console.print_info('YAML files generated successfully.')
// 4. Apply the YAML files using `kubectl`.
// 4. Apply the YAML files using kubernetes client
console.print_info('Applying Gateway YAML file to the cluster...')
res1 := osal.exec(cmd: 'kubectl apply -f /tmp/tfgw-cryptpad.yaml')!
if res1.exit_code != 0 {
return error('Failed to apply tfgw-cryptpad.yaml: ${res1.output}')
res1 := k8s.apply_yaml('/tmp/tfgw-cryptpad.yaml')!
if !res1.success {
return error('Failed to apply tfgw-cryptpad.yaml: ${res1.stderr}')
}
console.print_info('Gateway YAML file applied successfully.')
@@ -140,9 +162,9 @@ fn install() ! {
// 6. Apply Cryptpad YAML
console.print_info('Applying Cryptpad YAML file to the cluster...')
res2 := osal.exec(cmd: 'kubectl apply -f /tmp/cryptpad.yaml')!
if res2.exit_code != 0 {
return error('Failed to apply cryptpad.yaml: ${res2.output}')
res2 := k8s.apply_yaml('/tmp/cryptpad.yaml')!
if !res2.success {
return error('Failed to apply cryptpad.yaml: ${res2.stderr}')
}
console.print_info('Cryptpad YAML file applied successfully.')
@@ -177,27 +199,36 @@ pub mut:
// Function for verifying the generation of the FQDN using the tfgw CRD
fn verify_tfgw_deployment(args VerifyTfgwDeployment) ! {
console.print_info('Verifying TFGW deployment for ${args.tfgw_name}...')
mut k8s := kubernetes.get()!
mut is_fqdn_generated := false
for i in 0 .. 30 {
res := osal.exec(
cmd: 'kubectl get tfgw ${args.tfgw_name} -n ${args.namespace} -o jsonpath="{.status.fqdn}"'
ignore_error: true
)!
if res.exit_code == 0 && res.output != '' {
for i in 0 .. args.retry {
// Use kubectl_exec for custom resource (TFGW) with jsonpath
result := k8s.kubectl_exec(
command: 'get tfgw ${args.tfgw_name} -n ${args.namespace} -o jsonpath="{.status.fqdn}"'
) or {
console.print_info('Waiting for FQDN to be generated for ${args.tfgw_name}... (${i + 1}/${args.retry})')
time.sleep(2 * time.second)
continue
}
if result.success && result.stdout != '' {
is_fqdn_generated = true
break
}
console.print_info('Waiting for FQDN to be generated for ${args.tfgw_name}... (${i + 1}/30)')
console.print_info('Waiting for FQDN to be generated for ${args.tfgw_name}... (${i + 1}/${args.retry})')
time.sleep(2 * time.second)
}
if !is_fqdn_generated {
console.print_stderr('Failed to get FQDN for ${args.tfgw_name}.')
res := osal.exec(
cmd: 'kubectl describe tfgw ${args.tfgw_name} -n ${args.namespace}'
ignore_error: true
)!
console.print_stderr(res.output)
// Use describe_resource to get detailed information about the TFGW resource
result := k8s.describe_resource(
resource: 'tfgw'
resource_name: args.tfgw_name
namespace: args.namespace
) or { return error('TFGW deployment failed for ${args.tfgw_name}.') }
console.print_stderr(result.stdout)
return error('TFGW deployment failed for ${args.tfgw_name}.')
}
console.print_info('TFGW deployment for ${args.tfgw_name} verified successfully.')
@@ -206,9 +237,16 @@ fn verify_tfgw_deployment(args VerifyTfgwDeployment) ! {
fn destroy() ! {
console.print_header('Destroying CryptPad...')
installer := get()!
res := osal.exec(cmd: 'kubectl delete ns ${installer.namespace}', ignore_error: true)!
if res.exit_code != 0 {
console.print_stderr('Failed to delete namespace ${installer.namespace}: ${res.output}')
mut k8s := kubernetes.get()!
// Delete the namespace using kubernetes client
result := k8s.delete_resource('namespace', installer.namespace, '') or {
console.print_stderr('Failed to delete namespace ${installer.namespace}: ${err}')
return
}
if !result.success {
console.print_stderr('Failed to delete namespace ${installer.namespace}: ${result.stderr}')
} else {
console.print_info('Namespace ${installer.namespace} deleted.')
}

View File

@@ -1,10 +1,7 @@
module cryptpad
import incubaid.herolib.data.paramsparser
import incubaid.herolib.data.encoderhero
import os
import incubaid.herolib.ui.console
import incubaid.herolib.core.pathlib
pub const version = '1.0.0'
const singleton = true

View File

@@ -12,6 +12,15 @@ pub mut:
retry int
}
// Args for describing a resource via `kubectl describe`.
// Works for namespaced resources (set `namespace`), cluster-scoped
// resources (leave `namespace` empty), and custom resources such as tfgw.
@[params]
pub struct DescribeResourceArgs {
pub mut:
	resource      string // Resource type: pod, node, service, deployment, tfgw, etc.
	resource_name string // Name of the specific resource instance
	namespace     string // Namespace (empty string for cluster-scoped resources)
}
pub struct KubectlResult {
pub mut:
exit_code int
@@ -40,18 +49,52 @@ pub fn (mut k KubeClient) kubectl_exec(args KubectlExecArgs) !KubectlResult {
console.print_debug('executing: ${cmd}')
job := osal.exec(
cmd: cmd
timeout: args.timeout
retry: args.retry
raise_error: false
)!
// Check if this is a command that might produce large output
is_large_output := args.command.contains('get nodes') || args.command.contains('get pods')
|| args.command.contains('get deployments') || args.command.contains('get services')
return KubectlResult{
exit_code: job.exit_code
stdout: job.output
stderr: job.error
success: job.exit_code == 0
if is_large_output {
// Use exec_fast for large outputs (avoids 8KB buffer limit in osal.exec)
// exec_fast uses os.execute which doesn't have the pipe buffer limitation
result_output := osal.exec_fast(
cmd: cmd
ignore_error: true
) or { return error('Failed to execute kubectl command: ${err}') }
// Check if command succeeded by looking for error messages
if result_output.contains('Error from server') || result_output.contains('error:')
|| result_output.contains('Unable to connect') {
return KubectlResult{
exit_code: 1
stdout: result_output
stderr: result_output
success: false
}
}
return KubectlResult{
exit_code: 0
stdout: result_output
stderr: ''
success: result_output.len > 0
}
} else {
// Use regular exec for normal commands (supports timeout and proper error handling)
// Note: stdout must be true to prevent process from hanging when output buffer fills
job := osal.exec(
cmd: cmd
timeout: args.timeout
retry: args.retry
raise_error: false
stdout: true
)!
return KubectlResult{
exit_code: job.exit_code
stdout: job.output
stderr: job.error
success: job.exit_code == 0
}
}
}
@@ -244,6 +287,100 @@ pub fn (mut k KubeClient) get_services(namespace string) ![]Service {
return services
}
// get_nodes lists all cluster nodes via `kubectl get nodes -o json`.
// The raw kubectl JSON is decoded into KubectlNodeListResponse and flattened
// into []Node; all InternalIP/ExternalIP entries are kept (dual-stack aware),
// while internal_ip/external_ip expose the first of each for compatibility.
pub fn (mut k KubeClient) get_nodes() ![]Node {
	result := k.kubectl_exec(command: 'get nodes -o json')!
	if !result.success {
		return error('Failed to get nodes: ${result.stderr}')
	}
	// Decode the raw kubectl JSON into typed structs.
	node_list := json.decode(KubectlNodeListResponse, result.stdout) or {
		// Surface diagnostics before failing: error, length and a short preview.
		console.print_stderr('Failed to parse nodes JSON response')
		console.print_stderr('Error: ${err}')
		console.print_stderr('Response length: ${result.stdout.len} bytes')
		if result.stdout.len > 0 {
			preview := if result.stdout.len < 200 { result.stdout.len } else { 200 }
			console.print_stderr('First 200 chars: ${result.stdout[..preview]}')
		}
		return error('Failed to parse nodes JSON: ${err}')
	}
	mut nodes := []Node{}
	for node_item in node_list.items {
		// Collect every address, keeping all IPs of each type (dual-stack).
		mut int_ips := []string{}
		mut ext_ips := []string{}
		mut host := ''
		for address in node_item.status.addresses {
			match address.address_type {
				'InternalIP' { int_ips << address.address }
				'ExternalIP' { ext_ips << address.address }
				'Hostname' { host = address.address }
				else {}
			}
		}
		// Backward compatibility: expose the first IP of each kind separately.
		first_internal := if int_ips.len > 0 { int_ips[0] } else { '' }
		first_external := if ext_ips.len > 0 { ext_ips[0] } else { '' }
		// Readiness is derived from the 'Ready' condition; Unknown if absent.
		mut readiness := 'Unknown'
		for cond in node_item.status.conditions {
			if cond.condition_type == 'Ready' {
				readiness = if cond.status == 'True' { 'Ready' } else { 'NotReady' }
				break
			}
		}
		// Roles come from 'node-role.kubernetes.io/<role>' labels.
		mut node_roles := []string{}
		for key, _ in node_item.metadata.labels {
			if key.starts_with('node-role.kubernetes.io/') {
				suffix := key.all_after('node-role.kubernetes.io/')
				if suffix.len > 0 {
					node_roles << suffix
				}
			}
		}
		nodes << Node{
			name: node_item.metadata.name
			internal_ip: first_internal
			external_ip: first_external
			internal_ips: int_ips
			external_ips: ext_ips
			hostname: host
			status: readiness
			roles: node_roles
			kubelet_version: node_item.status.node_info.kubelet_version
			os_image: node_item.status.node_info.os_image
			kernel_version: node_item.status.node_info.kernel_version
			container_runtime: node_item.status.node_info.container_runtime_version
			labels: node_item.metadata.labels
			created_at: node_item.metadata.creation_timestamp
		}
	}
	return nodes
}
// Apply YAML file
pub fn (mut k KubeClient) apply_yaml(yaml_path string) !KubectlResult {
// Validate before applying
@@ -252,7 +389,10 @@ pub fn (mut k KubeClient) apply_yaml(yaml_path string) !KubectlResult {
return error('YAML validation failed: ${validation.errors.join(', ')}')
}
console.print_debug('Applying YAML file: ${yaml_path}')
result := k.kubectl_exec(command: 'apply -f ${yaml_path}')!
console.print_debug('Apply completed with exit code: ${result.exit_code}')
if result.success {
console.print_green('Applied: ${validation.kind}/${validation.metadata.name}')
}
@@ -265,13 +405,19 @@ pub fn (mut k KubeClient) delete_resource(kind string, name string, namespace st
return result
}
// Describe resource
pub fn (mut k KubeClient) describe_resource(kind string, name string, namespace string) !string {
result := k.kubectl_exec(command: 'describe ${kind} ${name} -n ${namespace}')!
if !result.success {
return error('Failed to describe resource: ${result.stderr}')
// Describe resource - provides detailed information about a specific resource
pub fn (mut k KubeClient) describe_resource(args DescribeResourceArgs) !KubectlResult {
// Build the describe command
mut cmd := 'describe ${args.resource} ${args.resource_name}'
// Only add namespace flag if namespace is not empty (for namespaced resources)
if args.namespace.len > 0 {
cmd += ' -n ${args.namespace}'
}
return result.stdout
// Execute the command
result := k.kubectl_exec(command: cmd)!
return result
}
// Port forward

View File

@@ -309,6 +309,52 @@ struct KubectlLoadBalancerIngress {
ip string
}
// Node list response from 'kubectl get nodes -o json'.
// These structs mirror the subset of the Kubernetes Node API object that
// get_nodes() consumes; @[json: ...] attributes map camelCase JSON keys.
struct KubectlNodeListResponse {
	items []KubectlNodeItem
}

// One entry of the node list (a single Kubernetes Node object).
struct KubectlNodeItem {
	metadata KubectlNodeMetadata
	spec     KubectlNodeSpec
	status   KubectlNodeStatus
}

// Node metadata: name, labels (roles are encoded as labels) and creation time.
struct KubectlNodeMetadata {
	name               string
	labels             map[string]string
	creation_timestamp string @[json: creationTimestamp]
}

// Node spec; only the pod CIDR is decoded here.
struct KubectlNodeSpec {
	pod_cidr string @[json: podCIDR]
}

// Node status: addresses, health conditions and kubelet/system information.
struct KubectlNodeStatus {
	addresses  []KubectlNodeAddress
	conditions []KubectlNodeCondition
	node_info  KubectlNodeSystemInfo @[json: nodeInfo]
}

// One address entry, e.g. type 'InternalIP', 'ExternalIP' or 'Hostname'.
struct KubectlNodeAddress {
	address      string @[json: address]
	address_type string @[json: type]
}

// One condition entry; the 'Ready' condition drives Node.status in get_nodes().
struct KubectlNodeCondition {
	condition_type string @[json: type]
	status         string
}

// System and kubelet details reported by the node.
struct KubectlNodeSystemInfo {
	architecture              string
	kernel_version            string @[json: kernelVersion]
	os_image                  string @[json: osImage]
	operating_system          string @[json: operatingSystem]
	kubelet_version           string @[json: kubeletVersion]
	container_runtime_version string @[json: containerRuntimeVersion]
}
// ============================================================================
// Runtime resource structs (returned from kubectl get commands)
// ============================================================================
@@ -352,6 +398,25 @@ pub mut:
created_at string
}
// Node runtime information as returned by KubeClient.get_nodes().
pub struct Node {
pub mut:
	name              string
	internal_ip       string   // Primary internal IP (first in list)
	external_ip       string   // Primary external IP (first in list)
	internal_ips      []string // All internal IPs (for dual-stack support)
	external_ips      []string // All external IPs (for dual-stack support)
	hostname          string
	status            string   // Ready, NotReady, Unknown
	roles             []string // Extracted from node-role.kubernetes.io/<role> labels
	kubelet_version   string
	os_image          string
	kernel_version    string
	container_runtime string
	labels            map[string]string
	created_at        string // creationTimestamp from node metadata
}
// Version information from kubectl version command
pub struct VersionInfo {
pub mut:

View File

@@ -1,5 +1,7 @@
module kubernetes
import json
// ============================================================================
// Unit Tests for Kubernetes Client Module
// These tests verify struct creation and data handling without executing
@@ -247,3 +249,491 @@ fn test_kubectl_result_error() ! {
assert result.exit_code == 1
assert result.stderr.contains('Error')
}
// ============================================================================
// Unit Tests for Node Struct and get_nodes() Method
// ============================================================================
// Test Node struct creation with all fields populated.
// `mut` dropped from the binding: it is never mutated after construction.
fn test_node_struct_creation() ! {
	node := Node{
		name: 'worker-node-1'
		internal_ip: '192.168.1.10'
		external_ip: '203.0.113.10'
		hostname: 'worker-node-1.example.com'
		status: 'Ready'
		roles: ['worker']
		kubelet_version: 'v1.31.0'
		os_image: 'Ubuntu 22.04.3 LTS'
		kernel_version: '5.15.0-91-generic'
		container_runtime: 'containerd://1.7.2'
		labels: {
			'kubernetes.io/hostname':         'worker-node-1'
			'node-role.kubernetes.io/worker': ''
		}
		created_at: '2024-01-15T08:00:00Z'
	}
	assert node.name == 'worker-node-1'
	assert node.internal_ip == '192.168.1.10'
	assert node.external_ip == '203.0.113.10'
	assert node.hostname == 'worker-node-1.example.com'
	assert node.status == 'Ready'
	assert node.roles.len == 1
	assert node.roles[0] == 'worker'
	assert node.kubelet_version == 'v1.31.0'
	assert node.os_image == 'Ubuntu 22.04.3 LTS'
	assert node.kernel_version == '5.15.0-91-generic'
	assert node.container_runtime == 'containerd://1.7.2'
	assert node.labels['kubernetes.io/hostname'] == 'worker-node-1'
	assert node.created_at == '2024-01-15T08:00:00Z'
}
// Test a Node carrying control-plane/master roles.
// `mut` dropped: the binding is never mutated.
fn test_node_struct_master_role() ! {
	node := Node{
		name: 'master-node-1'
		status: 'Ready'
		roles: ['control-plane', 'master']
	}
	assert node.name == 'master-node-1'
	assert node.status == 'Ready'
	assert node.roles.len == 2
	assert 'control-plane' in node.roles
	assert 'master' in node.roles
}
// Test a Node with NotReady status.
// `mut` dropped: the binding is never mutated.
fn test_node_struct_not_ready() ! {
	node := Node{
		name: 'worker-node-2'
		status: 'NotReady'
	}
	assert node.name == 'worker-node-2'
	assert node.status == 'NotReady'
}
// Test a Node whose internal IP is IPv6 (contains ':').
// `mut` dropped: the binding is never mutated.
fn test_node_struct_ipv6() ! {
	node := Node{
		name: 'worker-node-3'
		internal_ip: '2001:db8::1'
		status: 'Ready'
	}
	assert node.name == 'worker-node-3'
	assert node.internal_ip == '2001:db8::1'
	assert node.internal_ip.contains(':')
	assert node.status == 'Ready'
}
// Test that a zero-value Node has empty fields and empty collections.
// `mut` dropped: the binding is never mutated.
fn test_node_default_values() ! {
	node := Node{}
	assert node.name == ''
	assert node.internal_ip == ''
	assert node.external_ip == ''
	assert node.hostname == ''
	assert node.status == ''
	assert node.roles.len == 0
	assert node.kubelet_version == ''
	assert node.os_image == ''
	assert node.kernel_version == ''
	assert node.container_runtime == ''
	assert node.labels.len == 0
	assert node.created_at == ''
}
// Test a Node holding several roles at once.
// `mut` dropped: the binding is never mutated.
fn test_node_multiple_roles() ! {
	node := Node{
		name: 'control-plane-1'
		roles: ['control-plane', 'master', 'etcd']
	}
	assert node.roles.len == 3
	assert 'control-plane' in node.roles
	assert 'master' in node.roles
	assert 'etcd' in node.roles
}
// ============================================================================
// Unit Tests for DescribeResourceArgs Struct
// ============================================================================
// Test DescribeResourceArgs for a namespaced resource (pod in 'default').
// `mut` dropped: the binding is never mutated.
fn test_describe_resource_args_namespaced() ! {
	args := DescribeResourceArgs{
		resource: 'pod'
		resource_name: 'nginx-pod'
		namespace: 'default'
	}
	assert args.resource == 'pod'
	assert args.resource_name == 'nginx-pod'
	assert args.namespace == 'default'
}
// Test DescribeResourceArgs for a cluster-scoped resource (empty namespace).
// `mut` dropped: the binding is never mutated.
fn test_describe_resource_args_cluster_scoped() ! {
	args := DescribeResourceArgs{
		resource: 'node'
		resource_name: 'worker-node-1'
		namespace: ''
	}
	assert args.resource == 'node'
	assert args.resource_name == 'worker-node-1'
	assert args.namespace == ''
}
// Test DescribeResourceArgs for a custom resource (tfgw CRD).
// `mut` dropped: the binding is never mutated.
fn test_describe_resource_args_custom_resource() ! {
	args := DescribeResourceArgs{
		resource: 'tfgw'
		resource_name: 'cryptpad-main'
		namespace: 'cryptpad'
	}
	assert args.resource == 'tfgw'
	assert args.resource_name == 'cryptpad-main'
	assert args.namespace == 'cryptpad'
}
// ============================================================================
// JSON Parsing Tests for Kubectl Responses
// ============================================================================
// Verify that a representative `kubectl get nodes -o json` payload decodes
// into KubectlNodeListResponse with all nested fields intact.
fn test_parse_kubectl_node_list_json() ! {
	json_response := '{
	"items": [
		{
			"metadata": {
				"name": "k3s-master",
				"labels": {
					"kubernetes.io/hostname": "k3s-master",
					"node-role.kubernetes.io/control-plane": "",
					"node-role.kubernetes.io/master": ""
				},
				"creationTimestamp": "2024-01-15T08:00:00Z"
			},
			"spec": { "podCIDR": "10.42.0.0/24" },
			"status": {
				"addresses": [
					{ "type": "InternalIP", "address": "192.168.1.100" },
					{ "type": "Hostname", "address": "k3s-master" }
				],
				"conditions": [ { "type": "Ready", "status": "True" } ],
				"nodeInfo": {
					"architecture": "arm64",
					"kernelVersion": "5.15.0-91-generic",
					"osImage": "Ubuntu 22.04.3 LTS",
					"operatingSystem": "linux",
					"kubeletVersion": "v1.31.0+k3s1",
					"containerRuntimeVersion": "containerd://1.7.11-k3s2"
				}
			}
		}
	]
}'
	node_list := json.decode(KubectlNodeListResponse, json_response)!
	assert node_list.items.len == 1
	// Alias the single item so every assertion reads off one local.
	item := node_list.items[0]
	assert item.metadata.name == 'k3s-master'
	assert item.metadata.labels['kubernetes.io/hostname'] == 'k3s-master'
	assert item.metadata.labels['node-role.kubernetes.io/control-plane'] == ''
	assert item.spec.pod_cidr == '10.42.0.0/24'
	assert item.status.addresses.len == 2
	assert item.status.addresses[0].address_type == 'InternalIP'
	assert item.status.addresses[0].address == '192.168.1.100'
	assert item.status.addresses[1].address_type == 'Hostname'
	assert item.status.addresses[1].address == 'k3s-master'
	assert item.status.conditions.len == 1
	assert item.status.conditions[0].condition_type == 'Ready'
	assert item.status.conditions[0].status == 'True'
	assert item.status.node_info.kubelet_version == 'v1.31.0+k3s1'
	assert item.status.node_info.os_image == 'Ubuntu 22.04.3 LTS'
}
// Verify that IPv6 InternalIP/ExternalIP addresses survive JSON decoding.
fn test_parse_kubectl_node_list_ipv6() ! {
	json_response := '{
	"items": [
		{
			"metadata": {
				"name": "worker-node-1",
				"labels": { "node-role.kubernetes.io/worker": "" },
				"creationTimestamp": "2024-01-15T09:00:00Z"
			},
			"spec": { "podCIDR": "10.42.1.0/24" },
			"status": {
				"addresses": [
					{ "type": "InternalIP", "address": "2001:db8::1" },
					{ "type": "ExternalIP", "address": "2001:db8:1::1" },
					{ "type": "Hostname", "address": "worker-node-1" }
				],
				"conditions": [ { "type": "Ready", "status": "True" } ],
				"nodeInfo": {
					"architecture": "amd64",
					"kernelVersion": "6.5.0-14-generic",
					"osImage": "Ubuntu 23.10",
					"operatingSystem": "linux",
					"kubeletVersion": "v1.31.0",
					"containerRuntimeVersion": "containerd://1.7.2"
				}
			}
		}
	]
}'
	node_list := json.decode(KubectlNodeListResponse, json_response)!
	assert node_list.items.len == 1
	item := node_list.items[0]
	assert item.metadata.name == 'worker-node-1'
	assert item.status.addresses.len == 3
	// Pick out internal/external addresses with a match instead of two ifs.
	mut internal_ip := ''
	mut external_ip := ''
	for addr in item.status.addresses {
		match addr.address_type {
			'InternalIP' { internal_ip = addr.address }
			'ExternalIP' { external_ip = addr.address }
			else {}
		}
	}
	assert internal_ip == '2001:db8::1'
	assert internal_ip.contains(':')
	assert external_ip == '2001:db8:1::1'
	assert external_ip.contains(':')
}
// Verify that a node whose Ready condition is "False" decodes as such.
fn test_parse_kubectl_node_not_ready() ! {
	json_response := '{
	"items": [
		{
			"metadata": {
				"name": "worker-node-2",
				"labels": {},
				"creationTimestamp": "2024-01-15T10:00:00Z"
			},
			"spec": { "podCIDR": "" },
			"status": {
				"addresses": [
					{ "type": "InternalIP", "address": "192.168.1.102" }
				],
				"conditions": [ { "type": "Ready", "status": "False" } ],
				"nodeInfo": {
					"architecture": "amd64",
					"kernelVersion": "5.15.0-91-generic",
					"osImage": "Ubuntu 22.04.3 LTS",
					"operatingSystem": "linux",
					"kubeletVersion": "v1.31.0",
					"containerRuntimeVersion": "containerd://1.7.2"
				}
			}
		}
	]
}'
	node_list := json.decode(KubectlNodeListResponse, json_response)!
	assert node_list.items.len == 1
	item := node_list.items[0]
	assert item.metadata.name == 'worker-node-2'
	assert item.status.conditions[0].condition_type == 'Ready'
	assert item.status.conditions[0].status == 'False'
}
// Verify that node roles can be recovered from node-role.kubernetes.io labels.
fn test_node_role_extraction() ! {
	json_response := '{
	"items": [
		{
			"metadata": {
				"name": "control-plane-1",
				"labels": {
					"node-role.kubernetes.io/control-plane": "",
					"node-role.kubernetes.io/master": "",
					"node-role.kubernetes.io/etcd": "",
					"kubernetes.io/hostname": "control-plane-1"
				},
				"creationTimestamp": "2024-01-15T08:00:00Z"
			},
			"spec": { "podCIDR": "10.42.0.0/24" },
			"status": {
				"addresses": [
					{ "type": "InternalIP", "address": "192.168.1.100" }
				],
				"conditions": [ { "type": "Ready", "status": "True" } ],
				"nodeInfo": {
					"architecture": "arm64",
					"kernelVersion": "5.15.0-91-generic",
					"osImage": "Ubuntu 22.04.3 LTS",
					"operatingSystem": "linux",
					"kubeletVersion": "v1.31.0",
					"containerRuntimeVersion": "containerd://1.7.2"
				}
			}
		}
	]
}'
	node_list := json.decode(KubectlNodeListResponse, json_response)!
	assert node_list.items.len == 1
	// Extract roles: keep the label suffix after the role prefix, skip bare prefix keys.
	prefix := 'node-role.kubernetes.io/'
	mut roles := []string{}
	for key, _ in node_list.items[0].metadata.labels {
		if key.starts_with(prefix) && key.len > prefix.len {
			roles << key.all_after(prefix)
		}
	}
	assert roles.len == 3
	assert 'control-plane' in roles
	assert 'master' in roles
	assert 'etcd' in roles
}
// An empty "items" array must decode to a response with zero items.
fn test_parse_empty_node_list() ! {
	node_list := json.decode(KubectlNodeListResponse, '{ "items": [] }')!
	assert node_list.items.len == 0
}
// Verify that a dual-stack node (two InternalIP entries, IPv4 + IPv6)
// keeps both internal addresses after decoding.
fn test_parse_dual_stack_node() ! {
	json_response := '{
	"items": [
		{
			"metadata": {
				"name": "dual-stack-node",
				"labels": { "node-role.kubernetes.io/control-plane": "true" },
				"creationTimestamp": "2025-10-29T12:40:47Z"
			},
			"spec": { "podCIDR": "10.42.0.0/24" },
			"status": {
				"addresses": [
					{ "type": "InternalIP", "address": "10.20.3.2" },
					{ "type": "InternalIP", "address": "477:a3a5:7595:d3da:ff0f:ece1:204e:6691" },
					{ "type": "Hostname", "address": "dual-stack-node" }
				],
				"conditions": [ { "type": "Ready", "status": "True" } ],
				"nodeInfo": {
					"architecture": "amd64",
					"kernelVersion": "5.15.0-91-generic",
					"osImage": "Ubuntu 22.04.3 LTS",
					"operatingSystem": "linux",
					"kubeletVersion": "v1.31.0+k3s1",
					"containerRuntimeVersion": "containerd://1.7.11-k3s2"
				}
			}
		}
	]
}'
	node_list := json.decode(KubectlNodeListResponse, json_response)!
	assert node_list.items.len == 1
	item := node_list.items[0]
	assert item.metadata.name == 'dual-stack-node'
	assert item.status.addresses.len == 3
	// Count InternalIP entries with filter instead of a manual counter loop.
	internal_addrs := item.status.addresses.filter(it.address_type == 'InternalIP')
	assert internal_addrs.len == 2
}

View File

@@ -46,11 +46,20 @@ pub fn yaml_validate(yaml_path string) !K8sValidationResult {
errors << 'Missing metadata.name field'
}
// Validate kind values
valid_kinds := ['Pod', 'Deployment', 'Service', 'ConfigMap', 'Secret', 'StatefulSet', 'DaemonSet',
'Job', 'CronJob', 'Ingress', 'PersistentVolume', 'PersistentVolumeClaim']
if kind !in valid_kinds {
errors << 'Invalid kind: ${kind}. Valid kinds: ${valid_kinds.join(', ')}'
// Validate kind values for standard Kubernetes resources
// Allow custom resources (CRDs) which typically have non-standard apiVersions
standard_kinds := ['Pod', 'Deployment', 'Service', 'ConfigMap', 'Secret', 'StatefulSet',
'DaemonSet', 'Job', 'CronJob', 'Ingress', 'PersistentVolume', 'PersistentVolumeClaim',
'Namespace', 'ServiceAccount', 'Role', 'RoleBinding', 'ClusterRole', 'ClusterRoleBinding']
// Check if it's a standard Kubernetes resource or a custom resource
is_standard_api := api_version.starts_with('v1') || api_version.starts_with('apps/')
|| api_version.starts_with('batch/') || api_version.starts_with('networking.k8s.io/')
|| api_version.starts_with('rbac.authorization.k8s.io/')
// Only validate kind for standard Kubernetes resources
if is_standard_api && kind !in standard_kinds {
errors << 'Invalid kind: ${kind}. Valid kinds for standard resources: ${standard_kinds.join(', ')}'
}
return K8sValidationResult{