feat: Implement complete Zero OS Alpine Initramfs Builder

- Complete bash framework with strict error handling
- Modular library system (docker, alpine, components, initramfs, kernel, testing)
- Rust component integration (zinit, rfs, mycelium) with musl targeting
- Rootless Docker/Podman support for GitHub Actions
- Centralized configuration in config/build.conf
- 2-stage module loading system
- Strip + UPX optimization for minimal size
- Complete zinit integration replacing OpenRC
- GitHub Actions CI/CD pipeline
- Comprehensive documentation and usage guides

Components:
- Latest stable kernel 6.12.44
- Alpine Linux 3.22 base
- ThreeFold components: zinit, mycelium, rfs, corex
- Target: ~8-12MB final initramfs.cpio.xz
This commit is contained in:
2025-08-31 12:31:49 +02:00
commit 860b9aa161
81 changed files with 30118 additions and 0 deletions

314
scripts/build.sh Executable file
View File

@@ -0,0 +1,314 @@
#!/bin/bash
# Main orchestrator script for Zero OS Alpine Initramfs Builder
set -euo pipefail
# Script directory and project root detection
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# Source all libraries
# NOTE: common.sh installs the EXIT/INT/TERM cleanup trap and loads
# config/build.conf, so it must be sourced before the other libraries.
source "${SCRIPT_DIR}/lib/common.sh"
source "${SCRIPT_DIR}/lib/docker.sh"
source "${SCRIPT_DIR}/lib/alpine.sh"
source "${SCRIPT_DIR}/lib/components.sh"
source "${SCRIPT_DIR}/lib/initramfs.sh"
source "${SCRIPT_DIR}/lib/kernel.sh"
source "${SCRIPT_DIR}/lib/testing.sh"
# Build configuration loaded from config/build.conf via common.sh
# Environment variables can override config file values
ALPINE_VERSION="${ALPINE_VERSION:-3.22}"
KERNEL_VERSION="${KERNEL_VERSION:-6.12.44}"
RUST_TARGET="${RUST_TARGET:-x86_64-unknown-linux-musl}"
OPTIMIZATION_LEVEL="${OPTIMIZATION_LEVEL:-max}"
# Directory configuration
# Exported so sourced library functions and the container re-invocation
# of this script (see main) operate on the same layout.
export INSTALL_DIR="${PROJECT_ROOT}/initramfs"
export COMPONENTS_DIR="${PROJECT_ROOT}/components"
export KERNEL_DIR="${PROJECT_ROOT}/kernel"
export DIST_DIR="${PROJECT_ROOT}/dist"
# Configuration files
CONFIG_DIR="${PROJECT_ROOT}/config"
PACKAGES_LIST="${CONFIG_DIR}/packages.list"
SOURCES_CONF="${CONFIG_DIR}/sources.conf"
MODULES_CONF="${CONFIG_DIR}/modules.conf"
KERNEL_CONFIG="${CONFIG_DIR}/kernel.config"
ZINIT_CONFIG_DIR="${CONFIG_DIR}/zinit"
# Build options
# USE_CONTAINER: auto|true|false — 'auto' picks a container build when a
# runtime is available and we are not already inside one (see main).
USE_CONTAINER="${USE_CONTAINER:-auto}"
CLEAN_BUILD="${CLEAN_BUILD:-false}"
SKIP_TESTS="${SKIP_TESTS:-false}"
KEEP_ARTIFACTS="${KEEP_ARTIFACTS:-false}"
# Display usage information.
# FIX: the help text claimed KERNEL_VERSION defaults to 6.8.8, but the
# actual default (and the commit's stated kernel) is 6.12.44.
# NOTE: the unquoted EOF heredoc expands $0 at runtime.
function show_usage() {
    cat << EOF
Zero OS Alpine Initramfs Builder
Usage: $0 [OPTIONS]
Options:
--container Force container build
--no-container Force native build
--clean Clean build (remove all artifacts first)
--skip-tests Skip boot tests
--keep-artifacts Keep build artifacts after completion
--help Show this help message
Environment Variables:
ALPINE_VERSION Alpine Linux version (default: 3.22)
KERNEL_VERSION Linux kernel version (default: 6.12.44)
RUST_TARGET Rust compilation target (default: x86_64-unknown-linux-musl)
OPTIMIZATION_LEVEL Optimization level: max|size|speed (default: max)
DEBUG Enable debug output (default: 0)
Examples:
$0 # Basic build
$0 --clean # Clean build
$0 --container # Force container build
DEBUG=1 $0 # Build with debug output
EOF
}
# Parse command line arguments, setting the USE_CONTAINER, CLEAN_BUILD,
# SKIP_TESTS and KEEP_ARTIFACTS globals. Unknown options print usage
# and exit 1; --help prints usage and exits 0.
function parse_arguments() {
    local arg
    while (( $# )); do
        arg="$1"
        shift
        case "$arg" in
            --container)      USE_CONTAINER="true" ;;
            --no-container)   USE_CONTAINER="false" ;;
            --clean)          CLEAN_BUILD="true" ;;
            --skip-tests)     SKIP_TESTS="true" ;;
            --keep-artifacts) KEEP_ARTIFACTS="true" ;;
            --help|-h)
                show_usage
                exit 0
                ;;
            *)
                log_error "Unknown option: $arg"
                show_usage
                exit 1
                ;;
        esac
    done
}
# Prepare the build environment: log the effective configuration, create
# the four output directories, check tool dependencies, and verify that
# all required configuration files exist. Returns 1 on dependency failure.
function setup_build_environment() {
    section_header "Setting up build environment"
    log_info "Project root: ${PROJECT_ROOT}"
    log_info "Alpine version: ${ALPINE_VERSION}"
    log_info "Kernel version: ${KERNEL_VERSION}"
    log_info "Rust target: ${RUST_TARGET}"
    log_info "Optimization level: ${OPTIMIZATION_LEVEL}"
    # Create every build output directory up front.
    local dir
    for dir in "$INSTALL_DIR" "$COMPONENTS_DIR" "$KERNEL_DIR" "$DIST_DIR"; do
        safe_mkdir "$dir"
    done
    if ! check_dependencies; then
        log_error "Dependency check failed"
        return 1
    fi
    verify_configuration_files
    log_info "Build environment setup complete"
}
# Ensure every required configuration file and the zinit config directory
# exist. Logs each file found; on any miss, lists the missing paths and
# returns 1.
function verify_configuration_files() {
    section_header "Verifying Configuration Files"
    local -a missing=()
    local cfg
    for cfg in "$PACKAGES_LIST" "$SOURCES_CONF" "$MODULES_CONF" "$KERNEL_CONFIG"; do
        if [[ -f "$cfg" ]]; then
            log_info "✓ Configuration found: $(basename "$cfg")"
        else
            missing+=("$cfg")
        fi
    done
    if (( ${#missing[@]} > 0 )); then
        log_error "Missing configuration files:"
        for cfg in "${missing[@]}"; do
            log_error " - $cfg"
        done
        log_error "Run the setup script or create configuration files manually"
        return 1
    fi
    # zinit service definitions live in a directory, checked separately.
    if [[ ! -d "$ZINIT_CONFIG_DIR" ]]; then
        log_error "zinit configuration directory not found: ${ZINIT_CONFIG_DIR}"
        return 1
    fi
    log_info "All configuration files verified"
}
# Main build process
# Runs the full 14-phase pipeline: Alpine rootfs -> packages -> ThreeFold
# components -> zinit init -> size cleanup/strip -> cpio archive -> kernel
# with embedded initramfs -> optional boot tests, then reports timing.
# Reads globals: INSTALL_DIR, COMPONENTS_DIR, DIST_DIR, ALPINE_VERSION,
# KERNEL_VERSION, PACKAGES_LIST, SOURCES_CONF, MODULES_CONF,
# KERNEL_CONFIG, ZINIT_CONFIG_DIR, SKIP_TESTS.
# Phase order matters: cleanup/strip (8-9) must precede archiving (11),
# and the archive must exist before the kernel embeds it (13).
function main_build_process() {
section_header "Starting Zero OS Alpine Initramfs Build"
local start_time=$(date +%s)
# Phase 1: Extract Alpine miniroot
alpine_extract_miniroot "$INSTALL_DIR" "$ALPINE_VERSION"
# Phase 2: Configure Alpine system
alpine_configure_repos "$INSTALL_DIR" "$ALPINE_VERSION"
alpine_configure_system "$INSTALL_DIR"
# Phase 3: Install Alpine packages (NO OpenRC)
alpine_install_packages "$INSTALL_DIR" "$PACKAGES_LIST"
# Phase 4: Build and install ThreeFold components
components_parse_sources_conf "$SOURCES_CONF" "$COMPONENTS_DIR"
# Phase 5: Verify component installation
components_verify_installation
# Phase 6: Setup zinit as init system
initramfs_setup_zinit "$INSTALL_DIR" "$ZINIT_CONFIG_DIR"
# Phase 7: Setup 2-stage module loading
initramfs_setup_modules "$INSTALL_DIR" "$MODULES_CONF" "$KERNEL_VERSION"
# Phase 8: Aggressive cleanup for size optimization
alpine_aggressive_cleanup "$INSTALL_DIR"
# Phase 9: Strip and UPX all binaries
initramfs_strip_and_upx "$INSTALL_DIR"
# Phase 10: Validate initramfs
initramfs_validate "$INSTALL_DIR"
# Phase 11: Create initramfs archive
local initramfs_archive="${DIST_DIR}/initramfs.cpio.xz"
initramfs_create_cpio "$INSTALL_DIR" "$initramfs_archive"
# Phase 12: Test archive integrity
initramfs_test_archive "$initramfs_archive"
# Phase 13: Build kernel with embedded initramfs
local kernel_output="${DIST_DIR}/vmlinuz.efi"
kernel_build_with_initramfs "$KERNEL_CONFIG" "$initramfs_archive" "$kernel_output"
# Phase 14: Run boot tests (unless skipped)
if [[ "$SKIP_TESTS" != "true" ]]; then
testing_run_all "$kernel_output"
else
log_info "Skipping boot tests as requested"
fi
# Calculate build time
local end_time=$(date +%s)
local build_time=$((end_time - start_time))
local build_minutes=$((build_time / 60))
local build_seconds=$((build_time % 60))
section_header "Build Complete"
log_info "Build time: ${build_minutes}m ${build_seconds}s"
log_info "Output files:"
log_info " Kernel: ${kernel_output} ($(get_file_size "$kernel_output"))"
log_info " Initramfs: ${initramfs_archive} ($(get_file_size "$initramfs_archive"))"
}
# Remove component and kernel build trees after a successful run,
# unless the user asked to keep them via --keep-artifacts.
function cleanup_build_artifacts() {
    if [[ "$KEEP_ARTIFACTS" == "true" ]]; then
        log_info "Keeping build artifacts as requested"
        return 0
    fi
    section_header "Cleaning Build Artifacts"
    components_cleanup "$COMPONENTS_DIR" "false"
    kernel_cleanup "$KERNEL_DIR" "false"
    log_info "Build artifacts cleaned"
}
# Main function
# Parses CLI flags, optionally runs clean.sh first, prepares the build
# environment, then dispatches the build. With USE_CONTAINER=auto the
# build runs natively when already inside a container (or when no
# runtime exists), otherwise the script rebuilds the container image and
# re-invokes itself inside it with --no-container.
function main() {
# Parse command line arguments
parse_arguments "$@"
# Show banner
echo ""
echo "=================================================="
echo "== ZERO-OS ALPINE INITRAMFS BUILDER =="
echo "== ThreeFold Edition =="
echo "=================================================="
echo ""
# Clean build if requested
if [[ "$CLEAN_BUILD" == "true" ]]; then
section_header "Clean Build Requested"
"$SCRIPT_DIR/clean.sh"
fi
# Setup environment
setup_build_environment
# Determine build method
if [[ "$USE_CONTAINER" == "auto" ]]; then
if in_container; then
log_info "Already in container, using native build"
main_build_process
elif command_exists "podman" || command_exists "docker"; then
log_info "Container runtime available, using container build"
docker_detect_runtime
docker_build_container
docker_run_build "./scripts/build.sh --no-container"
else
log_info "No container runtime, using native build"
main_build_process
fi
elif [[ "$USE_CONTAINER" == "true" ]]; then
docker_detect_runtime
docker_build_container
docker_run_build "./scripts/build.sh --no-container"
else
main_build_process
fi
# Cleanup if requested
cleanup_build_artifacts
section_header "Zero OS Build Complete"
log_info "Ready to deploy Zero OS with Alpine Linux and zinit"
}
# Run main function with all arguments
# Entry point — everything above only defines configuration and functions.
main "$@"

268
scripts/clean.sh Executable file
View File

@@ -0,0 +1,268 @@
#!/bin/bash
# Cleanup script for Zero OS Alpine Initramfs Builder
set -euo pipefail
# Script directory and project root detection
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# Source common functions
source "${SCRIPT_DIR}/lib/common.sh"
# Cleanup configuration
# Each flag may be preset via the environment or enabled by CLI options
# (see parse_arguments). Build artifacts are always cleaned regardless.
CLEAN_ALL="${CLEAN_ALL:-false}"
CLEAN_DOWNLOADS="${CLEAN_DOWNLOADS:-false}"
CLEAN_CONTAINER="${CLEAN_CONTAINER:-false}"
# Display usage information
# Prints the help text to stdout; the unquoted EOF heredoc expands $0 at
# runtime so the invoked script name appears in the examples.
function show_usage() {
cat << EOF
Zero OS Build Cleanup Script
Usage: $0 [OPTIONS]
Options:
--all Clean everything (artifacts + downloads + containers)
--downloads Clean downloaded sources and components
--containers Clean container images
--artifacts-only Clean only build artifacts (default)
--help Show this help message
Environment Variables:
CLEAN_ALL Clean everything (default: false)
CLEAN_DOWNLOADS Clean downloaded sources (default: false)
CLEAN_CONTAINER Clean container images (default: false)
Examples:
$0 # Clean build artifacts only
$0 --all # Complete cleanup
$0 --downloads # Clean sources and keep artifacts
EOF
}
# Parse command line arguments, toggling the CLEAN_ALL, CLEAN_DOWNLOADS
# and CLEAN_CONTAINER globals. --artifacts-only is accepted as a no-op
# (it is the default behaviour); unknown options print usage and exit 1.
function parse_arguments() {
    local opt
    while (( $# )); do
        opt="$1"
        shift
        case "$opt" in
            --all)
                CLEAN_ALL="true"
                CLEAN_DOWNLOADS="true"
                CLEAN_CONTAINER="true"
                ;;
            --downloads)      CLEAN_DOWNLOADS="true" ;;
            --containers)     CLEAN_CONTAINER="true" ;;
            --artifacts-only) ;;  # default behaviour; nothing to set
            --help|-h)
                show_usage
                exit 0
                ;;
            *)
                log_error "Unknown option: $opt"
                show_usage
                exit 1
                ;;
        esac
    done
}
# Clean build artifacts: the initramfs/ and dist/ trees plus stray temp
# files this project writes under /tmp.
# FIX: the temp-file existence check previously ran a bare `ls $pattern`,
# which dumped matching filenames to stdout on every invocation. Use the
# bash `compgen -G` glob test, which is silent.
function clean_build_artifacts() {
    section_header "Cleaning Build Artifacts"
    local artifacts_to_clean=(
        "${PROJECT_ROOT}/initramfs"
        "${PROJECT_ROOT}/dist"
    )
    local artifact
    for artifact in "${artifacts_to_clean[@]}"; do
        if [[ -d "$artifact" ]]; then
            log_info "Removing: $artifact"
            safe_rmdir "$artifact"
        else
            log_debug "Already clean: $artifact"
        fi
    done
    # Temp-file patterns produced by the download and boot-test stages.
    local temp_files=(
        "/tmp/alpine-miniroot*.tar.gz"
        "/tmp/linux-*.tar.xz"
        "/tmp/qemu-*.log"
        "/tmp/cloud-hypervisor-*.log"
    )
    local pattern
    for pattern in "${temp_files[@]}"; do
        if compgen -G "$pattern" > /dev/null; then
            log_info "Removing temporary files: $pattern"
            # shellcheck disable=SC2086 — $pattern must glob-expand here
            safe_execute rm -f $pattern
        fi
    done
    log_info "Build artifacts cleaned"
}
# Remove downloaded sources: the components/ and kernel/ trees, plus any
# project-local Rust cargo cache.
function clean_downloads() {
    section_header "Cleaning Downloaded Sources and Components"
    local target
    for target in "${PROJECT_ROOT}/components" "${PROJECT_ROOT}/kernel"; do
        if [[ -d "$target" ]]; then
            log_info "Removing: $target"
            safe_rmdir "$target"
        else
            log_debug "Already clean: $target"
        fi
    done
    # A .cargo directory only exists when builds cached Rust artifacts
    # inside the project tree.
    local rust_cache="${PROJECT_ROOT}/.cargo"
    if [[ -d "$rust_cache" ]]; then
        log_info "Removing Rust cache: $rust_cache"
        safe_rmdir "$rust_cache"
    fi
    log_info "Downloads and sources cleaned"
}
# Clean container images
# Best-effort: docker.sh is sourced lazily here (clean.sh itself only
# sources common.sh) so cleanup still works on hosts without container
# tooling. If no runtime is detected, this logs and moves on.
function clean_container_images() {
section_header "Cleaning Container Images"
# Source docker functions if available
if [[ -f "${SCRIPT_DIR}/lib/docker.sh" ]]; then
source "${SCRIPT_DIR}/lib/docker.sh"
# Detect container runtime
if docker_detect_runtime 2>/dev/null; then
docker_cleanup "false"
else
log_info "No container runtime detected"
fi
else
log_warn "Docker library not found, manual container cleanup may be needed"
fi
log_info "Container images cleaned"
}
# Report the project's current disk footprint and summarise which
# categories of data this run removed, based on the CLEAN_* flags.
function show_space_recovery() {
    section_header "Disk Space Recovery"
    local current_usage
    current_usage=$(du -sh "${PROJECT_ROOT}" 2>/dev/null | cut -f1 || echo "unknown")
    log_info "Current project size: ${current_usage}"
    local -a summary
    if [[ "$CLEAN_ALL" == "true" ]]; then
        summary=(
            "Complete cleanup performed:"
            " ✓ Build artifacts removed"
            " ✓ Downloaded sources removed"
            " ✓ Container images removed"
        )
    elif [[ "$CLEAN_DOWNLOADS" == "true" ]]; then
        summary=(
            "Partial cleanup performed:"
            " ✓ Build artifacts removed"
            " ✓ Downloaded sources removed"
            " - Container images preserved"
        )
    else
        summary=(
            "Minimal cleanup performed:"
            " ✓ Build artifacts removed"
            " - Downloaded sources preserved"
            " - Container images preserved"
        )
    fi
    local line
    for line in "${summary[@]}"; do
        log_info "$line"
    done
}
# Verify that the directories this run was supposed to delete are gone.
# components/ and kernel/ are only checked when CLEAN_DOWNLOADS=true.
# Warns and returns 1 if anything remains; returns 0 otherwise.
function verify_cleanup() {
    section_header "Verifying Cleanup"
    local -a remaining=()
    if [[ -d "${PROJECT_ROOT}/initramfs" ]]; then
        remaining+=("initramfs/")
    fi
    if [[ -d "${PROJECT_ROOT}/dist" ]]; then
        remaining+=("dist/")
    fi
    if [[ "$CLEAN_DOWNLOADS" == "true" ]]; then
        if [[ -d "${PROJECT_ROOT}/components" ]]; then
            remaining+=("components/")
        fi
        if [[ -d "${PROJECT_ROOT}/kernel" ]]; then
            remaining+=("kernel/")
        fi
    fi
    if (( ${#remaining[@]} > 0 )); then
        log_warn "Some artifacts may not have been cleaned:"
        local item
        for item in "${remaining[@]}"; do
            log_warn " - $item"
        done
        return 1
    fi
    log_info "Cleanup verification passed"
    return 0
}
# Main function
# Orchestrates: parse args -> always clean build artifacts -> optional
# downloads/container cleanup -> space-recovery report -> verification.
# NOTE(review): verify_cleanup returns 1 when leftovers remain; under the
# file-wide 'set -e' that aborts before "Cleanup Complete" prints —
# confirm that hard-failure is intended.
function main() {
# Parse command line arguments
parse_arguments "$@"
echo ""
echo "=================================================="
echo "== ZERO-OS BUILD CLEANUP =="
echo "=================================================="
echo ""
log_info "Starting cleanup process"
log_info "Clean all: ${CLEAN_ALL}"
log_info "Clean downloads: ${CLEAN_DOWNLOADS}"
log_info "Clean containers: ${CLEAN_CONTAINER}"
# Always clean build artifacts
clean_build_artifacts
# Clean downloads if requested
if [[ "$CLEAN_DOWNLOADS" == "true" ]]; then
clean_downloads
fi
# Clean containers if requested
if [[ "$CLEAN_CONTAINER" == "true" ]]; then
clean_container_images
fi
# Show space recovery
show_space_recovery
# Verify cleanup
verify_cleanup
section_header "Cleanup Complete"
log_info "Project cleaned successfully"
}
# Run main function with all arguments
# Entry point — everything above only defines configuration and functions.
main "$@"

354
scripts/lib/alpine.sh Normal file
View File

@@ -0,0 +1,354 @@
#!/bin/bash
# Alpine miniroot and package operations
# Source common functions
LIB_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${LIB_SCRIPT_DIR}/common.sh"
# Alpine configuration
# Defaults below may be overridden by the environment or by
# config/build.conf (loaded by common.sh before these lines run).
ALPINE_VERSION="${ALPINE_VERSION:-3.22}"
ALPINE_ARCH="${ALPINE_ARCH:-x86_64}"
ALPINE_MIRROR="${ALPINE_MIRROR:-https://dl-cdn.alpinelinux.org/alpine}"
# Extract Alpine miniroot to target directory
# Downloads the minirootfs tarball for $version/$arch from ALPINE_MIRROR,
# wipes and recreates $target_dir, extracts into it, and sanity-checks
# the result by reading /etc/alpine-release.
# NOTE(review): the URL hardcodes the ".0" patch release
# (alpine-minirootfs-${version}.0-...) — confirm this matches the
# mirror's naming once later point releases ship.
function alpine_extract_miniroot() {
local target_dir="$1"
local version="${2:-$ALPINE_VERSION}"
local arch="${3:-$ALPINE_ARCH}"
section_header "Extracting Alpine Miniroot"
local url="${ALPINE_MIRROR}/v${version}/releases/${arch}/alpine-minirootfs-${version}.0-${arch}.tar.gz"
local temp_file="/tmp/alpine-miniroot-${version}-${arch}.tar.gz"
log_info "Alpine version: ${version}"
log_info "Architecture: ${arch}"
log_info "Target directory: ${target_dir}"
# Clean target directory
# (a stale tree from a previous run must not pollute the extraction)
if [[ -d "$target_dir" ]]; then
log_info "Cleaning existing target directory"
safe_rmdir "$target_dir"
fi
safe_mkdir "$target_dir"
# Download miniroot
log_info "Downloading Alpine miniroot from: ${url}"
safe_execute wget --progress=dot:giga -O "$temp_file" "$url"
# Verify download
if [[ ! -f "$temp_file" ]]; then
log_error "Failed to download Alpine miniroot"
return 1
fi
local file_size=$(get_file_size "$temp_file")
log_info "Downloaded miniroot size: ${file_size}"
# Extract miniroot
log_info "Extracting miniroot to: ${target_dir}"
safe_execute tar -xzf "$temp_file" -C "$target_dir"
# Cleanup download
safe_execute rm "$temp_file"
# Verify extraction
# /etc/alpine-release is shipped by every Alpine rootfs; its absence
# means the tarball was bad or extraction failed.
if [[ ! -f "${target_dir}/etc/alpine-release" ]]; then
log_error "Alpine miniroot extraction failed - missing alpine-release"
return 1
fi
local alpine_release=$(cat "${target_dir}/etc/alpine-release")
log_info "Extracted Alpine release: ${alpine_release}"
log_info "Alpine miniroot extraction complete"
}
# Bind-mount one host path into the chroot when not already mounted, and
# record the target in CLEANUP_MOUNTS so cleanup_on_exit (common.sh) and
# alpine_cleanup_chroot can unmount it later.
function _alpine_bind_mount() {
    local src="$1"
    local target="$2"
    if ! mountpoint -q "$target" 2>/dev/null; then
        safe_execute mount --bind "$src" "$target"
        export CLEANUP_MOUNTS="${CLEANUP_MOUNTS:-} ${target}"
    fi
}

# Setup chroot environment for package operations.
# Creates the standard pseudo-filesystem mount points, bind-mounts
# /proc, /sys, /dev and /dev/pts from the host (requires root), and
# copies the host's resolv.conf so apk can resolve mirrors.
# Refactor: the four identical mount-if-needed stanzas are now a single
# _alpine_bind_mount helper.
function alpine_setup_chroot() {
    local initramfs_dir="$1"
    section_header "Setting Up Alpine Chroot Environment"
    # Create essential directories (dev must exist before dev/pts).
    local dir
    for dir in proc sys dev dev/pts tmp run; do
        safe_mkdir "${initramfs_dir}/${dir}"
    done
    # Mount essential filesystems
    log_info "Mounting essential filesystems in chroot"
    _alpine_bind_mount /proc "${initramfs_dir}/proc"
    _alpine_bind_mount /sys "${initramfs_dir}/sys"
    _alpine_bind_mount /dev "${initramfs_dir}/dev"
    _alpine_bind_mount /dev/pts "${initramfs_dir}/dev/pts"
    # DNS inside the chroot for package downloads.
    if [[ -f /etc/resolv.conf ]]; then
        safe_copy /etc/resolv.conf "${initramfs_dir}/etc/resolv.conf"
    fi
    log_info "Chroot environment setup complete"
}
# Tear down the chroot mounts created by alpine_setup_chroot, in reverse
# order (dev/pts before dev), then reset the CLEANUP_MOUNTS bookkeeping.
function alpine_cleanup_chroot() {
    local initramfs_dir="$1"
    section_header "Cleaning Up Alpine Chroot Environment"
    local m
    for m in "${initramfs_dir}/dev/pts" "${initramfs_dir}/dev" \
             "${initramfs_dir}/sys" "${initramfs_dir}/proc"; do
        if mountpoint -q "$m" 2>/dev/null; then
            log_info "Unmounting: $m"
            safe_execute umount "$m" || log_warn "Failed to unmount $m"
        fi
    done
    # Nothing left for the exit trap to unmount.
    export CLEANUP_MOUNTS=""
    log_info "Chroot cleanup complete"
}
# Install packages from packages.list (NO OpenRC)
# Reads one package name per line (comments and blank lines ignored) and
# installs them inside the chroot with --no-cache and --no-scripts (no
# maintainer scripts are executed — zinit, not OpenRC, wires up services).
# Fails if busybox/musl/alpine-baselayout are missing afterwards; only
# warns if init-system packages (openrc/sysvinit/systemd) slipped in.
function alpine_install_packages() {
local initramfs_dir="$1"
local packages_file="$2"
section_header "Installing Alpine Packages"
if [[ ! -f "$packages_file" ]]; then
log_error "Packages file not found: ${packages_file}"
return 1
fi
# Setup chroot environment
alpine_setup_chroot "$initramfs_dir"
# Update package repositories
log_info "Updating package repositories"
safe_execute chroot "$initramfs_dir" apk update
# Read packages from file (excluding comments and empty lines)
# NOTE(review): a final line without a trailing newline is silently
# dropped by this read loop — confirm packages.list always ends with one.
local packages=()
while IFS= read -r line; do
# Skip comments and empty lines
if [[ "$line" =~ ^[[:space:]]*# ]] || [[ -z "${line// }" ]]; then
continue
fi
packages+=("$line")
done < "$packages_file"
if [[ ${#packages[@]} -eq 0 ]]; then
log_warn "No packages found in ${packages_file}"
alpine_cleanup_chroot "$initramfs_dir"
return 0
fi
log_info "Installing ${#packages[@]} packages:"
for pkg in "${packages[@]}"; do
log_info " - $pkg"
done
# Install packages (NO OpenRC - explicitly exclude)
log_info "Installing packages with apk"
safe_execute chroot "$initramfs_dir" apk add --no-cache \
--no-scripts \
--clean-protected \
"${packages[@]}"
# Verify critical packages are installed
# NOTE(review): this is a prefix match — e.g. 'musl' would be satisfied
# by 'musl-utils' alone — confirm that looseness is acceptable.
local critical_packages=("busybox" "musl" "alpine-baselayout")
for pkg in "${critical_packages[@]}"; do
if ! chroot "$initramfs_dir" apk info | grep -q "^${pkg}"; then
log_error "Critical package missing: ${pkg}"
alpine_cleanup_chroot "$initramfs_dir"
return 1
fi
done
# Ensure no OpenRC packages were installed
local openrc_packages=$(chroot "$initramfs_dir" apk info | grep -E "(openrc|sysvinit|systemd)" || true)
if [[ -n "$openrc_packages" ]]; then
log_warn "OpenRC-related packages detected:"
echo "$openrc_packages"
log_warn "These should be removed for zinit-only operation"
fi
alpine_cleanup_chroot "$initramfs_dir"
log_info "Package installation complete"
}
# Aggressive cleanup to minimize size.
# Strips documentation, locales, dev headers, static libs, caches,
# bytecode, test data and misc share/ content from the initramfs tree.
# Destructive by design — only ever run against the staging directory.
function alpine_aggressive_cleanup() {
    local initramfs_dir="$1"
    section_header "Aggressive Alpine Cleanup"
    log_info "Starting cleanup in: ${initramfs_dir}"
    # Remove documentation and man pages
    log_info "Removing documentation and man pages"
    safe_rmdir "${initramfs_dir}/usr/share/doc"
    safe_rmdir "${initramfs_dir}/usr/share/man"
    safe_rmdir "${initramfs_dir}/usr/share/info"
    safe_rmdir "${initramfs_dir}/usr/share/gtk-doc"
    # Remove locales except C/POSIX
    log_info "Removing locales (keeping C/POSIX only)"
    if [[ -d "${initramfs_dir}/usr/share/locale" ]]; then
        find "${initramfs_dir}/usr/share/locale" -mindepth 1 -maxdepth 1 -type d \
            ! -name 'C' ! -name 'POSIX' -exec rm -rf {} + 2>/dev/null || true
    fi
    # Remove development headers and files
    log_info "Removing development files"
    safe_rmdir "${initramfs_dir}/usr/include"
    safe_rmdir "${initramfs_dir}/usr/lib/pkgconfig"
    safe_rmdir "${initramfs_dir}/usr/share/pkgconfig"
    safe_rmdir "${initramfs_dir}/lib/pkgconfig"
    # Remove static libraries
    log_info "Removing static libraries"
    find "${initramfs_dir}" -name "*.a" -type f -delete 2>/dev/null || true
    # Remove APK cache and database backup
    log_info "Removing APK cache and database backup"
    safe_rmdir "${initramfs_dir}/var/cache/apk"
    safe_rmdir "${initramfs_dir}/lib/apk/db"
    find "${initramfs_dir}/var/lib/apk" -name "*.old" -delete 2>/dev/null || true
    # Remove kernel source and headers if present.
    # BUG FIX: the globs were previously passed quoted
    # ("…/lib/modules/*/build"), so safe_rmdir saw a literal '*' path,
    # its -d test failed, and nothing was ever removed. Expand the glob
    # here; build/source are often symlinks to the host kernel tree, so
    # remove matches of any file type.
    log_info "Removing kernel development files"
    safe_rmdir "${initramfs_dir}/usr/src"
    local moddir
    for moddir in "${initramfs_dir}"/lib/modules/*/build "${initramfs_dir}"/lib/modules/*/source; do
        if [[ -e "$moddir" || -L "$moddir" ]]; then
            rm -rf "$moddir"
        fi
    done
    # Remove Python bytecode and cache
    log_info "Removing Python cache files"
    find "${initramfs_dir}" -name "__pycache__" -type d -exec rm -rf {} + 2>/dev/null || true
    find "${initramfs_dir}" -name "*.pyc" -type f -delete 2>/dev/null || true
    find "${initramfs_dir}" -name "*.pyo" -type f -delete 2>/dev/null || true
    # Remove test files and examples.
    # NOTE(review): '-path "*/test*"' matches any directory whose path
    # contains 'test' anywhere — confirm nothing required ships under
    # such a path.
    log_info "Removing test files and examples"
    find "${initramfs_dir}" -path "*/test*" -type d -exec rm -rf {} + 2>/dev/null || true
    find "${initramfs_dir}" -path "*/example*" -type d -exec rm -rf {} + 2>/dev/null || true
    # Remove unnecessary files from usr/share
    log_info "Cleaning usr/share directory"
    local unwanted_share_dirs=(
        "applications"
        "icons"
        "pixmaps"
        "themes"
        "fonts"
        "sounds"
        "desktop-directories"
        "mime"
        "glib-2.0/schemas"
    )
    local dir
    for dir in "${unwanted_share_dirs[@]}"; do
        safe_rmdir "${initramfs_dir}/usr/share/${dir}"
    done
    # Remove large timezone data (keep only UTC)
    log_info "Trimming timezone data"
    if [[ -d "${initramfs_dir}/usr/share/zoneinfo" ]]; then
        find "${initramfs_dir}/usr/share/zoneinfo" -type f ! -name "UTC" ! -path "*/posix/*" -delete 2>/dev/null || true
    fi
    # Remove empty directories
    log_info "Removing empty directories"
    find "${initramfs_dir}" -type d -empty -delete 2>/dev/null || true
    # Report the resulting size.
    local total_size
    total_size=$(du -sh "${initramfs_dir}" 2>/dev/null | cut -f1 || echo "unknown")
    log_info "Initramfs size after cleanup: ${total_size}"
    log_info "Aggressive cleanup complete"
}
# Write /etc/apk/repositories inside the initramfs, pointing at the
# configured mirror's main and community repositories for the given
# Alpine version (defaults to ALPINE_VERSION).
function alpine_configure_repos() {
    local initramfs_dir="$1"
    local version="${2:-$ALPINE_VERSION}"
    section_header "Configuring Alpine Repositories"
    local repos_file="${initramfs_dir}/etc/apk/repositories"
    printf '%s\n' \
        "${ALPINE_MIRROR}/v${version}/main" \
        "${ALPINE_MIRROR}/v${version}/community" > "$repos_file"
    log_info "Configured Alpine repositories for version ${version}"
}
# Set Alpine system settings
# Writes hostname, /etc/hosts, a UTC localtime symlink, a minimal
# /etc/profile, and sets root's login shell to /bin/sh.
# NOTE(review): the chsh step runs inside the chroot and assumes a chsh
# binary exists in the minirootfs — confirm, or edit /etc/passwd directly.
function alpine_configure_system() {
local initramfs_dir="$1"
section_header "Configuring Alpine System Settings"
# Set hostname
echo "zero-os" > "${initramfs_dir}/etc/hostname"
# Configure hosts file
cat > "${initramfs_dir}/etc/hosts" << 'EOF'
127.0.0.1 localhost localhost.localdomain
::1 localhost localhost.localdomain
EOF
# Set timezone to UTC
# (symlink target is relative to the chroot, hence the absolute /usr path)
if [[ -f "${initramfs_dir}/usr/share/zoneinfo/UTC" ]]; then
safe_execute ln -sf /usr/share/zoneinfo/UTC "${initramfs_dir}/etc/localtime"
fi
# Configure minimal profile
cat > "${initramfs_dir}/etc/profile" << 'EOF'
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
export PS1='\h:\w\$ '
export HOME=/root
export TERM=xterm
umask 022
EOF
# Set root shell
safe_execute chroot "$initramfs_dir" chsh -s /bin/sh root
log_info "Alpine system configuration complete"
}
# Export functions
# Exported with -f (bash-specific) so they remain defined in any child
# bash processes spawned by the build.
export -f alpine_extract_miniroot alpine_setup_chroot alpine_cleanup_chroot
export -f alpine_install_packages alpine_aggressive_cleanup
export -f alpine_configure_repos alpine_configure_system

238
scripts/lib/common.sh Normal file
View File

@@ -0,0 +1,238 @@
#!/bin/bash
# Common functions and utilities for Zero OS Alpine Initramfs Builder
# Strict error handling
set -euo pipefail
# Script directory detection (only if not already set)
# build.sh/clean.sh set SCRIPT_DIR to scripts/ before sourcing this file;
# when sourced standalone we fall back to this file's own directory.
if [[ -z "${SCRIPT_DIR:-}" ]]; then
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
fi
if [[ -z "${PROJECT_ROOT:-}" ]]; then
PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
fi
# Colors for output (if terminal supports it)
# Disabled when stdout is not a TTY so CI logs stay free of escape codes.
if [[ -t 1 ]]; then
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
else
RED=''
GREEN=''
YELLOW=''
BLUE=''
NC=''
fi
# Logging functions
# Each prints "<tag> <timestamp> - <message>" to stderr, colourising the
# tag when the COLOR variables are set. log_debug is a no-op unless
# DEBUG=1.
function log_info() {
    echo -e "${GREEN}[INFO]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $*" >&2
}
function log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $*" >&2
}
function log_error() {
    echo -e "${RED}[ERROR]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $*" >&2
}
function log_debug() {
    if [[ "${DEBUG:-0}" == "1" ]]; then
        echo -e "${BLUE}[DEBUG]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $*" >&2
    fi
}
# Command execution with full transparency.
# Runs the given command vector with logging. In DEBUG mode output
# streams through; otherwise it is captured and shown only on failure.
# Exits the entire script (not just the caller) when the command fails.
#
# BUG FIX: the original collapsed the arguments into one string and
# re-executed it via unquoted ${cmd}, which word-split and glob-expanded
# every argument — any path containing spaces or glob characters broke.
# Execute the original argument vector "$@" instead; the joined string
# is kept purely for log readability.
function safe_execute() {
    local cmd_display="$*"
    log_info "Executing: ${cmd_display}"
    if [[ "${DEBUG:-0}" == "1" ]]; then
        # Debug mode: let the command's output stream straight through.
        if ! "$@"; then
            log_error "Command failed: ${cmd_display}"
            exit 1
        fi
    else
        # Normal mode: capture output, reveal it only on error.
        local output
        if ! output=$("$@" 2>&1); then
            log_error "Command failed: ${cmd_display}"
            log_error "Output: ${output}"
            exit 1
        fi
    fi
}
# Section headers with clear text separators.
# Prints a prominent banner to stdout marking the start of a named build
# phase, then logs it.
function section_header() {
    local title="$1"
    printf '\n%s\nSECTION: %s\n%s\n' \
        "==================================================" \
        "$title" \
        "=================================================="
    log_info "Starting section: ${title}"
}
# True when the given command resolves on PATH.
function command_exists() {
    command -v "$1" >/dev/null 2>&1
}
# Heuristic container detection: docker/podman marker files, or a
# 'container' token anywhere in PID 1's cgroup listing.
function in_container() {
    if [[ -f /.dockerenv || -f /run/.containerenv ]]; then
        return 0
    fi
    grep -q 'container' /proc/1/cgroup 2>/dev/null
}
# Verify required tools are available.
# Checks every tool a native build needs; outside a container, also
# requires podman or docker. Logs and returns 1 listing anything missing.
function check_dependencies() {
    local -a missing=()
    local tool
    # Core build tools, checked in one pass.
    for tool in git wget tar gzip xz cpio strip upx rustc cargo; do
        if ! command_exists "$tool"; then
            missing+=("$tool")
        fi
    done
    # Check for container runtime (if not in container)
    if ! in_container; then
        if ! command_exists "podman" && ! command_exists "docker"; then
            missing+=("podman or docker")
        fi
    fi
    if (( ${#missing[@]} > 0 )); then
        log_error "Missing required dependencies:"
        local dep
        for dep in "${missing[@]}"; do
            log_error " - $dep"
        done
        return 1
    fi
    log_info "All dependencies satisfied"
    return 0
}
# Create a directory (and any missing parents) with logging.
function safe_mkdir() {
    log_debug "Creating directory: $1"
    safe_execute mkdir -p "$1"
}
# Delete a directory tree, but only when it actually exists.
function safe_rmdir() {
    if [[ -d "$1" ]]; then
        log_debug "Removing directory: $1"
        safe_execute rm -rf "$1"
    fi
}
# Recursive copy with logging.
function safe_copy() {
    log_debug "Copying: $1 -> $2"
    safe_execute cp -r "$1" "$2"
}
# True when the argument begins with '/'.
function is_absolute_path() {
    case "$1" in
        /*) return 0 ;;
        *)  return 1 ;;
    esac
}
# Print the argument as an absolute path, resolving relative paths
# against the current working directory (no symlink canonicalisation).
function resolve_path() {
    local p="$1"
    if is_absolute_path "$p"; then
        printf '%s\n' "$p"
    else
        printf '%s/%s\n' "$(pwd)" "$p"
    fi
}
# Print a human-readable size for a regular file, or "0B" if it does not
# exist.
function get_file_size() {
    if [[ -f "$1" ]]; then
        du -h "$1" | cut -f1
    else
        echo "0B"
    fi
}
# Wait for file to exist with timeout.
# $1 - file path; $2 - timeout in seconds (default 30).
# Polls once per second; returns 0 as soon as the file exists, 1 on
# timeout.
#
# BUG FIX: the original used ((count++)), whose arithmetic value is 0 on
# the first iteration; the resulting non-zero exit status aborted the
# whole script under the file-wide 'set -e' the first time the loop ran.
function wait_for_file() {
    local file="$1"
    local timeout="${2:-30}"
    local count=0
    while [[ ! -f "$file" && $count -lt $timeout ]]; do
        sleep 1
        count=$((count + 1))
    done
    [[ -f "$file" ]]
}
# Cleanup function for traps
# Runs on EXIT/INT/TERM: unmounts anything recorded in CLEANUP_MOUNTS
# (maintained by alpine_setup_chroot) and re-raises the original exit
# code so the script's status is preserved.
function cleanup_on_exit() {
local exit_code=$?
log_info "Build process exiting with code: ${exit_code}"
# Unmount any mounted filesystems
# CLEANUP_MOUNTS is a space-separated list; the unquoted expansion below
# deliberately word-splits it and relies on mount paths containing no
# spaces.
if [[ -n "${CLEANUP_MOUNTS:-}" ]]; then
for mount in $CLEANUP_MOUNTS; do
if mountpoint -q "$mount" 2>/dev/null; then
log_info "Unmounting: $mount"
umount "$mount" 2>/dev/null || true
fi
done
fi
exit $exit_code
}
# Set up exit trap
trap cleanup_on_exit EXIT INT TERM
# Load build configuration after functions are defined
# (the log_* helpers above must exist so a missing config can be reported).
BUILD_CONF="${PROJECT_ROOT}/config/build.conf"
if [[ -f "$BUILD_CONF" ]]; then
log_debug "Loading build configuration from: ${BUILD_CONF}"
source "$BUILD_CONF"
else
log_warn "Build configuration not found: ${BUILD_CONF}"
log_warn "Using default values"
fi
# Export common variables
export SCRIPT_DIR PROJECT_ROOT
# Export helpers with -f (bash-specific) so child bash processes inherit them.
export -f log_info log_warn log_error log_debug
export -f safe_execute section_header
export -f command_exists in_container check_dependencies
export -f safe_mkdir safe_rmdir safe_copy
export -f is_absolute_path resolve_path get_file_size wait_for_file

448
scripts/lib/components.sh Normal file
View File

@@ -0,0 +1,448 @@
#!/bin/bash
# Component download and build system for ThreeFold Zero OS
# Source common functions
LIB_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${LIB_SCRIPT_DIR}/common.sh"
# Component configuration
# RUST_TARGET defaults to the musl target so Rust components link
# statically against Alpine's musl libc.
RUST_TARGET="${RUST_TARGET:-x86_64-unknown-linux-musl}"
CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-target}"
# Parse and process all components from sources.conf.
# Expected line format (colon-separated):
#   type:name:url:version:build_func[:extra]
# where type is 'git' or 'release'. Each component is downloaded and
# then built/installed via components_build_component.
#
# BUG FIX: ((component_count++)) evaluates to 0 for the first component,
# which returns status 1 and aborted the whole script under the
# file-wide 'set -e'. Replaced with an arithmetic-expansion assignment.
#
# NOTE(review): the parser splits on ':' in a single read, so a URL that
# itself contains ':' (e.g. https://...) bleeds into later fields —
# confirm the actual sources.conf format before relying on URL fields
# with colons. Parsing behaviour is deliberately left unchanged here.
function components_parse_sources_conf() {
    local sources_file="$1"
    local components_dir="$2"
    local install_dir="${INSTALL_DIR:-${PROJECT_ROOT}/initramfs}"
    section_header "Parsing Sources Configuration"
    if [[ ! -f "$sources_file" ]]; then
        log_error "Sources file not found: ${sources_file}"
        return 1
    fi
    # Ensure components directory exists
    safe_mkdir "$components_dir"
    # Export install directory for build functions
    export INSTALL_DIR="$install_dir"
    log_info "Processing components from: ${sources_file}"
    log_info "Components directory: ${components_dir}"
    log_info "Install directory: ${install_dir}"
    local component_count=0
    # Process each line in sources.conf
    while IFS=: read -r type name url version build_func extra; do
        # Skip comments and empty lines
        if [[ "$type" =~ ^[[:space:]]*# ]] || [[ -z "${type// }" ]]; then
            continue
        fi
        # Trim whitespace around every field.
        type=$(echo "$type" | xargs)
        name=$(echo "$name" | xargs)
        url=$(echo "$url" | xargs)
        version=$(echo "$version" | xargs)
        build_func=$(echo "$build_func" | xargs)
        extra=$(echo "$extra" | xargs)
        if [[ -z "$type" || -z "$name" || -z "$url" || -z "$version" || -z "$build_func" ]]; then
            log_warn "Skipping invalid line: ${type}:${name}:${url}:${version}:${build_func}:${extra}"
            continue
        fi
        component_count=$((component_count + 1))
        log_info "Processing component ${component_count}: ${name} (${type})"
        # Download component
        case "$type" in
            "git")
                components_download_git "$name" "$url" "$version" "$components_dir"
                ;;
            "release")
                components_download_release "$name" "$url" "$version" "$components_dir" "$extra"
                ;;
            *)
                log_error "Unknown component type: $type"
                return 1
                ;;
        esac
        # Build and install component
        components_build_component "$name" "$build_func" "$components_dir"
    done < "$sources_file"
    if [[ $component_count -eq 0 ]]; then
        log_warn "No components found in sources configuration"
    else
        log_info "Processed ${component_count} components successfully"
    fi
}
# Download Git repository: clone (or update) a git component into
# ${components_dir}/${name}, then log the resulting HEAD commit.
#
# BUG FIX: the original ran `safe_execute cd "$target_dir"`; in normal
# (non-debug) mode safe_execute executes its command inside $(...) — a
# subshell — so the cd never affected this shell and the following git
# commands ran in the wrong directory. All git invocations now use
# `git -C "$target_dir"` instead of cd.
# Also, `safe_execute git pull ... || log_info` could never reach its
# fallback because safe_execute exits the whole script on failure; the
# best-effort pull now runs git directly.
function components_download_git() {
    local name="$1"
    local url="$2"
    local version="$3"
    local components_dir="$4"
    section_header "Downloading Git Component: ${name}"
    local target_dir="${components_dir}/${name}"
    log_info "Repository: ${url}"
    log_info "Version/Branch: ${version}"
    log_info "Target directory: ${target_dir}"
    if [[ -d "$target_dir" ]]; then
        log_info "Component ${name} already exists, updating..."
        safe_execute git -C "$target_dir" fetch --all
        safe_execute git -C "$target_dir" checkout "$version"
        # Best-effort fast-forward; a detached tag/commit checkout cannot pull.
        git -C "$target_dir" pull origin "$version" >/dev/null 2>&1 || log_info "Already up to date"
    else
        log_info "Cloning ${name} from ${url}"
        safe_execute git clone --depth 1 --branch "$version" "$url" "$target_dir"
    fi
    # Verify checkout by reporting the commit actually checked out.
    local current_ref
    current_ref=$(git -C "$target_dir" rev-parse HEAD)
    log_info "Current commit: ${current_ref}"
    log_info "Git component download complete: ${name}"
}
# Download release binary/archive
# Fetches a pre-built release artifact; the file name is taken from the
# URL's last path segment. Extra options (rename=…, extract) are handed to
# components_process_extra_options.
# Arguments: $1 name, $2 URL, $3 version (informational), $4 components dir,
#            $5 extra option string (may be empty)
function components_download_release() {
    local name="$1"
    local url="$2"
    local version="$3"
    local components_dir="$4"
    local extra="$5"
    section_header "Downloading Release Component: ${name}"
    local target_dir="${components_dir}/${name}"
    local filename
    filename=$(basename "$url")
    log_info "Release URL: ${url}"
    log_info "Version: ${version}"
    log_info "Target directory: ${target_dir}"
    safe_mkdir "$target_dir"
    # Download release
    log_info "Downloading release: ${filename}"
    safe_execute wget --progress=dot:giga -O "${target_dir}/${filename}" "$url"
    # Verify download
    if [[ ! -f "${target_dir}/${filename}" ]]; then
        log_error "Failed to download release: ${filename}"
        return 1
    fi
    local file_size
    file_size=$(get_file_size "${target_dir}/${filename}")
    log_info "Downloaded file size: ${file_size}"
    # Handle extra options (like rename)
    if [[ -n "$extra" ]]; then
        components_process_extra_options "$target_dir" "$filename" "$extra"
    fi
    log_info "Release component download complete: ${name}"
}
# Process extra options for components
# Applies post-download options to a release artifact:
#   rename=<new>  - rename the downloaded file
#   extract       - unpack the archive in place (tar.gz/bz2/xz, zip)
# Arguments: $1 target dir, $2 downloaded file name, $3 extra option string
function components_process_extra_options() {
    local target_dir="$1"
    local filename="$2"
    local extra="$3"
    log_info "Processing extra options: ${extra}"
    # Handle rename option
    if [[ "$extra" =~ rename=(.+) ]]; then
        local new_name="${BASH_REMATCH[1]}"
        log_info "Renaming ${filename} to ${new_name}"
        safe_execute mv "${target_dir}/${filename}" "${target_dir}/${new_name}"
        # Subsequent options (extract) must operate on the renamed file.
        filename="$new_name"
    fi
    # Handle extract option for archives. Extraction is directed into the
    # target dir with -C/-d rather than a persistent `cd`, so the caller's
    # working directory is untouched.
    if [[ "$extra" =~ extract ]]; then
        log_info "Extracting archive: ${filename}"
        case "$filename" in
            *.tar.gz|*.tgz)
                safe_execute tar -xzf "${target_dir}/${filename}" -C "$target_dir"
                ;;
            *.tar.bz2|*.tbz2)
                safe_execute tar -xjf "${target_dir}/${filename}" -C "$target_dir"
                ;;
            *.tar.xz|*.txz)
                safe_execute tar -xJf "${target_dir}/${filename}" -C "$target_dir"
                ;;
            *.zip)
                safe_execute unzip "${target_dir}/${filename}" -d "$target_dir"
                ;;
            *)
                log_warn "Unknown archive format: ${filename}"
                ;;
        esac
    fi
}
# Build component using specified build function
# Dispatches to the build function named in sources.conf. The working
# directory is switched to the component checkout first, because the
# build_* functions invoke cargo and friends relative to it.
# Arguments: $1 component name, $2 build function name, $3 components dir
function components_build_component() {
    local comp="$1"
    local builder="$2"
    local base_dir="$3"
    section_header "Building Component: ${comp}"
    local comp_dir="${base_dir}/${comp}"
    if [[ ! -d "$comp_dir" ]]; then
        log_error "Component directory not found: ${comp_dir}"
        return 1
    fi
    # Change to component directory
    safe_execute cd "$comp_dir"
    log_info "Build function: ${builder}"
    log_info "Working directory: $(pwd)"
    # Refuse to continue if the named build function was never defined.
    if ! declare -f "$builder" >/dev/null; then
        log_error "Build function not found: ${builder}"
        return 1
    fi
    log_info "Executing build function: ${builder}"
    "$builder" "$comp" "$comp_dir"
    log_info "Component build complete: ${comp}"
}
# Setup Rust environment for musl builds
# Installs the musl target when missing and exports the static-linking
# environment (RUSTFLAGS, musl-gcc as C compiler and linker).
function components_setup_rust_env() {
    section_header "Setting Up Rust Environment"
    # Only add the target when it is not already installed.
    if rustup target list --installed | grep -q "$RUST_TARGET"; then
        log_info "Rust target already installed: ${RUST_TARGET}"
    else
        log_info "Installing Rust target: ${RUST_TARGET}"
        safe_execute rustup target add "$RUST_TARGET"
    fi
    # Static linking against musl for fully self-contained binaries.
    export RUSTFLAGS="-C target-feature=+crt-static"
    export CC_x86_64_unknown_linux_musl="musl-gcc"
    export CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER="musl-gcc"
    log_info "Rust environment configured for musl builds"
    log_info "RUST_TARGET: ${RUST_TARGET}"
    log_info "RUSTFLAGS: ${RUSTFLAGS}"
}
# Build function for zinit (standard Rust build)
# Builds the static musl zinit binary in the current working directory
# (the component checkout) and installs it to ${INSTALL_DIR}/sbin/zinit.
# Arguments: $1 component name (unused), $2 component source directory
function build_zinit() {
    local name="$1"
    local component_dir="$2"
    section_header "Building zinit with musl target"
    components_setup_rust_env
    log_info "Building zinit from: ${component_dir}"
    # Build with musl target
    safe_execute cargo build --release --target "$RUST_TARGET"
    # Find and install binary
    local binary_path="target/${RUST_TARGET}/release/zinit"
    if [[ ! -f "$binary_path" ]]; then
        log_error "zinit binary not found at: ${binary_path}"
        return 1
    fi
    # Install to initramfs
    safe_mkdir "${INSTALL_DIR}/sbin"
    safe_execute cp "$binary_path" "${INSTALL_DIR}/sbin/zinit"
    safe_execute chmod +x "${INSTALL_DIR}/sbin/zinit"
    # Declaration split from assignment so a get_file_size failure is not
    # masked by `local`'s own exit status under `set -e`.
    local binary_size
    binary_size=$(get_file_size "${INSTALL_DIR}/sbin/zinit")
    log_info "Installed zinit binary (${binary_size}) to: ${INSTALL_DIR}/sbin/zinit"
}
# Build function for rfs (standard Rust build)
# Builds the static musl rfs binary in the current working directory
# (the component checkout) and installs it to ${INSTALL_DIR}/usr/bin/rfs.
# Arguments: $1 component name (unused), $2 component source directory
function build_rfs() {
    local name="$1"
    local component_dir="$2"
    section_header "Building rfs with musl target"
    components_setup_rust_env
    log_info "Building rfs from: ${component_dir}"
    # Build with musl target
    safe_execute cargo build --release --target "$RUST_TARGET"
    # Find and install binary
    local binary_path="target/${RUST_TARGET}/release/rfs"
    if [[ ! -f "$binary_path" ]]; then
        log_error "rfs binary not found at: ${binary_path}"
        return 1
    fi
    # Install to initramfs
    safe_mkdir "${INSTALL_DIR}/usr/bin"
    safe_execute cp "$binary_path" "${INSTALL_DIR}/usr/bin/rfs"
    safe_execute chmod +x "${INSTALL_DIR}/usr/bin/rfs"
    # Declaration split from assignment so a get_file_size failure is not
    # masked by `local`'s own exit status under `set -e`.
    local binary_size
    binary_size=$(get_file_size "${INSTALL_DIR}/usr/bin/rfs")
    log_info "Installed rfs binary (${binary_size}) to: ${INSTALL_DIR}/usr/bin/rfs"
}
# Build function for mycelium (special subdirectory build)
# The mycelium daemon lives in the myceliumd/ subdirectory of the checkout,
# so cargo must be run from there. Installs to ${INSTALL_DIR}/usr/bin/mycelium.
# Arguments: $1 component name (unused), $2 component source directory
function build_mycelium() {
    local name="$1"
    local component_dir="$2"
    section_header "Building mycelium with musl target (special directory)"
    components_setup_rust_env
    log_info "Building mycelium from: ${component_dir}"
    # Change to myceliumd subdirectory (special requirement)
    local myceliumd_dir="${component_dir}/myceliumd"
    if [[ ! -d "$myceliumd_dir" ]]; then
        log_error "myceliumd directory not found at: ${myceliumd_dir}"
        return 1
    fi
    safe_execute cd "$myceliumd_dir"
    log_info "Building in myceliumd subdirectory: $(pwd)"
    # Build with musl target
    safe_execute cargo build --release --target "$RUST_TARGET"
    # Find and install binary (from target/x86.../release)
    local binary_path="target/${RUST_TARGET}/release/mycelium"
    if [[ ! -f "$binary_path" ]]; then
        log_error "mycelium binary not found at: ${binary_path}"
        return 1
    fi
    # Install to initramfs
    safe_mkdir "${INSTALL_DIR}/usr/bin"
    safe_execute cp "$binary_path" "${INSTALL_DIR}/usr/bin/mycelium"
    safe_execute chmod +x "${INSTALL_DIR}/usr/bin/mycelium"
    # Declaration split from assignment so a get_file_size failure is not
    # masked by `local`'s own exit status under `set -e`.
    local binary_size
    binary_size=$(get_file_size "${INSTALL_DIR}/usr/bin/mycelium")
    log_info "Installed mycelium binary (${binary_size}) to: ${INSTALL_DIR}/usr/bin/mycelium"
}
# Install function for corex (pre-built binary)
# Locates the downloaded corex binary — either the canonical "corex" name
# (after a rename= option) or any versioned release artifact — and installs
# it to ${INSTALL_DIR}/usr/bin/corex. Generalized from a hard-coded
# "corex-2.1.4-amd64-linux-static" fallback so version bumps don't break it.
# Arguments: $1 component name (unused), $2 component download directory
function install_corex() {
    local name="$1"
    local component_dir="$2"
    section_header "Installing corex binary"
    log_info "Installing corex from: ${component_dir}"
    # Find the corex binary (may have been renamed)
    local binary_path=""
    if [[ -f "${component_dir}/corex" ]]; then
        binary_path="${component_dir}/corex"
    else
        # Fall back to any versioned artifact, e.g. corex-2.1.4-amd64-linux-static.
        # The -f test also covers the no-match case where the glob stays literal.
        local candidate
        for candidate in "${component_dir}"/corex-*; do
            if [[ -f "$candidate" ]]; then
                binary_path="$candidate"
                break
            fi
        done
    fi
    if [[ -z "$binary_path" ]]; then
        log_error "corex binary not found in: ${component_dir}"
        return 1
    fi
    # Make executable and install
    safe_execute chmod +x "$binary_path"
    safe_mkdir "${INSTALL_DIR}/usr/bin"
    safe_execute cp "$binary_path" "${INSTALL_DIR}/usr/bin/corex"
    safe_execute chmod +x "${INSTALL_DIR}/usr/bin/corex"
    local binary_size
    binary_size=$(get_file_size "${INSTALL_DIR}/usr/bin/corex")
    log_info "Installed corex binary (${binary_size}) to: ${INSTALL_DIR}/usr/bin/corex"
}
# Verify all built components
# Checks that every expected component binary exists and is executable under
# the install directory.
# Globals: INSTALL_DIR (read, defaults to ${PROJECT_ROOT}/initramfs)
# Returns: 0 when all binaries are present, 1 otherwise
function components_verify_installation() {
    local install_dir="${INSTALL_DIR:-${PROJECT_ROOT}/initramfs}"
    section_header "Verifying Component Installation"
    # List of expected binaries and their locations
    local expected_binaries=(
        "sbin/zinit"
        "usr/bin/rfs"
        "usr/bin/mycelium"
        "usr/bin/corex"
    )
    local missing_count=0
    local binary
    for binary in "${expected_binaries[@]}"; do
        local full_path="${install_dir}/${binary}"
        if [[ -f "$full_path" && -x "$full_path" ]]; then
            local size
            size=$(get_file_size "$full_path")
            log_info "✓ ${binary} (${size})"
        else
            log_error "✗ Missing or not executable: ${binary}"
            # Plain arithmetic assignment: ((missing_count++)) returns status 1
            # on the first increment from 0 and would abort under `set -e`,
            # so the function would exit before counting/reporting.
            missing_count=$((missing_count + 1))
        fi
    done
    if [[ $missing_count -eq 0 ]]; then
        log_info "All components installed successfully"
        return 0
    else
        log_error "${missing_count} components missing or invalid"
        return 1
    fi
}
# Clean component build artifacts
# With keep_sources="true" only Rust build output is removed; otherwise the
# whole components tree is deleted.
# Arguments: $1 components dir, $2 keep_sources flag (default "false")
function components_cleanup() {
    local dir="$1"
    local keep="${2:-false}"
    section_header "Cleaning Component Build Artifacts"
    if [[ "$keep" != "true" ]]; then
        log_info "Removing all component directories"
        safe_rmdir "$dir"
    else
        log_info "Keeping source directories, cleaning build artifacts only"
        # Drop Rust build output; errors (e.g. permissions) are non-fatal.
        # NOTE(review): deleting Cargo.lock sacrifices reproducible builds
        # for binaries — confirm this is intended.
        find "$dir" -name "target" -type d -exec rm -rf {} + 2>/dev/null || true
        find "$dir" -name "Cargo.lock" -type f -delete 2>/dev/null || true
    fi
    log_info "Component cleanup complete"
}
# Export functions
export -f components_parse_sources_conf
export -f components_download_git components_download_release components_process_extra_options
export -f components_build_component components_setup_rust_env
export -f build_zinit build_rfs build_mycelium install_corex
export -f components_verify_installation components_cleanup

276
scripts/lib/docker.sh Normal file
View File

@@ -0,0 +1,276 @@
#!/bin/bash
# Container management for rootless Docker/Podman builds
# Source common functions
LIB_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${LIB_SCRIPT_DIR}/common.sh"
# Container configuration
CONTAINER_RUNTIME=""
BUILDER_IMAGE="zero-os-builder:latest"
ALPINE_VERSION="${ALPINE_VERSION:-3.22}"
# Detect available container runtime
# Prefers Podman over Docker, stores the choice in CONTAINER_RUNTIME and
# verifies that rootless execution works.
# Returns: 1 when neither runtime is installed
function docker_detect_runtime() {
    section_header "Detecting Container Runtime"
    local candidate
    for candidate in podman docker; do
        if command_exists "$candidate"; then
            CONTAINER_RUNTIME="$candidate"
            break
        fi
    done
    case "$CONTAINER_RUNTIME" in
        podman)
            log_info "Using Podman as container runtime"
            ;;
        docker)
            log_info "Using Docker as container runtime"
            ;;
        *)
            log_error "No container runtime found (podman or docker required)"
            return 1
            ;;
    esac
    # Check if rootless setup is working
    docker_verify_rootless
}
# Verify rootless container setup
# Queries runtime info and runs a trivial Alpine container as the current
# (non-root) user to prove rootless execution works.
# Globals: CONTAINER_RUNTIME, ALPINE_VERSION (read)
function docker_verify_rootless() {
    section_header "Verifying Rootless Container Setup"
    log_info "Checking ${CONTAINER_RUNTIME} rootless configuration"
    # Quote the runtime so an empty/unset value fails loudly instead of
    # silently word-splitting into nothing.
    safe_execute "${CONTAINER_RUNTIME}" system info
    # Test basic rootless functionality
    log_info "Testing rootless container execution"
    safe_execute "${CONTAINER_RUNTIME}" run --rm "alpine:${ALPINE_VERSION}" echo "Rootless container test successful"
    log_info "Rootless container setup verified"
}
# Build container image with build tools
# Generates a Dockerfile when none exists, then builds the builder image.
# Arguments: $1 Dockerfile path (default ${PROJECT_ROOT}/Dockerfile),
#            $2 image tag (default ${BUILDER_IMAGE})
function docker_build_container() {
    local dockerfile_path="${1:-${PROJECT_ROOT}/Dockerfile}"
    local tag="${2:-${BUILDER_IMAGE}}"
    section_header "Building Container Image"
    # Create Dockerfile if it doesn't exist
    if [[ ! -f "$dockerfile_path" ]]; then
        docker_create_dockerfile "$dockerfile_path"
    fi
    log_info "Building container image: ${tag}"
    # Quote runtime and paths so spaces or empty values cannot word-split.
    safe_execute "${CONTAINER_RUNTIME}" build -t "${tag}" -f "${dockerfile_path}" "${PROJECT_ROOT}"
    log_info "Container image built successfully: ${tag}"
}
# Create optimized Dockerfile for build environment
# Writes a Dockerfile to $1. The base image tracks ${ALPINE_VERSION} instead
# of a hard-coded tag. Alpine's apk-provided rust/cargo target
# x86_64-unknown-linux-musl natively and do NOT ship rustup, so no
# `rustup target add` step is emitted (it would fail: rustup not installed).
function docker_create_dockerfile() {
    local dockerfile_path="$1"
    section_header "Creating Dockerfile"
    # Unquoted heredoc delimiter so ${ALPINE_VERSION} expands at generation
    # time; backslashes are doubled to survive heredoc line-continuation
    # processing, and no other line contains shell-expandable text.
    cat > "$dockerfile_path" << EOF
FROM alpine:${ALPINE_VERSION:-3.22}
# Install build dependencies
RUN apk add --no-cache \\
    build-base \\
    rust \\
    cargo \\
    upx \\
    git \\
    wget \\
    tar \\
    gzip \\
    xz \\
    cpio \\
    binutils \\
    linux-headers \\
    musl-dev \\
    pkgconfig \\
    openssl-dev
# Alpine's rust/cargo target x86_64-unknown-linux-musl natively; rustup is
# not available in this image, so no extra target installation is required.
# Create non-root user for builds
RUN adduser -D -s /bin/sh builder && \\
    chown -R builder:builder /home/builder
# Set working directory
WORKDIR /workspace
# Switch to non-root user
USER builder
# Set environment variables for static linking
ENV RUSTFLAGS="-C target-feature=+crt-static"
ENV CC_x86_64_unknown_linux_musl="musl-gcc"
ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER="musl-gcc"
CMD ["/bin/sh"]
EOF
    log_info "Created Dockerfile: ${dockerfile_path}"
}
# Start rootless container for building
# Launches an interactive container with the project mounted at the workdir
# and selected environment variables passed through.
# Arguments: $1 image (default builder image), $2 workdir (default /workspace),
#            $3 command (default /bin/sh; may be multi-word)
function docker_start_rootless() {
    local image="${1:-${BUILDER_IMAGE}}"
    local workdir="${2:-/workspace}"
    local command="${3:-/bin/sh}"
    section_header "Starting Rootless Container"
    # Arrays instead of whitespace-joined strings: env values containing
    # spaces (e.g. OPTIMIZATION_LEVEL flags) would otherwise be word-split
    # into bogus extra arguments.
    local -a user_args=(--user "$(id -u):$(id -g)")
    local -a volume_args=(-v "${PROJECT_ROOT}:${workdir}")
    local -a env_args=()
    # Pass through environment variables
    local env_vars=(
        "DEBUG"
        "ALPINE_VERSION"
        "KERNEL_VERSION"
        "RUST_TARGET"
        "OPTIMIZATION_LEVEL"
    )
    local var
    for var in "${env_vars[@]}"; do
        if [[ -n "${!var:-}" ]]; then
            env_args+=(-e "${var}=${!var}")
        fi
    done
    log_info "Starting container with rootless privileges"
    # ${env_args[@]+…} guards the empty-array expansion under `set -u` on
    # bash < 4.4; ${command} stays unquoted on purpose so a multi-word
    # command splits into separate argv entries.
    safe_execute "${CONTAINER_RUNTIME}" run --rm -it \
        "${user_args[@]}" \
        "${volume_args[@]}" \
        ${env_args[@]+"${env_args[@]}"} \
        -w "${workdir}" \
        "${image}" \
        ${command}
}
# Run build command in container
# Executes the given build script non-interactively inside the builder image,
# with the project mounted at /workspace and running as the invoking user.
# Arguments: $1 build script relative to PROJECT_ROOT, $2 image tag
function docker_run_build() {
    local build_script="${1:-./scripts/build.sh}"
    local image="${2:-${BUILDER_IMAGE}}"
    section_header "Running Build in Container"
    # Ensure build script is executable
    safe_execute chmod +x "${PROJECT_ROOT}/${build_script}"
    # Arrays instead of whitespace-joined strings so paths with spaces
    # survive; the runtime binary is quoted for the same reason.
    local -a user_args=(--user "$(id -u):$(id -g)")
    local -a volume_args=(-v "${PROJECT_ROOT}:/workspace")
    local -a work_args=(-w /workspace)
    log_info "Executing build script in container: ${build_script}"
    # ${build_script} stays unquoted on purpose so a multi-word command
    # (script plus arguments) splits into separate argv entries.
    safe_execute "${CONTAINER_RUNTIME}" run --rm \
        "${user_args[@]}" \
        "${volume_args[@]}" \
        "${work_args[@]}" \
        "${image}" \
        ${build_script}
}
# Commit container state for reuse
# Snapshots a (possibly warmed-up) builder container as a new image tag so
# later builds can skip the setup work.
# Arguments: $1 container id, $2 target tag (default ${BUILDER_IMAGE}-cached)
function docker_commit_builder() {
    local cid="$1"
    local tag="${2:-${BUILDER_IMAGE}-cached}"
    section_header "Committing Builder Container"
    log_info "Committing container ${cid} as ${tag}"
    safe_execute ${CONTAINER_RUNTIME} commit "${cid}" "${tag}"
    log_info "Container committed successfully: ${tag}"
}
# Clean up container images
# Removes the builder images (unless $1 is "true") and prunes unused
# containers/images. Missing images are ignored.
function docker_cleanup() {
    local keep_builder="${1:-false}"
    section_header "Cleaning Up Container Images"
    if [[ "$keep_builder" != "true" ]]; then
        log_info "Removing builder images"
        local img
        for img in "${BUILDER_IMAGE}" "${BUILDER_IMAGE}-cached"; do
            # `|| true`: removal of an image that was never built is fine.
            safe_execute ${CONTAINER_RUNTIME} rmi "$img" || true
        done
    fi
    log_info "Pruning unused containers and images"
    safe_execute ${CONTAINER_RUNTIME} system prune -f
    log_info "Container cleanup complete"
}
# Check container runtime capabilities
# Reports on host prerequisites for rootless containers: user-namespace
# support and subuid/subgid ranges for the current user. Warn-only; the
# function never fails.
function docker_check_capabilities() {
    section_header "Checking Container Capabilities"
    # Check user namespace support
    if [[ -f /proc/sys/user/max_user_namespaces ]]; then
        # Declaration split from assignment so a read failure is not masked
        # by `local`'s own exit status.
        local max_namespaces
        max_namespaces=$(cat /proc/sys/user/max_user_namespaces)
        log_info "User namespaces available: ${max_namespaces}"
        if [[ "$max_namespaces" -eq 0 ]]; then
            log_warn "User namespaces are disabled, rootless containers may not work"
        fi
    fi
    # Check subuid/subgid configuration
    local current_user
    current_user=$(whoami)
    if [[ -f /etc/subuid ]] && grep -q "^${current_user}:" /etc/subuid; then
        log_info "subuid configured for user: ${current_user}"
    else
        log_warn "subuid not configured for user: ${current_user}"
        log_warn "Run: echo '${current_user}:100000:65536' | sudo tee -a /etc/subuid"
    fi
    if [[ -f /etc/subgid ]] && grep -q "^${current_user}:" /etc/subgid; then
        log_info "subgid configured for user: ${current_user}"
    else
        log_warn "subgid not configured for user: ${current_user}"
        log_warn "Run: echo '${current_user}:100000:65536' | sudo tee -a /etc/subgid"
    fi
}
# Setup rootless environment
# One-time host preparation for rootless containers: refuses to run as root,
# appends subuid/subgid ranges for the current user (via sudo) when missing,
# and migrates existing Podman state when Podman is the selected runtime.
# Globals: CONTAINER_RUNTIME (read), EUID (read)
# Returns: 1 when invoked as root, otherwise 0
function docker_setup_rootless() {
    section_header "Setting Up Rootless Environment"
    local current_user=$(whoami)
    # Check if running as root
    if [[ "$EUID" -eq 0 ]]; then
        log_error "Do not run as root. Rootless containers require non-root user."
        return 1
    fi
    # Check and setup subuid/subgid if needed
    # NOTE(review): the 100000:65536 range is appended blindly — confirm it
    # does not collide with ranges already allocated to other users.
    if ! grep -q "^${current_user}:" /etc/subuid 2>/dev/null; then
        log_info "Setting up subuid for ${current_user}"
        echo "${current_user}:100000:65536" | sudo tee -a /etc/subuid
    fi
    if ! grep -q "^${current_user}:" /etc/subgid 2>/dev/null; then
        log_info "Setting up subgid for ${current_user}"
        echo "${current_user}:100000:65536" | sudo tee -a /etc/subgid
    fi
    # Initialize container runtime if needed
    if [[ "$CONTAINER_RUNTIME" == "podman" ]]; then
        log_info "Initializing Podman for rootless use"
        # migrate is best-effort; ignore failures on fresh installs
        safe_execute podman system migrate || true
    fi
    log_info "Rootless environment setup complete"
}
# Export functions
export -f docker_detect_runtime docker_verify_rootless
export -f docker_build_container docker_create_dockerfile
export -f docker_start_rootless docker_run_build
export -f docker_commit_builder docker_cleanup
export -f docker_check_capabilities docker_setup_rootless

440
scripts/lib/initramfs.sh Normal file
View File

@@ -0,0 +1,440 @@
#!/bin/bash
# Initramfs assembly and optimization
# Source common functions
LIB_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${LIB_SCRIPT_DIR}/common.sh"
# Initramfs configuration
INITRAMFS_COMPRESSION="${INITRAMFS_COMPRESSION:-xz}"
XZ_COMPRESSION_LEVEL="${XZ_COMPRESSION_LEVEL:-9}"
# Setup zinit as init system (replaces OpenRC completely)
# Installs zinit as /sbin/init (symlink), copies its configuration tree,
# normalizes permissions, creates runtime directories and purges any OpenRC
# leftovers from the initramfs.
# Arguments: $1 initramfs root, $2 directory containing zinit config files
# Returns: 1 when the config dir or the zinit binary is missing
function initramfs_setup_zinit() {
    local initramfs_dir="$1"
    local zinit_config_dir="$2"
    section_header "Setting up zinit as init system"
    if [[ ! -d "$zinit_config_dir" ]]; then
        log_error "zinit configuration directory not found: ${zinit_config_dir}"
        return 1
    fi
    # Verify zinit binary exists
    if [[ ! -x "${initramfs_dir}/sbin/zinit" ]]; then
        log_error "zinit binary not found or not executable: ${initramfs_dir}/sbin/zinit"
        return 1
    fi
    # Remove existing init (if any) and replace with zinit
    log_info "Replacing system init with zinit"
    safe_execute rm -f "${initramfs_dir}/sbin/init"
    safe_execute ln -sf zinit "${initramfs_dir}/sbin/init"
    # Copy zinit configuration
    log_info "Installing zinit configuration"
    safe_mkdir "${initramfs_dir}/etc/zinit"
    safe_copy "${zinit_config_dir}"/* "${initramfs_dir}/etc/zinit/"
    # Ensure proper permissions. A recursive `chmod -R 644` would also strip
    # the execute bit from directories, making the tree untraversable — set
    # directory and file modes separately instead.
    safe_execute chmod 755 "${initramfs_dir}/sbin/zinit"
    safe_execute find "${initramfs_dir}/etc/zinit" -type d -exec chmod 755 {} \;
    safe_execute find "${initramfs_dir}/etc/zinit" -type f -exec chmod 644 {} \;
    safe_execute find "${initramfs_dir}/etc/zinit" -name "*.sh" -exec chmod 755 {} \;
    # Create zinit working directories
    safe_mkdir "${initramfs_dir}/var/log/zinit"
    safe_mkdir "${initramfs_dir}/run/zinit"
    # Remove any OpenRC remnants (ensure complete replacement)
    local openrc_paths=(
        "/etc/init.d"
        "/etc/runlevels"
        "/etc/conf.d"
        "/sbin/openrc"
        "/sbin/rc-service"
        "/sbin/rc-status"
        "/sbin/rc-update"
    )
    local path
    for path in "${openrc_paths[@]}"; do
        if [[ -e "${initramfs_dir}${path}" ]]; then
            log_info "Removing OpenRC remnant: ${path}"
            safe_execute rm -rf "${initramfs_dir}${path}"
        fi
    done
    log_info "zinit setup complete - OpenRC completely replaced"
}
# Setup 2-stage module loading system
# Extracts "stage1:" (critical boot) and "stage2:" (extended hardware) module
# lists from the modules config into the initramfs and generates the loader
# scripts.
# Arguments: $1 initramfs root, $2 modules.conf path, $3 kernel version
function initramfs_setup_modules() {
    local initramfs_dir="$1"
    local modules_conf="$2"
    local kernel_version="${3:-$(uname -r)}"
    section_header "Setting up 2-stage module loading"
    if [[ ! -f "$modules_conf" ]]; then
        log_error "Modules configuration file not found: ${modules_conf}"
        return 1
    fi
    local modules_dir="${initramfs_dir}/lib/modules/${kernel_version}"
    safe_mkdir "$modules_dir"
    # `|| true`: grep exits 1 when a stage has no entries, which would kill
    # the script under `set -e -o pipefail`; an empty list file is valid.
    log_info "Creating stage1 module list (critical boot modules)"
    grep "^stage1:" "$modules_conf" | cut -d: -f2 > "${modules_dir}/stage1.list" || true
    log_info "Creating stage2 module list (extended hardware support)"
    grep "^stage2:" "$modules_conf" | cut -d: -f2 > "${modules_dir}/stage2.list" || true
    # Create module loading scripts
    initramfs_create_module_scripts "$initramfs_dir" "$kernel_version"
    # Count modules; declarations split from assignments so wc failures are
    # not masked by `local`.
    local stage1_count stage2_count
    stage1_count=$(wc -l < "${modules_dir}/stage1.list" 2>/dev/null || echo 0)
    stage2_count=$(wc -l < "${modules_dir}/stage2.list" 2>/dev/null || echo 0)
    log_info "Module configuration complete:"
    log_info "  Stage1 (critical): ${stage1_count} modules"
    log_info "  Stage2 (extended): ${stage2_count} modules"
}
# Create module loading scripts for zinit
# Generates /etc/zinit/init/stage{1,2}-modules.sh inside the initramfs.
# The generated scripts run under busybox /bin/sh at boot; comment lines are
# filtered with a portable `case` pattern because the original
# [ "$module" != "#"* ] test does not glob-match in POSIX test(1).
# Arguments: $1 initramfs root, $2 kernel version (unused in script bodies,
#            which read `uname -r` at boot time)
function initramfs_create_module_scripts() {
    local initramfs_dir="$1"
    local kernel_version="$2"
    log_info "Creating module loading scripts"
    safe_mkdir "${initramfs_dir}/etc/zinit/init"
    # Stage1 module loading script (critical modules)
    cat > "${initramfs_dir}/etc/zinit/init/stage1-modules.sh" << 'EOF'
#!/bin/sh
# Stage1 module loading - critical boot modules
KERNEL_VERSION=$(uname -r)
STAGE1_LIST="/lib/modules/${KERNEL_VERSION}/stage1.list"
echo "Loading stage1 modules (critical boot)"
if [ -f "$STAGE1_LIST" ]; then
    while read -r module; do
        # Skip blank lines and comments
        case "$module" in
            ''|\#*) continue ;;
        esac
        echo "Loading critical module: $module"
        modprobe "$module" 2>/dev/null || echo "Warning: Failed to load $module"
    done < "$STAGE1_LIST"
else
    echo "Warning: Stage1 module list not found: $STAGE1_LIST"
fi
echo "Stage1 module loading complete"
EOF
    # Stage2 module loading script (extended hardware)
    cat > "${initramfs_dir}/etc/zinit/init/stage2-modules.sh" << 'EOF'
#!/bin/sh
# Stage2 module loading - extended hardware support
KERNEL_VERSION=$(uname -r)
STAGE2_LIST="/lib/modules/${KERNEL_VERSION}/stage2.list"
echo "Loading stage2 modules (extended hardware)"
if [ -f "$STAGE2_LIST" ]; then
    while read -r module; do
        # Skip blank lines and comments
        case "$module" in
            ''|\#*) continue ;;
        esac
        echo "Loading hardware module: $module"
        modprobe "$module" 2>/dev/null || echo "Info: Module $module not available"
    done < "$STAGE2_LIST"
else
    echo "Warning: Stage2 module list not found: $STAGE2_LIST"
fi
echo "Stage2 module loading complete"
EOF
    # Make scripts executable
    safe_execute chmod 755 "${initramfs_dir}/etc/zinit/init/stage1-modules.sh"
    safe_execute chmod 755 "${initramfs_dir}/etc/zinit/init/stage2-modules.sh"
    log_info "Module loading scripts created"
}
# Strip and UPX compress all binaries for maximum size optimization
# Strips ELF executables then UPX-compresses them; shared libraries are
# stripped conservatively with --strip-unneeded. Individual failures are
# counted and reported, never fatal.
# Arguments: $1 initramfs root directory
function initramfs_strip_and_upx() {
    local initramfs_dir="$1"
    section_header "Stripping and UPX compressing binaries"
    # Counters use plain assignment throughout: ((x++)) returns status 1 on
    # the first increment from 0 and would abort the script under `set -e`.
    local stripped_count=0
    local upx_count=0
    local failed_strip=0
    local failed_upx=0
    # Find and process all executable files
    log_info "Processing executable files..."
    local file
    while IFS= read -r -d '' file; do
        # Check if it's a valid ELF executable
        if file "$file" | grep -q "ELF.*executable"; then
            log_debug "Processing executable: $file"
            # Strip debug symbols
            if strip "$file" 2>/dev/null; then
                stripped_count=$((stripped_count + 1))
                log_debug "Stripped: $file"
            else
                failed_strip=$((failed_strip + 1))
                log_debug "Failed to strip: $file"
            fi
            # UPX compress (best compression)
            if upx --best --force "$file" 2>/dev/null; then
                upx_count=$((upx_count + 1))
                log_debug "UPX compressed: $file"
            else
                failed_upx=$((failed_upx + 1))
                log_debug "Failed to UPX: $file"
            fi
        fi
    done < <(find "$initramfs_dir" -type f -executable -print0)
    # Process shared libraries
    log_info "Processing shared libraries..."
    local lib_stripped=0
    local lib_failed=0
    while IFS= read -r -d '' file; do
        if file "$file" | grep -q "ELF.*shared object"; then
            log_debug "Processing library: $file"
            # Strip libraries (more conservative - keep function symbols)
            if strip --strip-unneeded "$file" 2>/dev/null; then
                lib_stripped=$((lib_stripped + 1))
                log_debug "Stripped library: $file"
            else
                lib_failed=$((lib_failed + 1))
                log_debug "Failed to strip library: $file"
            fi
        fi
    done < <(find "$initramfs_dir" -name "*.so*" -type f -print0)
    # Summary
    log_info "Binary optimization complete:"
    log_info "  Executables stripped: ${stripped_count} (${failed_strip} failed)"
    log_info "  Executables UPX compressed: ${upx_count} (${failed_upx} failed)"
    log_info "  Libraries stripped: ${lib_stripped} (${lib_failed} failed)"
    # Calculate space savings (du -sb is GNU-specific; fine on this Alpine builder)
    local total_size
    total_size=$(du -sb "$initramfs_dir" 2>/dev/null | cut -f1 || echo "0")
    local total_mb=$((total_size / 1024 / 1024))
    log_info "Total initramfs size after optimization: ${total_mb}MB"
}
# Create final initramfs.cpio.<compression> archive
# Packs the initramfs tree into a newc-format CPIO archive with the chosen
# compression. The pipelines run in a subshell (caller's CWD untouched) and
# WITHOUT safe_execute wrapping the first stage only — wrapping just `find`
# risks mixing the wrapper's logging output into the archive stream.
# Arguments: $1 initramfs root dir, $2 output archive path,
#            $3 compression (xz|gzip|gz|zstd|none|uncompressed)
function initramfs_create_cpio() {
    local initramfs_dir="$1"
    local output_file="$2"
    local compression="${3:-$INITRAMFS_COMPRESSION}"
    section_header "Creating initramfs.cpio.${compression}"
    if [[ ! -d "$initramfs_dir" ]]; then
        log_error "Initramfs directory not found: ${initramfs_dir}"
        return 1
    fi
    # Ensure output directory exists
    local output_dir
    output_dir=$(dirname "$output_file")
    safe_mkdir "$output_dir"
    # Remove any existing output file
    safe_execute rm -f "$output_file"
    # Resolve the output path to absolute: the cpio pipeline below runs with
    # the initramfs dir as CWD, which would break a relative output path.
    case "$output_file" in
        /*) ;;
        *) output_file="$(pwd)/${output_file}" ;;
    esac
    log_info "Source directory: ${initramfs_dir}"
    log_info "Output file: ${output_file}"
    log_info "Compression: ${compression}"
    case "$compression" in
        "xz")
            log_info "Creating XZ compressed CPIO archive"
            (cd "$initramfs_dir" && find . -print0 | cpio -o -H newc -0 | xz -"${XZ_COMPRESSION_LEVEL}" --check=crc32 > "$output_file")
            ;;
        "gzip"|"gz")
            log_info "Creating gzip compressed CPIO archive"
            (cd "$initramfs_dir" && find . -print0 | cpio -o -H newc -0 | gzip -9 > "$output_file")
            ;;
        "zstd")
            log_info "Creating zstd compressed CPIO archive"
            (cd "$initramfs_dir" && find . -print0 | cpio -o -H newc -0 | zstd -19 > "$output_file")
            ;;
        "none"|"uncompressed")
            log_info "Creating uncompressed CPIO archive"
            (cd "$initramfs_dir" && find . -print0 | cpio -o -H newc -0 > "$output_file")
            ;;
        *)
            log_error "Unsupported compression format: ${compression}"
            return 1
            ;;
    esac
    # Verify output file was created
    if [[ ! -f "$output_file" ]]; then
        log_error "Failed to create initramfs archive: ${output_file}"
        return 1
    fi
    # Report final size
    local final_size uncompressed_size
    final_size=$(get_file_size "$output_file")
    uncompressed_size=$(du -sh "$initramfs_dir" | cut -f1)
    log_info "Initramfs creation complete:"
    log_info "  Uncompressed size: ${uncompressed_size}"
    log_info "  Final archive size: ${final_size}"
    log_info "  Archive: ${output_file}"
}
# Validate initramfs contents
# Checks for essential files/directories, the init→zinit symlink, zinit
# configuration, OpenRC leftovers (warn-only) and component binaries
# (warn-only).
# Arguments: $1 initramfs root directory
# Returns: 0 when no hard errors were found, 1 otherwise
function initramfs_validate() {
    local initramfs_dir="$1"
    section_header "Validating initramfs contents"
    local errors=0
    # Check essential files and directories
    local essential_items=(
        "sbin/init"
        "sbin/zinit"
        "bin/busybox"
        "etc/zinit"
        "lib"
        "usr/bin"
        "var"
        "tmp"
        "proc"
        "sys"
        "dev"
    )
    local item
    for item in "${essential_items[@]}"; do
        if [[ ! -e "${initramfs_dir}/${item}" ]]; then
            log_error "Missing essential item: ${item}"
            # Plain arithmetic assignment: ((errors++)) exits non-zero on the
            # first increment from 0 and would abort under `set -e`.
            errors=$((errors + 1))
        else
            log_debug "Found: ${item}"
        fi
    done
    # Check that init is properly linked to zinit
    if [[ -L "${initramfs_dir}/sbin/init" ]]; then
        local link_target
        link_target=$(readlink "${initramfs_dir}/sbin/init")
        if [[ "$link_target" == "zinit" ]]; then
            log_info "✓ /sbin/init correctly linked to zinit"
        else
            log_error "✗ /sbin/init linked to wrong target: ${link_target}"
            errors=$((errors + 1))
        fi
    else
        log_error "✗ /sbin/init is not a symbolic link"
        errors=$((errors + 1))
    fi
    # Check zinit configuration
    if [[ -f "${initramfs_dir}/etc/zinit/zinit.conf" ]]; then
        log_info "✓ zinit configuration found"
    else
        log_error "✗ zinit configuration missing"
        errors=$((errors + 1))
    fi
    # Verify no OpenRC remnants (warn-only; does not fail validation)
    local openrc_check=(
        "etc/init.d"
        "etc/runlevels"
        "sbin/openrc"
    )
    local path
    for path in "${openrc_check[@]}"; do
        if [[ -e "${initramfs_dir}/${path}" ]]; then
            log_warn "OpenRC remnant found: ${path}"
        fi
    done
    # Check component binaries (warn-only)
    local component_binaries=(
        "usr/bin/rfs"
        "usr/bin/mycelium"
        "usr/bin/corex"
    )
    local binary
    for binary in "${component_binaries[@]}"; do
        if [[ -x "${initramfs_dir}/${binary}" ]]; then
            log_info "✓ Component binary: ${binary}"
        else
            log_warn "Component binary missing or not executable: ${binary}"
        fi
    done
    if [[ $errors -eq 0 ]]; then
        log_info "Initramfs validation passed"
        return 0
    else
        log_error "Initramfs validation failed with ${errors} errors"
        return 1
    fi
}
# Test initramfs archive integrity
# Runs the decompressor (or cpio) in test mode matching the file suffix;
# unknown suffixes are skipped with a warning.
# Arguments: $1 archive file path
function initramfs_test_archive() {
    local archive="$1"
    section_header "Testing initramfs archive integrity"
    if [[ ! -f "$archive" ]]; then
        log_error "Archive file not found: ${archive}"
        return 1
    fi
    # Test based on file extension
    case "$archive" in
        *.xz)
            log_info "Testing XZ archive integrity"
            safe_execute xz -t "$archive"
            ;;
        *.gz)
            log_info "Testing gzip archive integrity"
            safe_execute gzip -t "$archive"
            ;;
        *.zst)
            log_info "Testing zstd archive integrity"
            safe_execute zstd -t "$archive"
            ;;
        *.cpio)
            log_info "Testing CPIO archive integrity"
            safe_execute cpio -t < "$archive" >/dev/null
            ;;
        *)
            log_warn "Unknown archive format, skipping integrity test"
            return 0
            ;;
    esac
    log_info "Archive integrity test passed"
}
# Export functions
export -f initramfs_setup_zinit initramfs_setup_modules initramfs_create_module_scripts
export -f initramfs_strip_and_upx initramfs_create_cpio
export -f initramfs_validate initramfs_test_archive

265
scripts/lib/kernel.sh Normal file
View File

@@ -0,0 +1,265 @@
#!/bin/bash
# Kernel building with embedded initramfs
# Source common functions
LIB_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${LIB_SCRIPT_DIR}/common.sh"
# Kernel configuration
KERNEL_VERSION="${KERNEL_VERSION:-6.12.44}"
KERNEL_SOURCE_URL="${KERNEL_SOURCE_URL:-https://cdn.kernel.org/pub/linux/kernel}"
KERNEL_CONFIG_SOURCE="${KERNEL_CONFIG_SOURCE:-${PROJECT_ROOT}/configs/kernel-config-generic}"
# Download kernel source
# Fetches linux-<version>.tar.xz from kernel.org into /tmp, extracts it under
# $1/linux-<version> and symlinks $1/current to it.
# Arguments: $1 kernel work dir (wiped first!), $2 version (default KERNEL_VERSION)
# Returns: 1 when extraction did not produce the expected source directory
function kernel_download_source() {
    local kernel_dir="$1"
    local version="${2:-$KERNEL_VERSION}"
    section_header "Downloading Kernel Source"
    # kernel.org layout: v<major>.x/linux-<full-version>.tar.xz
    local major_version=$(echo "$version" | cut -d. -f1)
    local url="${KERNEL_SOURCE_URL}/v${major_version}.x/linux-${version}.tar.xz"
    local temp_file="/tmp/linux-${version}.tar.xz"
    local source_dir="${kernel_dir}/linux-${version}"
    log_info "Kernel version: ${version}"
    log_info "Download URL: ${url}"
    log_info "Target directory: ${kernel_dir}"
    # Clean existing kernel directory — any previously extracted tree is discarded
    if [[ -d "$kernel_dir" ]]; then
        log_info "Cleaning existing kernel directory"
        safe_rmdir "$kernel_dir"
    fi
    safe_mkdir "$kernel_dir"
    # Download kernel source (reuses a tarball left over from an aborted run)
    if [[ ! -f "$temp_file" ]]; then
        log_info "Downloading kernel source: ${version}"
        safe_execute wget --progress=dot:giga -O "$temp_file" "$url"
    else
        log_info "Using cached kernel source: ${temp_file}"
    fi
    # Verify download
    local file_size=$(get_file_size "$temp_file")
    log_info "Kernel source size: ${file_size}"
    # Extract kernel source
    log_info "Extracting kernel source"
    safe_execute tar -xJf "$temp_file" -C "$kernel_dir"
    # Verify extraction
    if [[ ! -d "$source_dir" ]]; then
        log_error "Kernel source extraction failed"
        return 1
    fi
    # Create symlink for easier access (relative target, so kernel_dir can move)
    safe_execute ln -sf "linux-${version}" "${kernel_dir}/current"
    # Cleanup download
    # NOTE(review): deleting the tarball here defeats the "cached kernel
    # source" branch above on later successful runs — confirm intended.
    safe_execute rm "$temp_file"
    log_info "Kernel source download complete: ${source_dir}"
}
# Apply kernel configuration with embedded initramfs
# Copies the base config into the kernel source tree, points it at the
# initramfs archive (via kernel_modify_config_for_initramfs, which edits
# ./.config in place) and runs `make olddefconfig` to settle new options.
# Arguments: $1 kernel work dir (must contain ./current), $2 initramfs archive,
#            $3 base config file (default KERNEL_CONFIG_SOURCE)
# Side effect: leaves the shell cd'ed into the kernel source tree — the
# helper and the subsequent build step depend on that working directory.
function kernel_apply_config() {
    local kernel_dir="$1"
    local initramfs_path="$2"
    local config_source="${3:-$KERNEL_CONFIG_SOURCE}"
    section_header "Applying Kernel Configuration"
    local source_dir="${kernel_dir}/current"
    if [[ ! -d "$source_dir" ]]; then
        log_error "Kernel source directory not found: ${source_dir}"
        return 1
    fi
    if [[ ! -f "$config_source" ]]; then
        log_error "Kernel config source not found: ${config_source}"
        return 1
    fi
    if [[ ! -f "$initramfs_path" ]]; then
        log_error "Initramfs file not found: ${initramfs_path}"
        return 1
    fi
    safe_execute cd "$source_dir"
    # Copy base configuration
    log_info "Copying kernel configuration from: ${config_source}"
    safe_copy "$config_source" ".config"
    # Update configuration for embedded initramfs
    log_info "Updating configuration for embedded initramfs"
    log_info "Initramfs path: ${initramfs_path}"
    # Resolve absolute path for initramfs — CONFIG_INITRAMFS_SOURCE must not
    # depend on the build CWD
    local abs_initramfs_path=$(resolve_path "$initramfs_path")
    # Modify config for embedded initramfs (operates on ./.config)
    kernel_modify_config_for_initramfs "$abs_initramfs_path"
    # Run olddefconfig to apply defaults for any new options
    log_info "Running olddefconfig to finalize configuration"
    safe_execute make olddefconfig
    log_info "Kernel configuration applied successfully"
}
# Modify kernel config for embedded initramfs
# Edits ./.config (the caller has already cd'ed into the kernel source tree):
# sets CONFIG_INITRAMFS_SOURCE and enables XZ decompression. Options absent
# from the base config are appended rather than left untouched — a plain sed
# substitution is a silent no-op on a missing line and the verification
# below would then fail even though the fix is trivial.
# Arguments: $1 absolute path to the initramfs archive
function kernel_modify_config_for_initramfs() {
    local initramfs_path="$1"
    log_info "Modifying kernel config for embedded initramfs"
    # Point the kernel at our initramfs archive
    if grep -q '^CONFIG_INITRAMFS_SOURCE=' .config; then
        safe_execute sed -i "s|^CONFIG_INITRAMFS_SOURCE=.*|CONFIG_INITRAMFS_SOURCE=\"${initramfs_path}\"|" .config
    else
        echo "CONFIG_INITRAMFS_SOURCE=\"${initramfs_path}\"" >> .config
    fi
    # Ensure XZ compression is enabled for initramfs
    safe_execute sed -i 's/^# CONFIG_RD_XZ is not set/CONFIG_RD_XZ=y/' .config
    if ! grep -q '^CONFIG_RD_XZ=y' .config; then
        echo "CONFIG_RD_XZ=y" >> .config
    fi
    safe_execute sed -i 's/^CONFIG_INITRAMFS_COMPRESSION_NONE=y/# CONFIG_INITRAMFS_COMPRESSION_NONE is not set/' .config
    safe_execute sed -i 's/^# CONFIG_INITRAMFS_COMPRESSION_XZ is not set/CONFIG_INITRAMFS_COMPRESSION_XZ=y/' .config
    # Verify critical settings
    if ! grep -q "CONFIG_INITRAMFS_SOURCE=\"${initramfs_path}\"" .config; then
        log_error "Failed to set INITRAMFS_SOURCE in kernel config"
        return 1
    fi
    if ! grep -q "CONFIG_RD_XZ=y" .config; then
        log_error "Failed to enable XZ decompression in kernel config"
        return 1
    fi
    log_info "Kernel config updated for embedded initramfs"
}
# Build kernel with embedded initramfs
#
# $1 - kernel .config to use as the base configuration
# $2 - initramfs cpio archive to embed into the kernel image
# $3 - destination path for the resulting bzImage
# $4 - kernel working directory (default: ${PROJECT_ROOT}/kernel)
function kernel_build_with_initramfs() {
    local kernel_config="$1"
    local initramfs_path="$2"
    local output_file="$3"
    local kernel_dir="${4:-${PROJECT_ROOT}/kernel}"

    section_header "Building Kernel with Embedded Initramfs"

    # Download kernel source if needed
    if [[ ! -d "${kernel_dir}/current" ]]; then
        kernel_download_source "$kernel_dir"
    fi

    # Apply configuration
    kernel_apply_config "$kernel_dir" "$initramfs_path" "$kernel_config"

    local source_dir="${kernel_dir}/current"
    safe_execute cd "$source_dir"

    # Clean previous build
    log_info "Cleaning previous kernel build"
    safe_execute make clean

    # Determine number of cores for parallel build; declaration separated from
    # command substitution so an nproc failure is not masked (SC2155).
    local cores
    cores=$(nproc)
    local jobs=$((cores > 1 ? cores - 1 : 1)) # Leave one core free
    log_info "Building with ${jobs} parallel jobs"

    # Build kernel
    log_info "Building kernel (this may take a while)..."
    safe_execute make -j"${jobs}" bzImage

    # Verify kernel was built
    local kernel_image="arch/x86/boot/bzImage"
    if [[ ! -f "$kernel_image" ]]; then
        log_error "Kernel build failed - bzImage not found"
        return 1
    fi

    # Copy to output location
    local output_dir
    output_dir=$(dirname "$output_file")
    safe_mkdir "$output_dir"
    safe_copy "$kernel_image" "$output_file"

    # Verify final kernel
    local kernel_size
    kernel_size=$(get_file_size "$output_file")
    log_info "Kernel build complete:"
    log_info "  Output file: ${output_file}"
    log_info "  Kernel size: ${kernel_size}"

    # Verify initramfs is embedded (heuristic string scan only)
    if strings "$output_file" | grep -q "initramfs"; then
        log_info "✓ Initramfs appears to be embedded in kernel"
    else
        log_warn "Initramfs embedding verification inconclusive"
    fi
}
# Build modules for initramfs
#
# $1 - kernel working directory containing the 'current' source symlink
# $2 - staging directory passed to `make modules_install`
# $3 - kernel version used for depmod (default: $KERNEL_VERSION)
function kernel_build_modules() {
    local kernel_dir="$1"
    local modules_install_dir="$2"
    local version="${3:-$KERNEL_VERSION}"

    section_header "Building Kernel Modules"

    local source_dir="${kernel_dir}/current"
    if [[ ! -d "$source_dir" ]]; then
        log_error "Kernel source directory not found: ${source_dir}"
        return 1
    fi

    safe_execute cd "$source_dir"

    # Build modules, keeping one core free. Declaration separated from the
    # command substitution so an nproc failure is not masked (SC2155).
    local cores
    cores=$(nproc)
    local jobs=$((cores > 1 ? cores - 1 : 1))
    log_info "Building kernel modules with ${jobs} parallel jobs"
    safe_execute make -j"${jobs}" modules

    # Install modules to staging area
    log_info "Installing modules to: ${modules_install_dir}"
    safe_mkdir "$modules_install_dir"
    safe_execute make modules_install INSTALL_MOD_PATH="$modules_install_dir"

    # Run depmod to create module dependencies
    local modules_dir="${modules_install_dir}/lib/modules/${version}"
    if [[ -d "$modules_dir" ]]; then
        log_info "Running depmod for module dependencies"
        safe_execute depmod -a -b "$modules_install_dir" "$version"
    fi

    log_info "Kernel modules build complete"
}
# Clean kernel build artifacts
#
# $1 - kernel working directory
# $2 - "true" to keep the downloaded source and only run `make clean`
#      (default: "false", which removes the whole directory)
function kernel_cleanup() {
    local target_dir="$1"
    local preserve_source="${2:-false}"

    section_header "Cleaning Kernel Build Artifacts"

    if [[ "$preserve_source" != "true" ]]; then
        log_info "Removing entire kernel directory"
        safe_rmdir "$target_dir"
    else
        log_info "Keeping source, cleaning build artifacts only"
        local tree="${target_dir}/current"
        if [[ -d "$tree" ]]; then
            safe_execute cd "$tree"
            safe_execute make clean
        fi
    fi

    log_info "Kernel cleanup complete"
}
# Export kernel helpers so they remain available in subshells spawned by the
# build scripts (e.g. inside safe_execute pipelines or containers)
export -f kernel_download_source kernel_apply_config kernel_modify_config_for_initramfs
export -f kernel_build_with_initramfs kernel_build_modules kernel_cleanup

367
scripts/lib/testing.sh Normal file
View File

@@ -0,0 +1,367 @@
#!/bin/bash
# Testing with QEMU and cloud-hypervisor
# Source common functions
LIB_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${LIB_SCRIPT_DIR}/common.sh"
# Testing configuration (each value can be overridden via the environment)
QEMU_MEMORY="${QEMU_MEMORY:-512M}"  # guest RAM for QEMU runs
QEMU_TIMEOUT="${QEMU_TIMEOUT:-60}"  # default per-test timeout in seconds
CLOUD_HYPERVISOR_MEMORY="${CLOUD_HYPERVISOR_MEMORY:-512M}"  # guest RAM for cloud-hypervisor runs
# Test kernel boot with QEMU
#
# $1 - kernel image to boot
# $2 - test type: basic | serial | interactive (default: basic)
# $3 - timeout in seconds (default: $QEMU_TIMEOUT)
#
# Validates inputs, then dispatches to the matching boot-test helper.
function testing_qemu_boot() {
    local kernel_image="$1"
    local mode="${2:-basic}"
    local limit="${3:-$QEMU_TIMEOUT}"

    section_header "Testing with QEMU"

    if [[ ! -f "$kernel_image" ]]; then
        log_error "Kernel file not found: ${kernel_image}"
        return 1
    fi

    # Check if QEMU is available
    if ! command_exists "qemu-system-x86_64"; then
        log_error "QEMU not found. Install with: apt-get install qemu-system-x86"
        return 1
    fi

    log_info "Kernel file: ${kernel_image}"
    log_info "Memory: ${QEMU_MEMORY}"
    log_info "Test type: ${mode}"
    log_info "Timeout: ${limit}s"

    # Dispatch to the requested test variant
    if [[ "$mode" == "basic" ]]; then
        testing_qemu_basic_boot "$kernel_image" "$limit"
    elif [[ "$mode" == "serial" ]]; then
        testing_qemu_serial_boot "$kernel_image" "$limit"
    elif [[ "$mode" == "interactive" ]]; then
        testing_qemu_interactive_boot "$kernel_image"
    else
        log_error "Unknown QEMU test type: ${mode}"
        return 1
    fi
}
# Basic QEMU boot test (automated)
#
# $1 - kernel image to boot
# $2 - timeout in seconds
#
# Boots the kernel headless on a serial console, captures output to
# /tmp/qemu-boot.log and analyzes it for success/error indicators.
function testing_qemu_basic_boot() {
    local kernel_file="$1"
    local timeout="$2"

    log_info "Running basic QEMU boot test (${timeout}s timeout)"

    # Build the command as an array. The previous string form was expanded
    # unquoted, so the single quotes around -append were passed literally to
    # QEMU and the kernel cmdline word-split into separate bogus arguments;
    # a kernel path containing spaces would also have broken.
    local -a qemu_cmd=(
        qemu-system-x86_64
        -kernel "$kernel_file"
        -m "$QEMU_MEMORY"
        -nographic
        -serial mon:stdio
        -append 'console=ttyS0,115200 console=tty1 loglevel=7 panic=10'
        -no-reboot
    )
    log_info "QEMU command: ${qemu_cmd[*]}"

    # Run with timeout; tee keeps the console output for later analysis
    if timeout "$timeout" "${qemu_cmd[@]}" 2>&1 | tee /tmp/qemu-boot.log; then
        log_info "QEMU boot test completed"
    else
        local exit_code=$?
        # timeout(1) exits 124 when the limit is hit, which is expected for a
        # kernel that boots and then idles
        if [[ $exit_code -eq 124 ]]; then
            log_info "QEMU boot test timed out (${timeout}s) - this may be normal"
        else
            log_error "QEMU boot test failed with exit code: ${exit_code}"
            return 1
        fi
    fi

    # Check boot log for success indicators
    testing_analyze_boot_log "/tmp/qemu-boot.log"
}
# QEMU serial console test
#
# $1 - kernel image to boot
# $2 - timeout in seconds
#
# Like the basic test but with a serial-focused cmdline (earlyprintk, no
# console blanking). Output is captured to /tmp/qemu-serial.log and analyzed.
function testing_qemu_serial_boot() {
    local kernel_file="$1"
    local timeout="$2"

    log_info "Running QEMU serial console test"

    # Command built as an array: the old string form was expanded unquoted, so
    # the quotes around -append reached QEMU literally and the cmdline split
    # into separate bogus arguments.
    local -a qemu_cmd=(
        qemu-system-x86_64
        -kernel "$kernel_file"
        -m "$QEMU_MEMORY"
        -nographic
        -serial stdio
        -append 'console=ttyS0,115200n8 console=tty1 consoleblank=0 earlyprintk=serial,ttyS0,115200n8 loglevel=7'
    )
    log_info "QEMU serial command: ${qemu_cmd[*]}"

    # Run with timeout and capture output
    timeout "$timeout" "${qemu_cmd[@]}" 2>&1 | tee /tmp/qemu-serial.log

    # Analyze serial output
    testing_analyze_boot_log "/tmp/qemu-serial.log"
}
# Interactive QEMU boot (no timeout)
#
# $1 - kernel image to boot
#
# Attaches the caller's terminal to the guest serial console; exit with
# Ctrl+A, X.
function testing_qemu_interactive_boot() {
    local kernel_file="$1"

    log_info "Starting interactive QEMU session"
    log_info "Use Ctrl+A, X to exit QEMU"

    # Command built as an array so a kernel path with spaces survives and the
    # -append argument reaches QEMU as a single word (the previous unquoted
    # string expansion passed the surrounding quotes literally).
    local -a qemu_cmd=(
        qemu-system-x86_64
        -kernel "$kernel_file"
        -m "$QEMU_MEMORY"
        -nographic
        -serial mon:stdio
        -append 'console=ttyS0,115200 console=tty1 loglevel=7'
    )
    log_info "QEMU command: ${qemu_cmd[*]}"

    # Run interactively
    safe_execute "${qemu_cmd[@]}"
}
# Test kernel boot with cloud-hypervisor
#
# $1 - kernel image to boot
# $2 - test type: basic | serial (default: basic)
# $3 - timeout in seconds (default: $QEMU_TIMEOUT)
#
# Validates inputs, then dispatches to the matching test helper.
function testing_cloud_hypervisor_boot() {
    local kernel_image="$1"
    local mode="${2:-basic}"
    local limit="${3:-$QEMU_TIMEOUT}"

    section_header "Testing with cloud-hypervisor"

    if [[ ! -f "$kernel_image" ]]; then
        log_error "Kernel file not found: ${kernel_image}"
        return 1
    fi

    # Check if cloud-hypervisor is available
    if ! command_exists "cloud-hypervisor"; then
        log_error "cloud-hypervisor not found. Install from: https://github.com/cloud-hypervisor/cloud-hypervisor"
        return 1
    fi

    log_info "Kernel file: ${kernel_image}"
    log_info "Memory: ${CLOUD_HYPERVISOR_MEMORY}"
    log_info "Test type: ${mode}"

    # Dispatch to the requested test variant
    if [[ "$mode" == "basic" ]]; then
        testing_cloud_hypervisor_basic "$kernel_image" "$limit"
    elif [[ "$mode" == "serial" ]]; then
        testing_cloud_hypervisor_serial "$kernel_image" "$limit"
    else
        log_error "Unknown cloud-hypervisor test type: ${mode}"
        return 1
    fi
}
# Basic cloud-hypervisor test
#
# $1 - kernel image to boot
# $2 - timeout in seconds
#
# Boots the kernel and captures serial output to
# /tmp/cloud-hypervisor-boot.log for analysis.
function testing_cloud_hypervisor_basic() {
    local kernel_file="$1"
    local timeout="$2"

    log_info "Running basic cloud-hypervisor test"

    # Command built as an array. The previous string form was expanded
    # unquoted, so the single quotes around --cmdline were passed literally
    # and the kernel command line word-split into separate bogus arguments.
    local -a ch_cmd=(
        cloud-hypervisor
        --kernel "$kernel_file"
        --memory "size=${CLOUD_HYPERVISOR_MEMORY}"
        --serial tty
        --console off
        --cmdline 'console=ttyS0,115200 loglevel=7 panic=10'
    )
    log_info "cloud-hypervisor command: ${ch_cmd[*]}"

    # Run with timeout
    if timeout "$timeout" "${ch_cmd[@]}" 2>&1 | tee /tmp/cloud-hypervisor-boot.log; then
        log_info "cloud-hypervisor test completed"
    else
        local exit_code=$?
        # timeout(1) exits 124 when the limit is reached
        if [[ $exit_code -eq 124 ]]; then
            log_info "cloud-hypervisor test timed out (${timeout}s)"
        else
            log_error "cloud-hypervisor test failed with exit code: ${exit_code}"
            return 1
        fi
    fi

    # Analyze boot log
    testing_analyze_boot_log "/tmp/cloud-hypervisor-boot.log"
}
# cloud-hypervisor serial test
#
# $1 - kernel image to boot
# $2 - timeout in seconds
#
# Serial-focused variant; output is captured to
# /tmp/cloud-hypervisor-serial.log and analyzed.
function testing_cloud_hypervisor_serial() {
    local kernel_file="$1"
    local timeout="$2"

    log_info "Running cloud-hypervisor serial test"

    # Command built as an array: the old string form was expanded unquoted, so
    # the quotes around --cmdline were passed literally and the cmdline split.
    local -a ch_cmd=(
        cloud-hypervisor
        --kernel "$kernel_file"
        --memory "size=${CLOUD_HYPERVISOR_MEMORY}"
        --serial tty
        --console off
        --cmdline 'console=ttyS0,115200n8 earlyprintk=serial loglevel=7'
    )
    log_info "cloud-hypervisor serial command: ${ch_cmd[*]}"

    timeout "$timeout" "${ch_cmd[@]}" 2>&1 | tee /tmp/cloud-hypervisor-serial.log

    testing_analyze_boot_log "/tmp/cloud-hypervisor-serial.log"
}
# Analyze boot log for success/failure indicators
#
# $1 - path to a captured boot log
#
# Returns 0 when success indicators were found and no errors, 1 when error
# indicators were found (or the log is missing), 2 when inconclusive.
function testing_analyze_boot_log() {
    local log_file="$1"

    section_header "Analyzing Boot Log"

    if [[ ! -f "$log_file" ]]; then
        log_warn "Boot log file not found: ${log_file}"
        return 1
    fi

    # Declaration separated from command substitution (SC2155)
    local log_size
    log_size=$(get_file_size "$log_file")
    log_info "Boot log size: ${log_size}"

    # Success indicators (case-insensitive regex patterns)
    local success_patterns=(
        "zinit.*starting"
        "zinit.*initialized"
        "login:"
        "zero-os.*login:"
        "Alpine Linux"
        "Welcome to Alpine"
    )

    # Error indicators
    local error_patterns=(
        "Kernel panic"
        "kernel BUG"
        "Unable to mount root"
        "VFS: Cannot open root device"
        "not syncing"
        "Attempted to kill init"
    )

    local success_count=0
    local error_count=0
    local pattern

    # Check for success patterns. Increments use var=$((var + 1)) rather than
    # ((var++)): the post-increment form returns status 1 when the old value
    # is 0, which would abort the whole script under `set -e`.
    for pattern in "${success_patterns[@]}"; do
        if grep -i "$pattern" "$log_file" >/dev/null 2>&1; then
            log_info "✓ Found success indicator: ${pattern}"
            success_count=$((success_count + 1))
        fi
    done

    # Check for error patterns
    for pattern in "${error_patterns[@]}"; do
        if grep -i "$pattern" "$log_file" >/dev/null 2>&1; then
            log_error "✗ Found error indicator: ${pattern}"
            error_count=$((error_count + 1))
        fi
    done

    # Summary
    log_info "Boot log analysis:"
    log_info "  Success indicators: ${success_count}"
    log_info "  Error indicators: ${error_count}"

    if [[ $error_count -gt 0 ]]; then
        log_error "Boot test failed - errors detected in log"
        log_info "Check full log at: ${log_file}"
        return 1
    elif [[ $success_count -gt 0 ]]; then
        log_info "Boot test successful - system appears to be working"
        return 0
    else
        log_warn "Boot test inconclusive - no clear success/error indicators"
        return 2
    fi
}
# Run comprehensive test suite
#
# $1 - kernel image to test
# $2 - per-test timeout in seconds (default: 60)
#
# Runs QEMU basic + serial tests and, when available, the cloud-hypervisor
# basic test, then prints a summary. Returns 0 only if nothing failed.
function testing_run_all() {
    local kernel_file="$1"
    local test_timeout="${2:-60}"

    section_header "Running Comprehensive Test Suite"

    local test_results=()

    # Test with QEMU
    log_info "Running QEMU tests..."
    if testing_qemu_boot "$kernel_file" "basic" "$test_timeout"; then
        test_results+=("QEMU-basic: PASS")
    else
        test_results+=("QEMU-basic: FAIL")
    fi
    if testing_qemu_boot "$kernel_file" "serial" "$test_timeout"; then
        test_results+=("QEMU-serial: PASS")
    else
        test_results+=("QEMU-serial: FAIL")
    fi

    # Test with cloud-hypervisor (if available)
    if command_exists "cloud-hypervisor"; then
        log_info "Running cloud-hypervisor tests..."
        if testing_cloud_hypervisor_boot "$kernel_file" "basic" "$test_timeout"; then
            test_results+=("cloud-hypervisor-basic: PASS")
        else
            test_results+=("cloud-hypervisor-basic: FAIL")
        fi
    else
        log_warn "cloud-hypervisor not available, skipping tests"
        test_results+=("cloud-hypervisor: SKIPPED")
    fi

    # Report results
    section_header "Test Results Summary"
    local passed=0
    local failed=0
    local skipped=0
    local result

    # Counters use var=$((var + 1)) instead of ((var++)): the latter returns
    # status 1 when the old value is 0 and would abort under `set -e`.
    for result in "${test_results[@]}"; do
        if [[ "$result" =~ PASS ]]; then
            log_info "$result"
            passed=$((passed + 1))
        elif [[ "$result" =~ FAIL ]]; then
            log_error "$result"
            failed=$((failed + 1))
        else
            log_warn "$result"
            skipped=$((skipped + 1))
        fi
    done

    log_info "Test summary: ${passed} passed, ${failed} failed, ${skipped} skipped"

    if [[ $failed -eq 0 ]]; then
        log_info "All tests passed successfully"
        return 0
    else
        log_error "Some tests failed"
        return 1
    fi
}
# Export testing helpers so they remain available in subshells spawned by the
# build and CI scripts
export -f testing_qemu_boot testing_qemu_basic_boot testing_qemu_serial_boot testing_qemu_interactive_boot
export -f testing_cloud_hypervisor_boot testing_cloud_hypervisor_basic testing_cloud_hypervisor_serial
export -f testing_analyze_boot_log testing_run_all

199
scripts/test.sh Executable file
View File

@@ -0,0 +1,199 @@
#!/bin/bash
# Test script for Zero OS Alpine Initramfs
set -euo pipefail
# Script directory and project root detection
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# Source common functions and testing library
source "${SCRIPT_DIR}/lib/common.sh"
source "${SCRIPT_DIR}/lib/testing.sh"
# Test configuration (TEST_TIMEOUT/TEST_RUNNER overridable via environment)
DEFAULT_KERNEL="${PROJECT_ROOT}/dist/vmlinuz.efi"  # kernel produced by scripts/build.sh
TEST_TIMEOUT="${TEST_TIMEOUT:-60}"  # per-test timeout in seconds
TEST_RUNNER="${TEST_RUNNER:-qemu}"  # NOTE(review): appears unused here — runners come from CLI flags; confirm before removing
# Display usage information
#
# Prints the CLI help for this script to stdout. The here-doc body is runtime
# output and is emitted verbatim ($0 is expanded by the unquoted delimiter).
function show_usage() {
    cat << EOF
Zero OS Test Script
Usage: $0 [OPTIONS]
Options:
--qemu Test with QEMU (default)
--cloud-hypervisor Test with cloud-hypervisor
--all Test with all available runners
--serial Use serial console test
--interactive Interactive test session
--timeout SECONDS Test timeout in seconds (default: 60)
--kernel FILE Kernel file to test (default: dist/vmlinuz.efi)
--help Show this help message
Examples:
$0 # Basic QEMU test
$0 --qemu --serial # QEMU with serial console
$0 --cloud-hypervisor # Test with cloud-hypervisor
$0 --all # Test with all available runners
$0 --interactive # Interactive QEMU session
EOF
}
# Parse command line arguments
#
# Populates the globals KERNEL_FILE, TEST_TYPE, TEST_TIMEOUT and the
# TEST_RUNNERS array for run_tests. Exits with usage info on bad input.
function parse_arguments() {
    local kernel_file="$DEFAULT_KERNEL"
    local test_type="basic"
    local runners=()

    while [[ $# -gt 0 ]]; do
        case $1 in
            --qemu)
                runners+=("qemu")
                shift
                ;;
            --cloud-hypervisor)
                runners+=("cloud-hypervisor")
                shift
                ;;
            --all)
                runners=("qemu" "cloud-hypervisor")
                shift
                ;;
            --serial)
                test_type="serial"
                shift
                ;;
            --interactive)
                test_type="interactive"
                shift
                ;;
            --timeout)
                # Without this check a missing value aborts under `set -u`
                # with an opaque "unbound variable" error
                if [[ $# -lt 2 ]]; then
                    log_error "Option $1 requires an argument"
                    show_usage
                    exit 1
                fi
                TEST_TIMEOUT="$2"
                shift 2
                ;;
            --kernel)
                if [[ $# -lt 2 ]]; then
                    log_error "Option $1 requires an argument"
                    show_usage
                    exit 1
                fi
                kernel_file="$2"
                shift 2
                ;;
            --help|-h)
                show_usage
                exit 0
                ;;
            *)
                log_error "Unknown option: $1"
                show_usage
                exit 1
                ;;
        esac
    done

    # Default to QEMU if no runner specified
    if [[ ${#runners[@]} -eq 0 ]]; then
        runners=("qemu")
    fi

    # Export parsed scalars for any child processes. Bash cannot export arrays
    # to the environment, so TEST_RUNNERS is a plain global (same-shell use).
    export KERNEL_FILE="$kernel_file"
    export TEST_TYPE="$test_type"
    TEST_RUNNERS=("${runners[@]}")
}
# Run tests with specified runners
#
# Reads the globals KERNEL_FILE, TEST_TYPE, TEST_TIMEOUT and TEST_RUNNERS set
# by parse_arguments. Returns 0 when every runner passed, 1 otherwise.
function run_tests() {
    section_header "Running Zero OS Boot Tests"

    # Verify kernel file exists
    if [[ ! -f "$KERNEL_FILE" ]]; then
        log_error "Kernel file not found: ${KERNEL_FILE}"
        log_info "Run './scripts/build.sh' first to build the kernel"
        return 1
    fi

    # Declaration separated from command substitution so a get_file_size
    # failure is not masked by `local` (SC2155)
    local kernel_size
    kernel_size=$(get_file_size "$KERNEL_FILE")
    log_info "Testing kernel: ${KERNEL_FILE} (${kernel_size})"
    log_info "Test type: ${TEST_TYPE}"
    log_info "Test timeout: ${TEST_TIMEOUT}s"

    local test_results=()
    local overall_success=true
    local runner result

    # Run tests with each specified runner
    for runner in "${TEST_RUNNERS[@]}"; do
        log_info "Testing with runner: ${runner}"
        case "$runner" in
            "qemu")
                if testing_qemu_boot "$KERNEL_FILE" "$TEST_TYPE" "$TEST_TIMEOUT"; then
                    test_results+=("QEMU-${TEST_TYPE}: PASS")
                else
                    test_results+=("QEMU-${TEST_TYPE}: FAIL")
                    overall_success=false
                fi
                ;;
            "cloud-hypervisor")
                if command_exists "cloud-hypervisor"; then
                    if testing_cloud_hypervisor_boot "$KERNEL_FILE" "$TEST_TYPE" "$TEST_TIMEOUT"; then
                        test_results+=("cloud-hypervisor-${TEST_TYPE}: PASS")
                    else
                        test_results+=("cloud-hypervisor-${TEST_TYPE}: FAIL")
                        overall_success=false
                    fi
                else
                    log_warn "cloud-hypervisor not available, skipping"
                    test_results+=("cloud-hypervisor: SKIPPED")
                fi
                ;;
            *)
                log_error "Unknown test runner: ${runner}"
                test_results+=("${runner}: ERROR")
                overall_success=false
                ;;
        esac
    done

    # Report final results
    section_header "Test Results Summary"
    for result in "${test_results[@]}"; do
        if [[ "$result" =~ PASS ]]; then
            log_info "$result"
        elif [[ "$result" =~ FAIL ]]; then
            log_error "$result"
        elif [[ "$result" =~ SKIPPED ]]; then
            log_warn "$result"
        else
            log_error "$result"
        fi
    done

    if [[ "$overall_success" == "true" ]]; then
        log_info "All tests completed successfully"
        return 0
    else
        log_error "Some tests failed"
        return 1
    fi
}
# Main function
#
# Parses CLI arguments, prints the banner and runs the configured boot tests.
# A run_tests failure aborts the script via `set -e` before the final header.
function main() {
    # Parse command line arguments
    parse_arguments "$@"

    echo ""
    echo "=================================================="
    echo "== ZERO-OS BOOT TESTING =="
    echo "=================================================="
    echo ""

    # Run tests
    run_tests

    section_header "Testing Complete"
}
# Run main function with all arguments
main "$@"