Added youki build and formatting of scripts
Some checks failed
Build Zero OS Initramfs / build (push) Has been cancelled
Build Zero OS Initramfs / test-matrix (qemu, basic) (push) Has been cancelled
Build Zero OS Initramfs / test-matrix (qemu, serial) (push) Has been cancelled

This commit is contained in:
2025-11-11 20:49:36 +01:00
parent 721e26a855
commit 947d156921
10 changed files with 1013 additions and 623 deletions

View File

@@ -401,6 +401,13 @@ function main_build_process() {
log_debug "stage_kernel_build: defaulting INITRAMFS_ARCHIVE=${INITRAMFS_ARCHIVE}"
fi
# Ensure FULL_KERNEL_VERSION is set for versioned output filename
if [[ -z "${FULL_KERNEL_VERSION:-}" ]]; then
FULL_KERNEL_VERSION=$(kernel_get_full_version "$KERNEL_VERSION" "$KERNEL_CONFIG")
export FULL_KERNEL_VERSION
log_debug "stage_kernel_build: resolved FULL_KERNEL_VERSION=${FULL_KERNEL_VERSION}"
fi
kernel_build_with_initramfs "$KERNEL_CONFIG" "$INITRAMFS_ARCHIVE" "$kernel_output"
export KERNEL_OUTPUT="$kernel_output"
}
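Since both checks above only fill in values when they are empty, the archive and version can also be pinned from the environment when invoking the build. A minimal sketch (the build.sh path is taken from the dev-container wrapper below; the archive path and version string are illustrative only):
INITRAMFS_ARCHIVE=dist/initramfs.cpio.xz \
FULL_KERNEL_VERSION=6.12.9-Zero-OS \
./scripts/build.sh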

View File

@@ -18,7 +18,7 @@ export DEBUG="${DEBUG:-1}"
source "${SCRIPT_DIR}/lib/common.sh"
function show_usage() {
cat <<EOF
Zero OS Development Container Manager
Usage: $0 [COMMAND]
@@ -70,10 +70,10 @@ function ensure_builder_image() {
function dev_container_start() {
section_header "Starting Development Container"
# Ensure builder image exists (handles clean --all case and short-name policy)
ensure_builder_image
# Check if container already exists
if podman container exists "$CONTAINER_NAME" 2>/dev/null; then
if podman container inspect "$CONTAINER_NAME" --format '{{.State.Status}}' | grep -q "running"; then
@@ -85,15 +85,16 @@ function dev_container_start() {
return 0
fi
fi
log_info "Creating new development container: ${CONTAINER_NAME}"
# Create persistent container with all necessary mounts and environment
local podman_args=(
run -d
--name "$CONTAINER_NAME"
--privileged
-v "${PROJECT_ROOT}:/workspace"
-v "$HOME/.ssh:root/.ssh"
-w /workspace
-e DEBUG=1
-e ALPINE_VERSION=3.22
@@ -115,7 +116,7 @@ function dev_container_start() {
)
safe_execute podman "${podman_args[@]}"
log_info "Development container started successfully"
log_info "Container name: ${CONTAINER_NAME}"
log_info "Access with: $0 shell"
@@ -123,7 +124,7 @@ function dev_container_start() {
function dev_container_stop() {
section_header "Stopping Development Container"
if podman container exists "$CONTAINER_NAME" 2>/dev/null; then
log_info "Stopping development container: ${CONTAINER_NAME}"
safe_execute podman stop "$CONTAINER_NAME"
@@ -135,17 +136,17 @@ function dev_container_stop() {
function dev_container_shell() {
section_header "Entering Development Container Shell"
if ! podman container exists "$CONTAINER_NAME" 2>/dev/null; then
log_info "Development container not found, starting..."
dev_container_start
fi
if ! podman container inspect "$CONTAINER_NAME" --format '{{.State.Status}}' | grep -q "running"; then
log_info "Starting stopped development container"
safe_execute podman start "$CONTAINER_NAME"
fi
log_info "Entering container shell (exit with 'exit' or Ctrl+D)"
# Use direct execution for interactive shell (don't use safe_execute)
exec podman exec -it "$CONTAINER_NAME" /bin/bash
@@ -153,56 +154,56 @@ function dev_container_shell() {
function dev_container_build() {
section_header "Running Build in Development Container"
if ! podman container exists "$CONTAINER_NAME" 2>/dev/null; then
log_info "Development container not found, starting..."
dev_container_start
fi
if ! podman container inspect "$CONTAINER_NAME" --format '{{.State.Status}}' | grep -q "running"; then
log_info "Starting stopped development container"
safe_execute podman start "$CONTAINER_NAME"
fi
log_info "Running build in persistent container (real-time output)"
log_info "Command: podman exec $CONTAINER_NAME ./scripts/build.sh $*"
# Use direct execution to show real-time output (bypass safe_execute)
podman exec "$CONTAINER_NAME" ./scripts/build.sh "$@"
local exit_code=$?
if [[ $exit_code -eq 0 ]]; then
log_info "Build completed successfully in container"
else
log_error "Build failed in container with exit code: $exit_code"
fi
return $exit_code
}
function dev_container_clean() {
section_header "Cleaning Development Container"
if podman container exists "$CONTAINER_NAME" 2>/dev/null; then
log_info "Removing existing development container"
safe_execute podman rm -f "$CONTAINER_NAME"
fi
log_info "Starting fresh development container"
dev_container_start
}
function dev_container_status() {
section_header "Development Container Status"
if podman container exists "$CONTAINER_NAME" 2>/dev/null; then
local status=$(podman container inspect "$CONTAINER_NAME" --format '{{.State.Status}}')
local created=$(podman container inspect "$CONTAINER_NAME" --format '{{.Created}}')
log_info "Container: ${CONTAINER_NAME}"
log_info "Status: ${status}"
log_info "Created: ${created}"
if [[ "$status" == "running" ]]; then
log_info "✓ Ready for development"
else
@@ -216,7 +217,7 @@ function dev_container_status() {
function dev_container_logs() {
section_header "Development Container Logs"
if podman container exists "$CONTAINER_NAME" 2>/dev/null; then
safe_execute podman logs "$CONTAINER_NAME"
else
@@ -228,39 +229,39 @@ function dev_container_logs() {
# Main function
function main() {
local command="${1:-help}"
case "$command" in
start)
dev_container_start
;;
stop)
dev_container_stop
;;
shell)
dev_container_shell
;;
build)
shift
dev_container_build "$@"
;;
clean)
dev_container_clean
;;
status)
dev_container_status
;;
logs)
dev_container_logs
;;
help | --help | -h)
show_usage
;;
*)
log_error "Unknown command: $command"
show_usage
exit 1
;;
esac
}
main "$@"
main "$@"

View File

@@ -195,6 +195,26 @@ function get_file_size() {
fi
}
# Get short git commit hash from a git repository directory
function get_git_commit_hash() {
local repo_dir="$1"
local short="${2:-true}" # Default to short hash
if [[ ! -d "$repo_dir/.git" ]]; then
echo "unknown"
return 1
fi
local hash
if [[ "$short" == "true" ]]; then
hash=$(cd "$repo_dir" && git rev-parse --short HEAD 2>/dev/null || echo "unknown")
else
hash=$(cd "$repo_dir" && git rev-parse HEAD 2>/dev/null || echo "unknown")
fi
echo "$hash"
}
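# Example usage, mirroring kernel.sh below (paths taken from this commit): resolve the
# short hash of the vendored zinit checkout, falling back to "unknown" outside a git repo:
#   zinit_hash=$(get_git_commit_hash "${COMPONENTS_DIR:-${PROJECT_ROOT}/components}/zinit")
#   full_hash=$(get_git_commit_hash "$repo_dir" false)   # any second arg other than "true" returns the full hash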
# Wait for file to exist with timeout
function wait_for_file() {
local file="$1"

File diff suppressed because it is too large

View File

@@ -223,20 +223,219 @@ function kernel_build_with_initramfs() {
output_dir=$(dirname "$output_abs")
safe_mkdir "$output_dir"
safe_copy "$kernel_image" "$output_abs"
# Also copy with versioned filename including kernel version and zinit hash
local full_kernel_version="${FULL_KERNEL_VERSION:-unknown}"
local zinit_hash="unknown"
local zinit_dir="${COMPONENTS_DIR:-${PROJECT_ROOT}/components}/zinit"
if [[ -d "$zinit_dir/.git" ]]; then
zinit_hash=$(get_git_commit_hash "$zinit_dir")
else
log_warn "zinit git directory not found at ${zinit_dir}, using 'unknown' for hash"
fi
# Create versioned filename: vmlinuz-{VERSION}-{ZINIT_HASH}.efi
local versioned_name="vmlinuz-${full_kernel_version}-${zinit_hash}.efi"
local versioned_output="${output_dir}/${versioned_name}"
safe_copy "$kernel_image" "$versioned_output"
# Verify final kernel
local kernel_size
kernel_size=$(get_file_size "$output_abs")
local versioned_size
versioned_size=$(get_file_size "$versioned_output")
log_info "Kernel build complete:"
log_info " Output file: ${output_abs}"
log_info " Versioned: ${versioned_output}"
log_info " Kernel size: ${kernel_size}"
log_info " Version: ${full_kernel_version}"
log_info " zinit hash: ${zinit_hash}"
# Verify initramfs is embedded
if strings "$output_file" | grep -q "initramfs"; then
log_info "✓ Initramfs appears to be embedded in kernel"
else
log_warn "Initramfs embedding verification inconclusive"
fi
# Upload versioned kernel to S3 if enabled
kernel_upload_to_s3 "$versioned_output" "$full_kernel_version" "$zinit_hash"
}
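# Illustrative shape of the versioned copy created above (version and hash values are
# made up; the real ones come from FULL_KERNEL_VERSION and get_git_commit_hash):
#   ${output_dir}/vmlinuz-<FULL_KERNEL_VERSION>-<zinit_hash>.efi
#   e.g. vmlinuz-6.12.9-Zero-OS-a1b2c3d.efi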
# Upload versioned kernel to S3 using MinIO client (mcli/mc)
function kernel_upload_to_s3() {
local kernel_file="$1"
local kernel_version="$2"
local zinit_hash="$3"
section_header "Uploading Kernel to S3"
# Check if upload is enabled
if [[ "${UPLOAD_KERNEL:-false}" != "true" ]]; then
log_info "UPLOAD_KERNEL not enabled; skipping kernel upload"
return 0
fi
# Verify kernel file exists
if [[ ! -f "$kernel_file" ]]; then
log_error "Kernel file not found: ${kernel_file}"
return 1
fi
# Load S3 configuration from rfs.conf
local rfs_conf="${PROJECT_ROOT}/config/rfs.conf"
local rfs_conf_example="${PROJECT_ROOT}/config/rfs.conf.example"
if [[ -f "$rfs_conf" ]]; then
# shellcheck source=/dev/null
source "$rfs_conf"
log_info "Loaded S3 config from: ${rfs_conf}"
elif [[ -f "$rfs_conf_example" ]]; then
# shellcheck source=/dev/null
source "$rfs_conf_example"
log_warn "Using example S3 config: ${rfs_conf_example}"
else
log_error "No S3 config found (config/rfs.conf or config/rfs.conf.example)"
return 1
fi
# Validate required S3 variables
for var in S3_ENDPOINT S3_BUCKET S3_PREFIX S3_ACCESS_KEY S3_SECRET_KEY; do
if [[ -z "${!var}" ]]; then
log_error "Missing required S3 variable: ${var}"
return 1
fi
done
# Detect MinIO client binary (mcli or mc)
local mcli_bin=""
if command -v mcli >/dev/null 2>&1; then
mcli_bin="mcli"
elif command -v mc >/dev/null 2>&1; then
mcli_bin="mc"
else
log_warn "MinIO Client not found (expected mcli or mc); skipping kernel upload"
return 0
fi
log_info "Using MinIO client: ${mcli_bin}"
# Setup S3 alias
log_info "Configuring S3 alias..."
safe_execute "${mcli_bin}" alias set rfs "${S3_ENDPOINT}" "${S3_ACCESS_KEY}" "${S3_SECRET_KEY}"
# Construct destination path: rfs/{bucket}/{prefix}/kernel/{versioned_filename}
local kernel_filename
kernel_filename=$(basename "$kernel_file")
local kernel_subpath="${KERNEL_SUBPATH:-kernel}"
local mcli_dst="rfs/${S3_BUCKET}/${S3_PREFIX%/}/${kernel_subpath%/}/${kernel_filename}"
# Upload kernel
log_info "Uploading: ${kernel_file} -> ${mcli_dst}"
safe_execute "${mcli_bin}" cp "${kernel_file}" "${mcli_dst}"
log_info "✓ Kernel uploaded successfully"
log_info " Version: ${kernel_version}"
log_info " zinit: ${zinit_hash}"
log_info " S3 path: ${mcli_dst}"
# Generate and upload kernel index
kernel_generate_index "${mcli_bin}" "${S3_BUCKET}" "${S3_PREFIX}" "${kernel_subpath}"
}
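# Minimal sketch of the config/rfs.conf this upload expects (every value below is a
# placeholder, not taken from this repository); note that UPLOAD_KERNEL=true must already
# be set in the environment, since it is checked before the config is sourced:
#   S3_ENDPOINT="https://s3.example.com"
#   S3_BUCKET="zos"
#   S3_PREFIX="flists/zosbuilder"
#   S3_ACCESS_KEY="<access-key>"
#   S3_SECRET_KEY="<secret-key>"
#   KERNEL_SUBPATH="kernel"   # optional, defaults to "kernel"
# The versioned kernel then lands at
#   rfs/${S3_BUCKET}/${S3_PREFIX}/${KERNEL_SUBPATH}/vmlinuz-<version>-<hash>.efi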
# Generate kernel index file from S3 listing and upload it
function kernel_generate_index() {
local mcli_bin="$1"
local bucket="$2"
local prefix="$3"
local kernel_subpath="$4"
section_header "Generating Kernel Index"
# Construct S3 path for listing
local s3_path="rfs/${bucket}/${prefix%/}/${kernel_subpath%/}/"
log_info "Listing kernels from: ${s3_path}"
# List all files in the kernel directory
local ls_output
if ! ls_output=$("${mcli_bin}" ls "${s3_path}" 2>&1); then
log_warn "Failed to list S3 kernel directory, index not generated"
log_debug "mcli ls output: ${ls_output}"
return 0
fi
# Parse output and extract kernel filenames matching vmlinuz-*
local kernels=()
while IFS= read -r line; do
# mcli ls output format: [DATE TIME TZ] SIZE FILENAME
# Extract filename (last field)
local filename
filename=$(echo "$line" | awk '{print $NF}')
# Filter for vmlinuz files (both .efi and without extension)
if [[ "$filename" =~ ^vmlinuz-.* ]]; then
kernels+=("$filename")
fi
done <<< "$ls_output"
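# Illustrative listing line and what the awk extraction above keeps (values made up,
# format per the comment in the loop):
#   [2025-11-11 19:49:36 UTC]  28MiB vmlinuz-6.12.9-Zero-OS-a1b2c3d.efi
#   -> filename="vmlinuz-6.12.9-Zero-OS-a1b2c3d.efi", kept because it matches ^vmlinuz-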
if [[ ${#kernels[@]} -eq 0 ]]; then
log_warn "No kernels found in S3 path: ${s3_path}"
return 0
fi
log_info "Found ${#kernels[@]} kernel(s)"
# Create index files in dist directory
local index_dir="${DIST_DIR:-${PROJECT_ROOT}/dist}"
local text_index="${index_dir}/kernels.txt"
local json_index="${index_dir}/kernels.json"
# Generate text index (one kernel per line, sorted)
printf "%s\n" "${kernels[@]}" | sort -r > "$text_index"
log_info "Created text index: ${text_index}"
# Generate JSON index (array of kernel filenames)
{
echo "{"
echo " \"kernels\": ["
local first=true
for kernel in $(printf "%s\n" "${kernels[@]}" | sort -r); do
if [[ "$first" == "true" ]]; then
first=false
else
echo ","
fi
printf " \"%s\"" "$kernel"
done
echo ""
echo " ],"
echo " \"updated\": \"$(date -u +%Y-%m-%dT%H:%M:%SZ)\","
echo " \"count\": ${#kernels[@]}"
echo "}"
} > "$json_index"
log_info "Created JSON index: ${json_index}"
# Upload both index files to S3
log_info "Uploading kernel index files to S3..."
local text_dst="${s3_path}kernels.txt"
local json_dst="${s3_path}kernels.json"
if safe_execute "${mcli_bin}" cp "$text_index" "$text_dst"; then
log_info "✓ Uploaded text index: ${text_dst}"
else
log_warn "Failed to upload text index"
fi
if safe_execute "${mcli_bin}" cp "$json_index" "$json_dst"; then
log_info "✓ Uploaded JSON index: ${json_dst}"
else
log_warn "Failed to upload JSON index"
fi
log_info "Kernel index generation complete"
}
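# Illustrative contents of the two indexes generated above (filenames hypothetical,
# JSON shown abbreviated; order follows the reverse sort used above):
#   kernels.txt:
#     vmlinuz-6.12.9-Zero-OS-a1b2c3d.efi
#     vmlinuz-6.12.4-Zero-OS-9f8e7d6.efi
#   kernels.json:
#     { "kernels": [ "vmlinuz-6.12.9-Zero-OS-a1b2c3d.efi", "vmlinuz-6.12.4-Zero-OS-9f8e7d6.efi" ],
#       "updated": "2025-11-11T19:49:36Z", "count": 2 }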
# Build and install modules in container for proper dependency resolution