Change pack source: install all linux-firmware* packages in the container and pack from /lib/firmware via [bash.rfs_common_install_all_alpine_firmware_packages()](scripts/rfs/common.sh:290), used by [bash.pack-firmware.sh](scripts/rfs/pack-firmware.sh:21). At runtime, overmount the firmware flist on /lib/firmware by updating [sh.firmware.sh](config/zinit/init/firmware.sh:10). Update the docs to reflect the /lib/firmware mount and the new pack strategy.
#!/bin/bash
# Common helpers for RFS flist creation and manifest patching
# - No changes to the existing build pipeline; this library is used by standalone scripts under scripts/rfs
# - Computes FULL_KERNEL_VERSION from configs (never uses uname -r)
# - Loads S3 (garage) config and builds the rfs S3 store URI
# - Locates the rfs binary and source trees for modules/firmware
# - Provides a helper to patch the .fl (sqlite) stores table to use an HTTPS web endpoint

set -euo pipefail

# Resolve project root from this file location
rfs_common_project_root() {
    local here
    here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    # scripts/rfs -> project root is two levels up
    dirname "$(dirname "$here")"
}

PROJECT_ROOT="${PROJECT_ROOT:-$(rfs_common_project_root)}"
SCRIPT_DIR="${PROJECT_ROOT}/scripts"
LIB_DIR="${SCRIPT_DIR}/lib"

# Bring in logging and helpers if available
if [[ -f "${LIB_DIR}/common.sh" ]]; then
    # shellcheck source=/dev/null
    source "${LIB_DIR}/common.sh"
else
    # Minimal logging fallbacks
    log_info() { echo "[INFO] $*"; }
    log_warn() { echo "[WARN] $*" >&2; }
    log_error() { echo "[ERROR] $*" >&2; }
    log_debug() { if [[ "${DEBUG:-0}" == "1" ]]; then echo "[DEBUG] $*"; fi; }
    safe_execute() { echo "[EXEC] $*"; "$@"; }
fi

# -----------------------------------------------------------------------------
# Config loaders
# -----------------------------------------------------------------------------

# Load build.conf (KERNEL_VERSION, etc.) and compute FULL_KERNEL_VERSION
# FULL_KERNEL_VERSION = KERNEL_VERSION + CONFIG_LOCALVERSION from config/kernel.config
rfs_common_load_build_kernel_version() {
    local build_conf="${PROJECT_ROOT}/config/build.conf"
    local kcfg="${PROJECT_ROOT}/config/kernel.config"

    if [[ -f "$build_conf" ]]; then
        # shellcheck source=/dev/null
        source "$build_conf"
    else
        log_error "Missing build config: ${build_conf}"
        return 1
    fi

    local base_ver="${KERNEL_VERSION:-}"
    if [[ -z "$base_ver" ]]; then
        log_error "KERNEL_VERSION not set in ${build_conf}"
        return 1
    fi

    if [[ ! -f "$kcfg" ]]; then
        log_error "Missing kernel config: ${kcfg}"
        return 1
    fi

    # Extract CONFIG_LOCALVERSION="..."; the value may include a leading '-'
    local localver
    localver="$(grep -E '^CONFIG_LOCALVERSION=' "$kcfg" | cut -d'"' -f2 || true)"
    local full_ver="${base_ver}${localver}"

    if [[ -z "$full_ver" ]]; then
        log_error "Failed to compute FULL_KERNEL_VERSION from configs"
        return 1
    fi

    export FULL_KERNEL_VERSION="$full_ver"
    log_info "Computed FULL_KERNEL_VERSION: ${FULL_KERNEL_VERSION}"
}

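# Example (illustrative values): with KERNEL_VERSION="6.12.44" in config/build.conf
# and CONFIG_LOCALVERSION="-Zero-OS" in config/kernel.config:
#   rfs_common_load_build_kernel_version
#   echo "$FULL_KERNEL_VERSION"   # -> 6.12.44-Zero-OS
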
# Load RFS S3 configuration from config/rfs.conf or config/rfs.conf.example
# Required:
#   S3_ENDPOINT, S3_REGION, S3_BUCKET, S3_PREFIX, S3_ACCESS_KEY, S3_SECRET_KEY
rfs_common_load_rfs_s3_config() {
    local conf_real="${PROJECT_ROOT}/config/rfs.conf"
    local conf_example="${PROJECT_ROOT}/config/rfs.conf.example"

    if [[ -f "$conf_real" ]]; then
        # shellcheck source=/dev/null
        source "$conf_real"
        log_info "Loaded RFS S3 config: ${conf_real}"
    elif [[ -f "$conf_example" ]]; then
        # shellcheck source=/dev/null
        source "$conf_example"
        log_warn "Using example RFS config: ${conf_example} (override with config/rfs.conf)"
    else
        log_error "No RFS config found. Create config/rfs.conf or config/rfs.conf.example"
        return 1
    fi

    # Ensure the variables are defined (safe under set -u); environment values
    # fill in anything the sourced config leaves unset
    S3_ENDPOINT="${S3_ENDPOINT:-}"
    S3_REGION="${S3_REGION:-}"
    S3_BUCKET="${S3_BUCKET:-}"
    S3_PREFIX="${S3_PREFIX:-}"
    S3_ACCESS_KEY="${S3_ACCESS_KEY:-}"
    S3_SECRET_KEY="${S3_SECRET_KEY:-}"

    local missing=0
    for v in S3_ENDPOINT S3_REGION S3_BUCKET S3_PREFIX S3_ACCESS_KEY S3_SECRET_KEY; do
        if [[ -z "${!v}" ]]; then
            log_error "Missing required S3 config variable: ${v}"
            missing=1
        fi
    done
    if [[ $missing -ne 0 ]]; then
        log_error "Incomplete RFS S3 configuration"
        return 1
    fi

    export S3_ENDPOINT S3_REGION S3_BUCKET S3_PREFIX S3_ACCESS_KEY S3_SECRET_KEY

    # Validate that placeholders are not left as defaults
    if [[ "${S3_ACCESS_KEY}" == "REPLACE_ME" || "${S3_SECRET_KEY}" == "REPLACE_ME" ]]; then
        log_error "S3_ACCESS_KEY / S3_SECRET_KEY in config/rfs.conf are placeholders. Please set real credentials."
        return 1
    fi

    # Optional read-only credentials for the route URL; default to the write keys
    READ_ACCESS_KEY="${READ_ACCESS_KEY:-$S3_ACCESS_KEY}"
    READ_SECRET_KEY="${READ_SECRET_KEY:-$S3_SECRET_KEY}"
    # Garage blob route path (default /blobs)
    ROUTE_PATH="${ROUTE_PATH:-/blobs}"
    export READ_ACCESS_KEY READ_SECRET_KEY ROUTE_PATH
}

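# Minimal config/rfs.conf sketch (values are placeholders, not a real deployment):
#   S3_ENDPOINT="http://garage.example.com:3900"
#   S3_REGION="garage"
#   S3_BUCKET="zos"
#   S3_PREFIX="flists/zosbuilder"
#   S3_ACCESS_KEY="REPLACE_ME"
#   S3_SECRET_KEY="REPLACE_ME"
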
# Build rfs S3 store URI from loaded S3 config
# Format: s3://ACCESS:SECRET@HOST:PORT/BUCKET/PREFIX?region=REGION
rfs_common_build_s3_store_uri() {
    if [[ -z "${S3_ENDPOINT:-}" ]]; then
        log_error "S3_ENDPOINT not set; call rfs_common_load_rfs_s3_config first"
        return 1
    fi

    # Strip scheme from endpoint
    local hostport="${S3_ENDPOINT#http://}"
    hostport="${hostport#https://}"
    hostport="${hostport%/}"
    # Ensure explicit port; default to Garage S3 port 3900 when missing
    if [[ "$hostport" != *:* ]]; then
        hostport="${hostport}:3900"
    fi

    # Minimal percent-encoding for ':' and '@' in credentials
    local ak="${S3_ACCESS_KEY//:/%3A}"
    ak="${ak//@/%40}"
    local sk="${S3_SECRET_KEY//:/%3A}"
    sk="${sk//@/%40}"

    local path="${S3_BUCKET}/${S3_PREFIX}"
    path="${path#/}" # avoid a doubled leading slash

    local uri="s3://${ak}:${sk}@${hostport}/${path}?region=${S3_REGION}"
    export RFS_S3_STORE_URI="$uri"
    log_info "Constructed RFS S3 store URI: ${RFS_S3_STORE_URI}"
}

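# Example result (placeholder credentials): with the sketch config above, this builds
#   s3://KEY:SECRET@garage.example.com:3900/zos/flists/zosbuilder?region=garage
# Note the URI embeds the write credentials, so treat the log output as sensitive.
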
# -----------------------------------------------------------------------------
# Tool discovery
# -----------------------------------------------------------------------------

# Locate rfs binary: prefer PATH, fall back to the components build
rfs_common_locate_rfs() {
    if command -v rfs >/dev/null 2>&1; then
        RFS_BIN="$(command -v rfs)"
        export RFS_BIN
        log_info "Using rfs from PATH: ${RFS_BIN}"
        return 0
    fi

    # Fallback to components
    local rtarget
    if [[ -f "${PROJECT_ROOT}/config/build.conf" ]]; then
        # shellcheck source=/dev/null
        source "${PROJECT_ROOT}/config/build.conf"
    fi
    rtarget="${RUST_TARGET:-x86_64-unknown-linux-musl}"

    local candidate="${PROJECT_ROOT}/components/rfs/target/${rtarget}/release/rfs"
    if [[ -x "$candidate" ]]; then
        export RFS_BIN="$candidate"
        log_info "Using rfs from components: ${RFS_BIN}"
        return 0
    fi

    log_error "rfs binary not found. Build it via the components stage or install it in PATH."
    return 1
}

# Ensure sqlite3 is available (for manifest patching)
rfs_common_require_sqlite3() {
    if ! command -v sqlite3 >/dev/null 2>&1; then
        log_error "sqlite3 not found. Install sqlite3 to patch .fl manifest stores."
        return 1
    fi
}

# -----------------------------------------------------------------------------
# Source tree discovery
# -----------------------------------------------------------------------------

# Locate modules directory for FULL_KERNEL_VERSION
# Priority:
#   1) /lib/modules/<FULL_KERNEL_VERSION>
#   2) ${PROJECT_ROOT}/kernel/lib/modules/<FULL_KERNEL_VERSION>
#   3) ${PROJECT_ROOT}/initramfs/lib/modules/<FULL_KERNEL_VERSION>
rfs_common_locate_modules_dir() {
    local kver="${1:-${FULL_KERNEL_VERSION:-}}"
    if [[ -z "$kver" ]]; then
        log_error "rfs_common_locate_modules_dir: FULL_KERNEL_VERSION is empty"
        return 1
    fi

    local candidates=(
        "/lib/modules/${kver}"
        "${PROJECT_ROOT}/kernel/lib/modules/${kver}"
        "${PROJECT_ROOT}/initramfs/lib/modules/${kver}"
    )
    local d
    for d in "${candidates[@]}"; do
        if [[ -d "$d" ]]; then
            export MODULES_DIR="$d"
            log_info "Found modules dir: ${MODULES_DIR}"
            return 0
        fi
    done

    log_error "No modules directory found for ${kver}. Checked: ${candidates[*]}"
    return 1
}

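# Example (assumes rfs_common_load_build_kernel_version has run):
#   rfs_common_locate_modules_dir
#   ls "${MODULES_DIR}/modules.dep"
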
# Locate firmware directory
# Priority:
#   1) ${PROJECT_ROOT}/firmware
#   2) ${PROJECT_ROOT}/initramfs/lib/firmware
#   3) /lib/firmware
rfs_common_locate_firmware_dir() {
    local candidates=(
        "${PROJECT_ROOT}/firmware"
        "${PROJECT_ROOT}/initramfs/lib/firmware"
        "/lib/firmware"
    )
    local d
    for d in "${candidates[@]}"; do
        if [[ -d "$d" ]]; then
            export FIRMWARE_DIR="$d"
            log_info "Found firmware dir: ${FIRMWARE_DIR}"
            return 0
        fi
    done

    log_error "No firmware directory found. Checked: ${candidates[*]}"
    return 1
}

# Ensure precomputed modules metadata are present (to avoid depmod at boot)
rfs_common_validate_modules_metadata() {
    local md="${MODULES_DIR:-}"
    if [[ -z "$md" || ! -d "$md" ]]; then
        log_error "MODULES_DIR not set or invalid"
        return 1
    fi
    local ok=1
    local files=(modules.dep modules.dep.bin modules.alias modules.alias.bin modules.symbols.bin modules.order modules.builtin modules.builtin.modinfo)
    local missing=()
    for f in "${files[@]}"; do
        if [[ ! -f "${md}/${f}" ]]; then
            missing+=("$f")
            ok=0
        fi
    done

    if [[ $ok -eq 1 ]]; then
        log_info "Modules metadata present in ${md}"
        return 0
    else
        log_warn "Missing some modules metadata in ${md}: ${missing[*]}"
        # Not fatal; rfs pack can proceed, but boot may require depmod -A or a full scan
        return 0
    fi
}

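# If metadata is missing, it can be regenerated before packing (sketch; the -b base
# dir must be the tree that contains lib/modules/<version>, e.g. the initramfs root):
#   depmod -b "${PROJECT_ROOT}/initramfs" "${FULL_KERNEL_VERSION}"
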
# -----------------------------------------------------------------------------
# Alpine firmware installation in container (for flist packing)
# -----------------------------------------------------------------------------
# Install all available linux-firmware* packages into the build container root.
# This prepares a full /lib/firmware tree to be packed into an RFS flist.
# Notes:
#   - Requires root inside the container (the builder image runs scripts as root).
#   - Uses apk search to enumerate all linux-firmware packages; installs them all.
rfs_common_install_all_alpine_firmware_packages() {
    log_info "Installing all Alpine linux-firmware* packages into container (/lib/firmware)"
    # Ensure the apk index is fresh
    safe_execute apk update

    # Enumerate firmware packages. Output of 'apk search' looks like:
    #   linux-firmware-rtl_nic-20231030-r0
    # Strip the version suffix to get package names acceptable to apk add.
    local pkgs_raw
    pkgs_raw="$(apk search 'linux-firmware*' 2>/dev/null || true)"

    if [[ -z "${pkgs_raw// }" ]]; then
        log_warn "No linux-firmware* packages found via apk search"
    fi

    # Build a unique package list without versions:
    #   1) take the first column (package-with-version)
    #   2) strip the trailing '-<digits...>' (version/revision)
    #   3) dedupe
    local pkgs=()
    while IFS= read -r line; do
        [[ -z "${line// }" ]] && continue
        local name="${line%% *}"
        name="$(echo "$name" | sed -E 's/-[0-9].*$//')"
        if [[ -n "$name" ]]; then
            pkgs+=("$name")
        fi
    done <<< "$pkgs_raw"

    if [[ ${#pkgs[@]} -eq 0 ]]; then
        log_warn "Firmware package list is empty after parsing; attempting base meta-package 'linux-firmware'"
        pkgs=("linux-firmware")
    fi

    # Deduplicate while preserving order
    local seen="" final_pkgs=()
    for p in "${pkgs[@]}"; do
        if ! grep -qx "$p" <<< "$seen"; then
            final_pkgs+=("$p")
            seen+=$'\n'"$p"
        fi
    done

    log_info "Installing ${#final_pkgs[@]} firmware packages:"
    for p in "${final_pkgs[@]}"; do
        log_info "  - $p"
    done

    # Install all firmware packages; tolerate individual failures
    # (not all subpackages exist on all architectures)
    local failed=()
    for p in "${final_pkgs[@]}"; do
        if apk add --no-cache "$p" >/dev/null 2>&1; then
            log_debug "Installed: $p"
        else
            log_warn "Failed to install: $p"
            failed+=("$p")
        fi
    done

    # Quick check that /lib/firmware exists and is populated
    if [[ -d "/lib/firmware" ]]; then
        local cnt
        cnt=$(find /lib/firmware -type f | wc -l || echo 0)
        log_info "/lib/firmware population: ${cnt} files"
        if [[ "$cnt" -eq 0 ]]; then
            log_warn "/lib/firmware exists but is empty after installation"
        fi
    else
        log_error "/lib/firmware directory not found after firmware installation"
        return 1
    fi

    if [[ ${#failed[@]} -gt 0 ]]; then
        log_warn "Some firmware packages failed to install (${#failed[@]} failures); proceeding with the available set"
    fi
    return 0
}

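# Sketch of the intended pack flow (how scripts/rfs/pack-firmware.sh is expected to
# chain these helpers; the exact rfs invocation and flags live in that script):
#   rfs_common_install_all_alpine_firmware_packages
#   rfs_common_locate_firmware_dir   # resolves to /lib/firmware in the container
#   manifest="$(rfs_common_prepare_output "firmware-$(rfs_common_firmware_tag).fl")"
#   "$RFS_BIN" pack -m "$manifest" -s "$RFS_S3_STORE_URI" "$FIRMWARE_DIR"
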
# -----------------------------------------------------------------------------
# Manifest patching (sqlite .fl)
# -----------------------------------------------------------------------------

# Patch the .fl manifest's stores table to use an HTTPS web endpoint
# Args:
#   $1 = path to .fl file
#   $2 = HTTPS base (e.g., https://hub.grid.tf/zos/zosbuilder) - no trailing slash
#   $3 = keep_s3_fallback ("true"/"false") - if true, retain existing s3:// row(s)
rfs_common_patch_flist_stores() {
    local fl="$1"
    local web_base="$2"
    local keep_s3="${3:-false}"

    if [[ ! -f "$fl" ]]; then
        log_error "Manifest file not found: ${fl}"
        return 1
    fi
    if [[ -z "$web_base" ]]; then
        log_error "Web endpoint base is empty"
        return 1
    fi

    rfs_common_require_sqlite3

    # Ensure no trailing slash
    web_base="${web_base%/}"

    # Heuristic: if the stores table exists, update any s3:// URI to web_base, or insert web_base if none.
    local has_table
    has_table="$(sqlite3 "$fl" "SELECT name FROM sqlite_master WHERE type='table' AND name='stores';" || true)"
    if [[ -z "$has_table" ]]; then
        log_error "stores table not found in manifest (unexpected schema): ${fl}"
        return 1
    fi

    # How many s3 stores exist?
    local s3_count
    s3_count="$(sqlite3 "$fl" "SELECT COUNT(*) FROM stores WHERE uri LIKE 's3://%';" || echo 0)"

    if [[ "${keep_s3}" != "true" ]]; then
        # Replace all s3://... URIs with the HTTPS web base
        log_info "Replacing ${s3_count} s3 store row(s) with HTTPS: ${web_base}"
        sqlite3 "$fl" "UPDATE stores SET uri='${web_base}' WHERE uri LIKE 's3://%';"
    else
        # Keep s3, but ensure an HTTPS row exists
        local https_count
        https_count="$(sqlite3 "$fl" "SELECT COUNT(*) FROM stores WHERE uri='${web_base}';" || echo 0)"
        if [[ "$https_count" -eq 0 ]]; then
            log_info "Adding HTTPS store ${web_base} alongside existing s3 store(s)"
            # Best-effort insert; the table schema may include more columns.
            # Assume a minimal schema: (id INTEGER PRIMARY KEY, uri TEXT UNIQUE).
            # If this fails, adjust the store with the rfs CLI instead.
            set +e
            sqlite3 "$fl" "INSERT OR IGNORE INTO stores(uri) VALUES('${web_base}');"
            local rc=$?
            set -e
            if [[ $rc -ne 0 ]]; then
                log_warn "Could not INSERT into stores; schema may differ. Consider using the rfs CLI to add the store."
            fi
        else
            log_info "HTTPS store already present in manifest"
        fi
    fi

    log_info "Patched stores in manifest: ${fl}"
    return 0
}

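# Example (assumes the minimal schema noted above; the manifest name is illustrative):
#   rfs_common_patch_flist_stores "dist/flists/firmware-20240101.fl" \
#       "https://hub.grid.tf/zos/zosbuilder" "false"
#   sqlite3 "dist/flists/firmware-20240101.fl" "SELECT uri FROM stores;"
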
# -----------------------------------------------------------------------------
# Manifest route URL patching (sqlite .fl) - use read-only credentials
# -----------------------------------------------------------------------------

# Build the route URL for the flist 'route' table using read-only keys
# Result example:
#   s3://READ_KEY:READ_SECRET@host:3900/blobs?region=garage
rfs_common_build_route_url() {
    # Ensure sqlite is available for the later patch step
    rfs_common_require_sqlite3

    # Defaults applicable to Garage
    local route_region="${ROUTE_REGION:-garage}"
    local route_path="${ROUTE_PATH:-/blobs}"

    # Derive host:port from ROUTE_ENDPOINT or S3_ENDPOINT
    local endpoint="${ROUTE_ENDPOINT:-${S3_ENDPOINT:-}}"
    if [[ -z "$endpoint" ]]; then
        log_error "No ROUTE_ENDPOINT or S3_ENDPOINT set; cannot build route URL"
        return 1
    fi
    local hostport="${endpoint#http://}"
    hostport="${hostport#https://}"
    hostport="${hostport%/}"
    # Ensure explicit port; default to Garage S3 port 3900 when missing
    if [[ "$hostport" != *:* ]]; then
        hostport="${hostport}:3900"
    fi

    # Percent-encode credentials minimally for ':' and '@'
    local rak="${READ_ACCESS_KEY//:/%3A}"
    rak="${rak//@/%40}"
    local rsk="${READ_SECRET_KEY//:/%3A}"
    rsk="${rsk//@/%40}"

    # Normalize the route path (ensure a leading slash)
    if [[ "$route_path" != /* ]]; then
        route_path="/${route_path}"
    fi

    local url="s3://${rak}:${rsk}@${hostport}${route_path}?region=${route_region}"
    export RFS_ROUTE_URL="$url"
    log_info "Constructed route URL for flist: ${RFS_ROUTE_URL}"
}

# Patch the 'route' table URL inside the .fl manifest to use the read-only key URL
# Args:
#   $1 = path to .fl file
rfs_common_patch_flist_route_url() {
    local fl="$1"
    if [[ -z "${RFS_ROUTE_URL:-}" ]]; then
        log_error "RFS_ROUTE_URL is empty; call rfs_common_build_route_url first"
        return 1
    fi
    if [[ ! -f "$fl" ]]; then
        log_error "Manifest file not found: ${fl}"
        return 1
    fi

    rfs_common_require_sqlite3

    # Ensure the 'route' table exists
    local has_route
    has_route="$(sqlite3 "$fl" "SELECT name FROM sqlite_master WHERE type='table' AND name='route';" || true)"
    if [[ -z "$has_route" ]]; then
        log_error "route table not found in manifest (unexpected schema): ${fl}"
        return 1
    fi

    log_info "Updating route.url to: ${RFS_ROUTE_URL}"
    sqlite3 "$fl" "UPDATE route SET url='${RFS_ROUTE_URL}';"
    log_info "Patched route URL in manifest: ${fl}"
}

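# Example (READ_ACCESS_KEY / READ_SECRET_KEY fall back to the write keys unless set
# in config/rfs.conf):
#   rfs_common_build_route_url
#   rfs_common_patch_flist_route_url "dist/flists/modules-6.12.44-Zero-OS.fl"
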
# -----------------------------------------------------------------------------
# Packaging helpers
# -----------------------------------------------------------------------------

# Ensure the output directory exists and echo the final manifest path
# Args:
#   $1 = basename for manifest (e.g., modules-6.12.44-Zero-OS.fl)
rfs_common_prepare_output() {
    local base="$1"
    local outdir="${PROJECT_ROOT}/dist/flists"
    mkdir -p "$outdir"
    echo "${outdir}/${base}"
}

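# Example:
#   manifest="$(rfs_common_prepare_output "modules-${FULL_KERNEL_VERSION}.fl")"
#   # -> ${PROJECT_ROOT}/dist/flists/modules-6.12.44-Zero-OS.fl
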
# Sanitize the firmware tag, or generate a date-based tag (YYYYMMDD)
rfs_common_firmware_tag() {
    local tag="${FIRMWARE_TAG:-}"
    if [[ -n "$tag" ]]; then
        # Replace path-unfriendly characters
        tag="${tag//[^A-Za-z0-9._-]/_}"
        echo "$tag"
    else
        date -u +%Y%m%d
    fi
}

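# Example:
#   FIRMWARE_TAG="v1.0/beta" rfs_common_firmware_tag   # -> v1.0_beta
#   rfs_common_firmware_tag                            # -> e.g. 20240101 (UTC date)
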
# If executed directly, show a quick status summary
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
    log_info "rfs-common self-check..."
    rfs_common_load_build_kernel_version
    rfs_common_load_rfs_s3_config
    rfs_common_build_s3_store_uri
    rfs_common_locate_rfs
    rfs_common_locate_modules_dir "${FULL_KERNEL_VERSION}"
    rfs_common_validate_modules_metadata
    rfs_common_locate_firmware_dir
    log_info "All checks passed."
    log_info "FULL_KERNEL_VERSION=${FULL_KERNEL_VERSION}"
    log_info "RFS_S3_STORE_URI=${RFS_S3_STORE_URI}"
    log_info "MODULES_DIR=${MODULES_DIR}"
    log_info "FIRMWARE_DIR=${FIRMWARE_DIR}"
    log_info "RFS_BIN=${RFS_BIN}"
fi