feat: first-draft preview-capable zosstorage

- CLI: add topology selection (-t/--topology), preview flags (--show/--report), and removable policy override (--allow-removable) (src/cli/args.rs)
- Config: built-in sensible defaults; deterministic overlays for logging, fstab, removable, topology (src/config/loader.rs)
- Device: discovery via /proc + /sys with include/exclude regex and removable policy (src/device/discovery.rs)
- Idempotency: detection via blkid; safe emptiness checks (src/idempotency/mod.rs)
- Partition: topology-driven planning (Single, DualIndependent, BtrfsRaid1, SsdHddBcachefs) (src/partition/plan.rs)
- FS: planning + creation (mkfs.vfat, mkfs.btrfs, bcachefs format) and UUID capture via blkid (src/fs/plan.rs)
- Orchestrator: pre-flight with preview JSON (disks, partition_plan, filesystems_planned, mount scheme). Skips emptiness in preview; supports stdout+file (src/orchestrator/run.rs)
- Util/Logging/Types/Errors: process execution, tracing, shared types (src/util/mod.rs, src/logging/mod.rs, src/types.rs, src/errors.rs)
- Docs: add README with exhaustive usage and preview JSON shape (README.md)

Builds and unit tests pass: discovery, util, idempotency helpers, and fs parser tests.
This commit is contained in:
2025-09-29 11:37:07 +02:00
commit 507bc172c2
38 changed files with 6558 additions and 0 deletions

121
src/cli/args.rs Normal file
View File

@@ -0,0 +1,121 @@
// REGION: API
// api: cli::LogLevelArg { Error, Warn, Info, Debug }
// api: cli::Cli { config: Option<String>, log_level: LogLevelArg, log_to_file: bool, fstab: bool, force: bool }
// api: cli::from_args() -> crate::cli::Cli
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Define non-interactive CLI flags mirroring kernel cmdline semantics.
// - Provide a stable parsing entry (from_args) suitable for initramfs.
// Non-goals: config validation, IO, or side effects beyond parsing.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: add --dry-run to support planning without changes (future).
// ext: add --report-path to override JSON report location (future).
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: no interactive prompts; default values are explicit; parsing errors should be clear.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: clap parsing errors are emitted by clap; higher layers should handle exit strategy.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: consider hidden/unstable flags gated by build features for developers.
// REGION: TODO-END
//! CLI definition mirroring kernel cmdline semantics; non-interactive.
use clap::{Parser, ValueEnum};
// Log verbosity accepted on the command line.
// NOTE: plain `//` comments are used deliberately — `///` doc comments on a
// clap ValueEnum become user-visible help text and would change --help output.
#[derive(Debug, Clone, Copy, ValueEnum)]
#[value(rename_all = "kebab_case")]
pub enum LogLevelArg {
    Error,
    Warn,
    // Default level (see Cli::log_level default_value_t).
    Info,
    Debug,
}
impl std::fmt::Display for LogLevelArg {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match self {
LogLevelArg::Error => "error",
LogLevelArg::Warn => "warn",
LogLevelArg::Info => "info",
LogLevelArg::Debug => "debug",
};
f.write_str(s)
}
}
/// Topology argument (maps to config Topology with snake_case semantics).
// NOTE: `//` comments on variants by design — `///` would surface in clap's
// generated help for possible values.
#[derive(Debug, Clone, Copy, ValueEnum)]
#[value(rename_all = "kebab_case")]
pub enum TopologyArg {
    Single,
    DualIndependent,
    // SSD + HDD paired via bcachefs (SSD cache/promote, HDD backing).
    SsdHddBcachefs,
    // Two-disk btrfs RAID1 (disk count enforced at runtime, not here).
    BtrfsRaid1,
}
impl std::fmt::Display for TopologyArg {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match self {
TopologyArg::Single => "single",
TopologyArg::DualIndependent => "dual_independent",
TopologyArg::SsdHddBcachefs => "ssd_hdd_bcachefs",
TopologyArg::BtrfsRaid1 => "btrfs_raid1",
};
f.write_str(s)
}
}
/// zosstorage - one-shot disk initializer for initramfs.
// The `///` comments on fields below are clap help text (runtime behavior);
// review notes are added as `//` comments so --help output is unchanged.
#[derive(Debug, Parser)]
#[command(name = "zosstorage", disable_help_subcommand = true)]
pub struct Cli {
    /// Path to YAML configuration (mirrors kernel cmdline key 'zosstorage.config=')
    #[arg(short = 'c', long = "config")]
    pub config: Option<String>,
    /// Log level: error, warn, info, debug
    #[arg(short = 'l', long = "log-level", value_enum, default_value_t = LogLevelArg::Info)]
    pub log_level: LogLevelArg,
    /// Also log to /run/zosstorage/zosstorage.log
    #[arg(short = 'L', long = "log-to-file", default_value_t = false)]
    pub log_to_file: bool,
    /// Enable writing /etc/fstab entries
    #[arg(short = 's', long = "fstab", default_value_t = false)]
    pub fstab: bool,
    /// Select topology (overrides config topology)
    #[arg(short = 't', long = "topology", value_enum)]
    pub topology: Option<TopologyArg>,
    /// Present but non-functional; returns unimplemented error
    // config::load_and_merge rejects this flag early with Error::Unimplemented.
    #[arg(short = 'f', long = "force")]
    pub force: bool,
    /// Allow removable devices (e.g., USB sticks) to be considered during discovery
    /// Overrides config.device_selection.allow_removable when provided
    #[arg(long = "allow-removable", default_value_t = false)]
    pub allow_removable: bool,
    /// Print detection and planning summary as JSON to stdout (non-default)
    #[arg(long = "show", default_value_t = false)]
    pub show: bool,
    /// Write detection/planning JSON report to the given path (overrides config.report.path)
    #[arg(long = "report")]
    pub report: Option<String>,
}
/// Parse CLI arguments (non-interactive; suitable for initramfs).
pub fn from_args() -> Cli {
Cli::parse()
}

13
src/cli/mod.rs Normal file
View File

@@ -0,0 +1,13 @@
//! CLI barrel module: re-export concrete args implementation from args.rs
//!
//! Keeps the API stable while avoiding a large mod.rs. See [src/cli/args.rs](args.rs) for details.
//
// REGION: API
// api: cli::args::*
// api: cli::Cli
// api: cli::from_args()
// REGION: API-END
pub mod args;
pub use args::*;

401
src/config/loader.rs Normal file
View File

@@ -0,0 +1,401 @@
//! Configuration loading, merging, and validation (loader).
//!
//! Precedence (highest to lowest):
//! - Kernel cmdline key `zosstorage.config=`
//! - CLI flags
//! - CLI-referenced config file (`--config <path>`)
//! - On-disk config file at /etc/zosstorage/config.yaml (if present)
//! - Built-in defaults
//!
//! See [docs/SCHEMA.md](../../docs/SCHEMA.md) for the schema details.
//
// REGION: API
// api: config::load_and_merge(cli: &crate::cli::Cli) -> crate::Result<crate::config::types::Config>
// api: config::validate(cfg: &crate::config::types::Config) -> crate::Result<()>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Load defaults, merge /etc config, optional CLI-referenced YAML, CLI flag overlays,
// and kernel cmdline (zosstorage.config=) into a final Config.
// - Validate structural and semantic correctness early.
// Non-goals: device probing, partitioning, filesystem operations.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: kernel cmdline URI schemes (e.g., http:, data:) can be added here.
// ext: alternate default config location via build-time feature or CLI.
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: precedence enforced (kernel > CLI flags > CLI --config > /etc file > defaults).
// safety: reserved GPT names and labels validated to avoid destructive operations later.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: serde_yaml::Error -> Error::Config
// errmap: std::io::Error (file read) -> Error::Config
// errmap: serde_json::Error (merge/convert) -> Error::Other(anyhow)
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: consider environment variable overlays if required.
// REGION: TODO-END
use std::fs;
use std::path::Path;
use crate::{cli::Cli, Error, Result};
use crate::types::*;
use serde_json::{Map, Value};
use base64::Engine as _;
/// Load defaults, merge on-disk config, overlay CLI, and finally kernel cmdline key.
/// Returns a validated Config on success.
///
/// Behavior:
/// - Starts from built-in defaults (documented in docs/SCHEMA.md)
/// - If /etc/zosstorage/config.yaml exists, merge it
/// - If CLI --config is provided, merge that (overrides file defaults)
/// - If kernel cmdline provides `zosstorage.config=...`, merge that last (highest precedence)
/// - Returns Error::Unimplemented when --force is used
///
/// Note: merge order is significant — later merges win, so the CLI flag
/// overlay (step 4) beats the CLI-referenced file (step 3), and the kernel
/// cmdline source (step 5) beats everything.
pub fn load_and_merge(cli: &Cli) -> Result<Config> {
    // --force is advertised but deliberately not implemented yet; fail fast.
    if cli.force {
        return Err(Error::Unimplemented("--force flag is not implemented"));
    }
    // 1) Start with defaults (serialized to JSON Value for generic merging)
    let mut merged = to_value(default_config())?;
    // 2) Merge default on-disk config if present
    let default_cfg_path = "/etc/zosstorage/config.yaml";
    if Path::new(default_cfg_path).exists() {
        let v = load_yaml_value(default_cfg_path)?;
        merge_value(&mut merged, v);
    }
    // 3) Merge CLI referenced config (if any)
    if let Some(cfg_path) = &cli.config {
        let v = load_yaml_value(cfg_path)?;
        merge_value(&mut merged, v);
    }
    // 4) Overlay CLI flags (non-path flags)
    let cli_overlay = cli_overlay_value(cli);
    merge_value(&mut merged, cli_overlay);
    // 5) Merge kernel cmdline referenced config (if any) — highest precedence
    if let Some(src) = kernel_cmdline_config_source()? {
        match src {
            KernelConfigSource::Path(kpath) => {
                let v = load_yaml_value(&kpath)?;
                merge_value(&mut merged, v);
            }
            KernelConfigSource::Data(yaml) => {
                let v: serde_json::Value = serde_yaml::from_str(&yaml)
                    .map_err(|e| Error::Config(format!("failed to parse YAML from data: URL: {}", e)))?;
                merge_value(&mut merged, v);
            }
        }
    }
    // Finalize: deserialize the merged tree into a typed Config and validate it.
    let cfg: Config = serde_json::from_value(merged).map_err(|e| Error::Other(e.into()))?;
    validate(&cfg)?;
    Ok(cfg)
}
/// Validate semantic correctness of the configuration.
///
/// Checks logging level, device-selection thresholds, partition sizing, the
/// reserved GPT names and filesystem labels, mount/report paths, and
/// topology-specific constraints. Returns Error::Validation describing the
/// first violation found.
pub fn validate(cfg: &Config) -> Result<()> {
    // Logging: level must be one of the four tokens LogLevelArg can emit.
    match cfg.logging.level.as_str() {
        "error" | "warn" | "info" | "debug" => {}
        other => return Err(Error::Validation(format!("invalid logging.level: {other}"))),
    }
    // Device selection
    if cfg.device_selection.include_patterns.is_empty() {
        return Err(Error::Validation(
            "device_selection.include_patterns must not be empty".into(),
        ));
    }
    if cfg.device_selection.min_size_gib == 0 {
        return Err(Error::Validation(
            "device_selection.min_size_gib must be >= 1".into(),
        ));
    }
    // Partitioning
    if cfg.partitioning.alignment_mib < 1 {
        return Err(Error::Validation(
            "partitioning.alignment_mib must be >= 1".into(),
        ));
    }
    // BIOS boot size only matters when the partition is enabled.
    if cfg.partitioning.bios_boot.enabled && cfg.partitioning.bios_boot.size_mib < 1 {
        return Err(Error::Validation(
            "partitioning.bios_boot.size_mib must be >= 1 when enabled".into(),
        ));
    }
    if cfg.partitioning.esp.size_mib < 1 {
        return Err(Error::Validation(
            "partitioning.esp.size_mib must be >= 1".into(),
        ));
    }
    // Reserved GPT names: these fixed values are part of the on-disk contract
    // and must not be overridden by user configuration.
    if cfg.partitioning.esp.gpt_name != "zosboot" {
        return Err(Error::Validation(
            "partitioning.esp.gpt_name must be 'zosboot'".into(),
        ));
    }
    if cfg.partitioning.data.gpt_name != "zosdata" {
        return Err(Error::Validation(
            "partitioning.data.gpt_name must be 'zosdata'".into(),
        ));
    }
    if cfg.partitioning.cache.gpt_name != "zoscache" {
        return Err(Error::Validation(
            "partitioning.cache.gpt_name must be 'zoscache'".into(),
        ));
    }
    // BIOS boot name is also 'zosboot' per current assumption
    if cfg.partitioning.bios_boot.gpt_name != "zosboot" {
        return Err(Error::Validation(
            "partitioning.bios_boot.gpt_name must be 'zosboot'".into(),
        ));
    }
    // Reserved filesystem labels (ZOSBOOT for ESP, ZOSDATA for all data FS).
    if cfg.filesystem.vfat.label != "ZOSBOOT" {
        return Err(Error::Validation(
            "filesystem.vfat.label must be 'ZOSBOOT'".into(),
        ));
    }
    if cfg.filesystem.btrfs.label != "ZOSDATA" {
        return Err(Error::Validation(
            "filesystem.btrfs.label must be 'ZOSDATA'".into(),
        ));
    }
    if cfg.filesystem.bcachefs.label != "ZOSDATA" {
        return Err(Error::Validation(
            "filesystem.bcachefs.label must be 'ZOSDATA'".into(),
        ));
    }
    // Mount scheme
    if cfg.mount.base_dir.trim().is_empty() {
        return Err(Error::Validation("mount.base_dir must not be empty".into()));
    }
    // Topology-specific quick checks (basic for now)
    match cfg.topology {
        Topology::Single => {} // nothing special
        Topology::DualIndependent => {}
        Topology::SsdHddBcachefs => {}
        Topology::BtrfsRaid1 => {
            // No enforced requirement here beyond presence of two disks at runtime.
            if cfg.filesystem.btrfs.raid_profile != "raid1" && cfg.filesystem.btrfs.raid_profile != "none" {
                return Err(Error::Validation(
                    "filesystem.btrfs.raid_profile must be 'none' or 'raid1'".into(),
                ));
            }
        }
    }
    // Report path
    if cfg.report.path.trim().is_empty() {
        return Err(Error::Validation("report.path must not be empty".into()));
    }
    Ok(())
}
// ----------------------- helpers -----------------------
fn to_value<T: serde::Serialize>(t: T) -> Result<Value> {
serde_json::to_value(t).map_err(|e| Error::Other(e.into()))
}
fn load_yaml_value(path: &str) -> Result<Value> {
let s = fs::read_to_string(path)
.map_err(|e| Error::Config(format!("failed to read config file {}: {}", path, e)))?;
// Load as generic serde_json::Value for merging flexibility
let v: serde_json::Value = serde_yaml::from_str(&s)
.map_err(|e| Error::Config(format!("failed to parse YAML {}: {}", path, e)))?;
Ok(v)
}
/// Merge b into a in-place:
/// - Objects are merged key-by-key (recursively)
/// - Arrays and scalars replace
fn merge_value(a: &mut Value, b: Value) {
    match b {
        Value::Object(incoming) => {
            if let Value::Object(existing) = a {
                // Both sides are objects: recurse per key, inserting new keys.
                for (key, sub) in incoming {
                    match existing.get_mut(&key) {
                        Some(slot) => merge_value(slot, sub),
                        None => {
                            existing.insert(key, sub);
                        }
                    }
                }
            } else {
                // Target is not an object: the incoming object replaces it wholesale.
                *a = Value::Object(incoming);
            }
        }
        // Arrays and scalars always replace the target.
        other => *a = other,
    }
}
/// Produce a JSON overlay from CLI flags.
/// Only sets fields that should override defaults when present.
fn cli_overlay_value(cli: &Cli) -> Value {
    let mut overlay = Map::new();

    // Logging is always overlaid so the effective level/to_file are deterministic.
    let mut log_section = Map::new();
    log_section.insert("level".to_string(), Value::String(cli.log_level.to_string()));
    log_section.insert("to_file".to_string(), Value::Bool(cli.log_to_file));
    overlay.insert("logging".to_string(), Value::Object(log_section));

    // --fstab => mount.fstab_enabled = true (only when the flag is given).
    if cli.fstab {
        let mut mount_section = Map::new();
        mount_section.insert("fstab_enabled".to_string(), Value::Bool(true));
        overlay.insert("mount".to_string(), Value::Object(mount_section));
    }

    // --allow-removable => device_selection.allow_removable = true.
    if cli.allow_removable {
        let mut selection_section = Map::new();
        selection_section.insert("allow_removable".to_string(), Value::Bool(true));
        overlay.insert("device_selection".to_string(), Value::Object(selection_section));
    }

    // --topology => topology = "<snake_case variant>".
    if let Some(topo) = cli.topology {
        overlay.insert("topology".to_string(), Value::String(topo.to_string()));
    }

    Value::Object(overlay)
}
/// Source of a configuration referenced on the kernel cmdline.
enum KernelConfigSource {
    /// Filesystem path to a YAML config (plain path or from a file: URI).
    Path(String),
    /// Raw YAML from a data: URL payload after decoding (if base64-encoded).
    Data(String),
}
/// Resolve a config from kernel cmdline key `zosstorage.config=`.
/// Supports:
/// - absolute paths (e.g., /run/zos.yaml)
/// - file:/absolute/path
/// - data:application/x-yaml;base64,BASE64CONTENT
/// Returns Ok(None) when key absent.
fn kernel_cmdline_config_source() -> Result<Option<KernelConfigSource>> {
let cmdline = fs::read_to_string("/proc/cmdline").unwrap_or_default();
for token in cmdline.split_whitespace() {
if let Some(rest) = token.strip_prefix("zosstorage.config=") {
let mut val = rest.to_string();
// Trim surrounding quotes if any
if (val.starts_with('"') && val.ends_with('"')) || (val.starts_with('\'') && val.ends_with('\'')) {
val = val[1..val.len() - 1].to_string();
}
if let Some(path) = val.strip_prefix("file:") {
return Ok(Some(KernelConfigSource::Path(path.to_string())));
}
if let Some(data_url) = val.strip_prefix("data:") {
// data:[<mediatype>][;base64],<data>
// Find comma separating the header and payload
if let Some(idx) = data_url.find(',') {
let (header, payload) = data_url.split_at(idx);
let payload = &payload[1..]; // skip the comma
let is_base64 = header.split(';').any(|seg| seg.eq_ignore_ascii_case("base64"));
let yaml = if is_base64 {
let decoded = base64::engine::general_purpose::STANDARD
.decode(payload.as_bytes())
.map_err(|e| Error::Config(format!("invalid base64 in data: URL: {}", e)))?;
String::from_utf8(decoded)
.map_err(|e| Error::Config(format!("data: URL payload not UTF-8: {}", e)))?
} else {
payload.to_string()
};
return Ok(Some(KernelConfigSource::Data(yaml)));
} else {
return Err(Error::Config("malformed data: URL (missing comma)".into()));
}
}
// Treat as direct path
return Ok(Some(KernelConfigSource::Path(val)));
}
}
Ok(None)
}
/// Built-in defaults for the entire configuration (schema version 1).
///
/// These values intentionally satisfy validate(): the GPT names and labels
/// below are the reserved values that validation enforces.
fn default_config() -> Config {
    Config {
        version: 1,
        logging: LoggingConfig {
            level: "info".into(),
            to_file: false,
        },
        device_selection: DeviceSelection {
            // Whole-disk SATA/SCSI, NVMe namespace, and virtio device nodes.
            include_patterns: vec![
                String::from(r"^/dev/sd\w+$"),
                String::from(r"^/dev/nvme\w+n\d+$"),
                String::from(r"^/dev/vd\w+$"),
            ],
            // Pseudo devices that must never be considered for initialization.
            exclude_patterns: vec![
                String::from(r"^/dev/ram\d+$"),
                String::from(r"^/dev/zram\d+$"),
                String::from(r"^/dev/loop\d+$"),
                String::from(r"^/dev/fd\d+$"),
            ],
            allow_removable: false,
            min_size_gib: 10,
        },
        topology: Topology::Single,
        partitioning: Partitioning {
            alignment_mib: 1,
            require_empty_disks: true,
            bios_boot: BiosBootSpec {
                enabled: true,
                size_mib: 1,
                gpt_name: "zosboot".into(),
            },
            esp: EspSpec {
                // 512 MiB EFI System Partition.
                size_mib: 512,
                label: "ZOSBOOT".into(),
                gpt_name: "zosboot".into(),
            },
            data: DataSpec {
                gpt_name: "zosdata".into(),
            },
            cache: CacheSpec {
                gpt_name: "zoscache".into(),
            },
        },
        filesystem: FsOptions {
            btrfs: BtrfsOptions {
                label: "ZOSDATA".into(),
                compression: "zstd:3".into(),
                raid_profile: "none".into(),
            },
            bcachefs: BcachefsOptions {
                label: "ZOSDATA".into(),
                cache_mode: "promote".into(),
                compression: "zstd".into(),
                checksum: "crc32c".into(),
            },
            vfat: VfatOptions {
                label: "ZOSBOOT".into(),
            },
        },
        mount: MountScheme {
            base_dir: "/var/cache".into(),
            scheme: MountSchemeKind::PerUuid,
            fstab_enabled: false,
        },
        report: ReportOptions {
            path: "/run/zosstorage/state.json".into(),
        },
    }
}

15
src/config/mod.rs Normal file
View File

@@ -0,0 +1,15 @@
//! Configuration module barrel.
//!
//! This module re-exports the config types and the loader/validator so callers
//! can `use zosstorage::config::*;` without caring about file layout.
//
// REGION: API
// api: config::types::*
// api: config::load_and_merge(cli: &crate::cli::Cli) -> crate::Result<crate::config::types::Config>
// api: config::validate(cfg: &crate::config::types::Config) -> crate::Result<()>
// REGION: API-END
pub mod loader;
pub use loader::{load_and_merge, validate};
pub use crate::types::*;

366
src/device/discovery.rs Normal file
View File

@@ -0,0 +1,366 @@
// REGION: API
// api: device::Disk { path: String, size_bytes: u64, rotational: bool, model: Option<String>, serial: Option<String> }
// api: device::DeviceFilter { include: Vec<regex::Regex>, exclude: Vec<regex::Regex>, min_size_gib: u64 }
// api: device::DeviceProvider::list_block_devices(&self) -> crate::Result<Vec<Disk>>
// api: device::DeviceProvider::probe_properties(&self, disk: &mut Disk) -> crate::Result<()>
// api: device::discover(filter: &DeviceFilter) -> crate::Result<Vec<Disk>>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Enumerate candidate block devices under /dev.
// - Filter using include/exclude regex and minimum size threshold.
// - Probe device properties (size, rotational, model, serial).
// Non-goals: partitioning, mkfs, or mounting.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: pluggable DeviceProvider to allow mocking in tests and alternative discovery backends.
// ext: future allowlist policies for removable media, device classes, or path patterns.
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: must not modify devices; read-only probing only.
// safety: ensure pseudodevices (/dev/ram*, /dev/zram*, /dev/loop*, /dev/fd*, /dev/dm-*, /dev/md*) are excluded by default.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: IO and parsing errors -> crate::Error::Device with context.
// REGION: ERROR_MAPPING-END
//! Device discovery and filtering for zosstorage.
//!
//! Exposes abstractions to enumerate and filter block devices under /dev,
//! with compiled include/exclude regexes and size thresholds.
//!
//! See device::Disk and device::discover.
#![allow(dead_code)]
use crate::{Error, Result};
use regex::Regex;
use std::fs;
use std::path::{Path, PathBuf};
use tracing::{debug, trace, warn};
/// Eligible block device discovered on the system.
#[derive(Debug, Clone)]
pub struct Disk {
    /// Absolute device path (e.g., "/dev/nvme0n1").
    pub path: String,
    /// Device size in bytes (derived from the sysfs 512-byte sector count).
    pub size_bytes: u64,
    /// True for spinning disks; false for SSD/NVMe when detectable.
    pub rotational: bool,
    /// Optional model string (if available, from sysfs device/model).
    pub model: Option<String>,
    /// Optional serial string (if available, from sysfs device/serial).
    pub serial: Option<String>,
}
/// Compiled device filters derived from configuration patterns.
#[derive(Debug, Clone)]
pub struct DeviceFilter {
    /// Inclusion regexes (any match qualifies). If empty, default include any.
    pub include: Vec<Regex>,
    /// Exclusion regexes (any match disqualifies).
    pub exclude: Vec<Regex>,
    /// Minimum size in GiB to consider eligible.
    pub min_size_gib: u64,
    /// Allow removable devices (e.g., USB sticks). Default false.
    // Enforced by discover_with_provider via sysfs, not by matches().
    pub allow_removable: bool,
}
impl DeviceFilter {
    /// Returns true when `dev_path` passes the include/exclude regexes and
    /// `size_bytes` meets the minimum size threshold.
    ///
    /// Note: the removable-device policy is applied by the caller
    /// (discover_with_provider), not here.
    fn matches(&self, dev_path: &str, size_bytes: u64) -> bool {
        // Size filter: compare in bytes with integer math. This is exact for
        // all u64 sizes, unlike the f64 division it replaces (lossy > 2^53).
        // 1 GiB = 1024^3 bytes; saturating_mul guards absurd config values.
        let min_bytes = self.min_size_gib.saturating_mul(1024 * 1024 * 1024);
        if size_bytes < min_bytes {
            return false;
        }
        // Include filter: an empty include list means "include anything".
        if !self.include.is_empty() && !self.include.iter().any(|re| re.is_match(dev_path)) {
            return false;
        }
        // Exclude filter: any match disqualifies.
        !self.exclude.iter().any(|re| re.is_match(dev_path))
    }
}
/// Abstract provider to enable testing without real /dev access.
pub trait DeviceProvider {
    /// List candidate block devices (whole disks only; not partitions).
    fn list_block_devices(&self) -> Result<Vec<Disk>>;
    /// Probe and update additional properties for a disk.
    ///
    /// Default implementation is a no-op for providers that already fill all
    /// properties during enumeration (e.g., the sysfs-backed provider).
    fn probe_properties(&self, _disk: &mut Disk) -> Result<()> {
        Ok(())
    }
}
/// System-backed provider using /proc and /sys for discovery.
// Zero-sized: all state is read from the kernel interfaces on each call.
struct SysProvider;
impl SysProvider {
    /// Construct the system provider (no setup required).
    fn new() -> Self {
        SysProvider
    }
}
impl DeviceProvider for SysProvider {
    /// Enumerate whole-disk devices by parsing /proc/partitions and
    /// cross-checking each entry against sysfs and /dev. Read-only.
    fn list_block_devices(&self) -> Result<Vec<Disk>> {
        let mut disks = Vec::new();
        let content = fs::read_to_string("/proc/partitions")
            .map_err(|e| Error::Device(format!("/proc/partitions read error: {}", e)))?;
        for line in content.lines() {
            let line = line.trim();
            // Skip blank lines and the "major minor #blocks name" header row.
            if line.is_empty() || line.starts_with("major") {
                continue;
            }
            // Format: major minor #blocks name
            let parts: Vec<&str> = line.split_whitespace().collect();
            if parts.len() < 4 {
                continue;
            }
            let name = parts[3];
            // Exclude common pseudo and virtual device names
            if is_ignored_name(name) {
                trace!("skipping pseudo/ignored device: {}", name);
                continue;
            }
            // Skip partitions; we want whole-disk devices only
            if is_partition_sysfs(name) {
                trace!("skipping partition device: {}", name);
                continue;
            }
            // Ensure /dev node exists
            let dev_path = format!("/dev/{}", name);
            if !Path::new(&dev_path).exists() {
                trace!("skipping: missing device node {}", dev_path);
                continue;
            }
            // Read size in 512-byte sectors from sysfs, then convert to bytes
            let size_bytes = match read_disk_size_bytes(name) {
                Ok(sz) => sz,
                Err(e) => {
                    // A disk whose size cannot be read is dropped with a warning
                    // rather than failing the whole enumeration.
                    warn!("failed to read size for {}: {}", name, e);
                    continue;
                }
            };
            // Unknown rotational state is treated as non-rotational (SSD-like).
            let rotational = read_rotational(name).unwrap_or(false);
            let (model, serial) = read_model_serial(name);
            let disk = Disk {
                path: dev_path,
                size_bytes,
                rotational,
                model,
                serial,
            };
            disks.push(disk);
        }
        Ok(disks)
    }
    fn probe_properties(&self, _disk: &mut Disk) -> Result<()> {
        // Properties are filled during enumeration above.
        Ok(())
    }
}
/// Discover eligible disks according to the filter policy.
///
/// Returns Error::Device when no eligible disks are found.
pub fn discover(filter: &DeviceFilter) -> Result<Vec<Disk>> {
    // Delegate to the generic path with the real sysfs-backed provider.
    discover_with_provider(&SysProvider::new(), filter)
}
/// Core discovery pipeline, generic over the provider so tests can inject mocks:
/// enumerate, enrich, apply removable policy, then regex/size filtering.
fn discover_with_provider<P: DeviceProvider>(provider: &P, filter: &DeviceFilter) -> Result<Vec<Disk>> {
    let mut found = provider.list_block_devices()?;
    // Let the provider enrich each disk with any additional properties.
    for disk in found.iter_mut() {
        provider.probe_properties(disk)?;
    }
    // Removable policy first (sysfs lookup), then the declarative filter.
    let mut eligible: Vec<Disk> = Vec::new();
    for disk in found {
        if !filter.allow_removable {
            let removable = base_name(&disk.path)
                .map(|name| is_removable_sysfs(&name).unwrap_or(false))
                .unwrap_or(false);
            if removable {
                trace!("excluding removable device by policy: {}", disk.path);
                continue;
            }
        }
        if filter.matches(&disk.path, disk.size_bytes) {
            eligible.push(disk);
        }
    }
    if eligible.is_empty() {
        return Err(Error::Device("no eligible disks found after applying filters".to_string()));
    }
    debug!("eligible disks: {:?}", eligible.iter().map(|d| &d.path).collect::<Vec<_>>());
    Ok(eligible)
}
// =========================
// Sysfs helper functions
// =========================
/// True when `name` matches a pseudo/virtual device family that discovery
/// must never consider (loop, ram, zram, fd, device-mapper, md, sr).
fn is_ignored_name(name: &str) -> bool {
    const IGNORED_PREFIXES: [&str; 7] = ["loop", "ram", "zram", "fd", "dm-", "md", "sr"];
    IGNORED_PREFIXES.iter().any(|prefix| name.starts_with(prefix))
}
/// Sysfs directory for a block device name (/sys/class/block/<name>).
fn sys_block_path(name: &str) -> PathBuf {
    Path::new("/sys/class/block").join(name)
}
/// Final path component of a device path ("/dev/sda" -> "sda"), if any.
fn base_name(dev_path: &str) -> Option<String> {
    let file = Path::new(dev_path).file_name()?;
    Some(file.to_string_lossy().into_owned())
}
/// Returns Ok(true) if /sys/class/block/<name>/removable == "1"
fn is_removable_sysfs(name: &str) -> Result<bool> {
let p = sys_block_path(name).join("removable");
let s = fs::read_to_string(&p)
.map_err(|e| Error::Device(format!("read {} failed: {}", p.display(), e)))?;
Ok(s.trim() == "1")
}
/// A block device is a partition iff sysfs exposes a "partition" attribute for it.
fn is_partition_sysfs(name: &str) -> bool {
    sys_block_path(name).join("partition").exists()
}
fn read_disk_size_bytes(name: &str) -> Result<u64> {
let p = sys_block_path(name).join("size");
let sectors = fs::read_to_string(&p)
.map_err(|e| Error::Device(format!("read {} failed: {}", p.display(), e)))?;
let sectors: u64 = sectors.trim().parse().map_err(|e| {
Error::Device(format!("parse sectors for {} failed: {}", name, e))
})?;
Ok(sectors.saturating_mul(512))
}
fn read_rotational(name: &str) -> Result<bool> {
let p = sys_block_path(name).join("queue/rotational");
let s = fs::read_to_string(&p)
.map_err(|e| Error::Device(format!("read {} failed: {}", p.display(), e)))?;
Ok(s.trim() == "1")
}
/// Best-effort (model, serial) pair from the sysfs device directory.
/// Missing or empty attributes yield None rather than an error.
fn read_model_serial(name: &str) -> (Option<String>, Option<String>) {
    let device_dir = sys_block_path(name).join("device");
    // Some devices expose "vendor" + "model"; if model missing, try "device/model" anyway
    (
        read_optional_string(device_dir.join("model")),
        read_optional_string(device_dir.join("serial")),
    )
}
/// Read a sysfs attribute as an optional string: None when the file is
/// missing/unreadable or contains nothing but line terminators.
fn read_optional_string(p: PathBuf) -> Option<String> {
    let mut contents = fs::read_to_string(&p).ok()?;
    // Strip trailing newline/carriage-return characters only; interior
    // whitespace (e.g., padded model names) is preserved.
    let kept = contents.trim_end_matches(&['\n', '\r'][..]).len();
    contents.truncate(kept);
    if contents.is_empty() {
        None
    } else {
        Some(contents)
    }
}
// =========================
// Tests (mock provider)
// =========================
#[cfg(test)]
mod tests {
    use super::*;
    use regex::Regex;
    /// Provider serving a canned disk list, so the filtering pipeline can be
    /// exercised without touching /proc or /sys.
    struct MockProvider {
        disks: Vec<Disk>,
    }
    impl DeviceProvider for MockProvider {
        fn list_block_devices(&self) -> Result<Vec<Disk>> {
            Ok(self.disks.clone())
        }
    }
    /// Shorthand: compile a regex, panicking on an invalid test pattern.
    fn re(s: &str) -> Regex {
        Regex::new(s).unwrap()
    }
    #[test]
    fn filter_by_size_and_include_exclude() {
        let provider = MockProvider {
            disks: vec![
                Disk { path: "/dev/sda".into(), size_bytes: 500 * 1024 * 1024 * 1024, rotational: true, model: None, serial: None }, // 500 GiB
                Disk { path: "/dev/nvme0n1".into(), size_bytes: 128 * 1024 * 1024 * 1024, rotational: false, model: None, serial: None }, // 128 GiB
                Disk { path: "/dev/loop0".into(), size_bytes: 8 * 1024 * 1024 * 1024, rotational: false, model: None, serial: None }, // 8 GiB pseudo (but mock provider supplies it)
            ],
        };
        let filter = DeviceFilter {
            include: vec![re(r"^/dev/(sd|nvme)")],
            exclude: vec![re(r"/dev/loop")],
            min_size_gib: 200, // >= 200 GiB
            allow_removable: true,
        };
        // Only /dev/sda survives: nvme is below the size floor, loop is excluded.
        let out = discover_with_provider(&provider, &filter).expect("discover ok");
        assert_eq!(out.len(), 1);
        assert_eq!(out[0].path, "/dev/sda");
    }
    #[test]
    fn no_match_returns_error() {
        let provider = MockProvider {
            disks: vec![
                Disk { path: "/dev/sdb".into(), size_bytes: 50 * 1024 * 1024 * 1024, rotational: true, model: None, serial: None }, // 50 GiB
            ],
        };
        let filter = DeviceFilter {
            include: vec![re(r"^/dev/nvme")],
            exclude: vec![],
            min_size_gib: 200,
            allow_removable: true,
        };
        // An empty result is surfaced as Error::Device, never as an empty Vec.
        let err = discover_with_provider(&provider, &filter).unwrap_err();
        match err {
            Error::Device(msg) => assert!(msg.contains("no eligible disks")),
            other => panic!("unexpected error: {:?}", other),
        }
    }
}

12
src/device/mod.rs Normal file
View File

@@ -0,0 +1,12 @@
//! Device module barrel.
//!
//! Re-exports the concrete discovery implementation from discovery.rs to avoid a large mod.rs.
//! See [src/device/discovery.rs](discovery.rs) for details.
//
// REGION: API
// api: device::discovery::*
// REGION: API-END
pub mod discovery;
pub use discovery::*;

56
src/errors.rs Normal file
View File

@@ -0,0 +1,56 @@
//! Common error types and result alias for zosstorage.
use thiserror::Error as ThisError;
/// Top-level error for zosstorage covering configuration, validation,
/// device discovery, partitioning, filesystem, mounting, reporting,
/// and external tool invocation failures.
#[derive(Debug, ThisError)]
pub enum Error {
    /// Invalid or malformed configuration input.
    #[error("configuration error: {0}")]
    Config(String),
    /// Semantic validation failure.
    #[error("validation error: {0}")]
    Validation(String),
    /// Errors related to device discovery and probing.
    #[error("device discovery error: {0}")]
    Device(String),
    /// Partitioning or GPT manipulation failures.
    #[error("partitioning error: {0}")]
    Partition(String),
    /// Filesystem creation or probing failures.
    #[error("filesystem error: {0}")]
    Filesystem(String),
    /// Mount operation failures.
    #[error("mount error: {0}")]
    Mount(String),
    /// State report construction or write failures.
    #[error("report error: {0}")]
    Report(String),
    /// External system tool invocation failure with captured stderr.
    #[error("external tool '{tool}' failed with status {status}: {stderr}")]
    Tool {
        // Tool name or path as invoked.
        tool: String,
        // Exit status code of the process — presumably set by util's process
        // runner; exact value on signal termination TODO confirm there.
        status: i32,
        // Captured stderr output for diagnostics.
        stderr: String,
    },
    /// Placeholder for not-yet-implemented functionality (e.g., --force).
    #[error("unimplemented: {0}")]
    Unimplemented(&'static str),
    /// Any other error wrapped with context.
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}
/// Crate-wide result alias.
pub type Result<T> = std::result::Result<T, Error>;

12
src/fs/mod.rs Normal file
View File

@@ -0,0 +1,12 @@
//! Filesystem module barrel.
//!
//! Re-exports the concrete planning/creation implementation from plan.rs to avoid a large mod.rs.
//! See [src/fs/plan.rs](plan.rs) for details.
//
// REGION: API
// api: fs::plan::*
// REGION: API-END
pub mod plan;
pub use plan::*;

308
src/fs/plan.rs Normal file
View File

@@ -0,0 +1,308 @@
// REGION: API
// api: fs::FsKind { Vfat, Btrfs, Bcachefs }
// api: fs::FsSpec { kind: FsKind, devices: Vec<String>, label: String }
// api: fs::FsPlan { specs: Vec<FsSpec> }
// api: fs::FsResult { kind: FsKind, devices: Vec<String>, uuid: String, label: String }
// api: fs::plan_filesystems(parts: &[crate::partition::PartitionResult], cfg: &crate::config::types::Config) -> crate::Result<FsPlan>
// api: fs::make_filesystems(plan: &FsPlan) -> crate::Result<Vec<FsResult>>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Map partition roles to concrete filesystems (vfat for ESP, btrfs for data, bcachefs for SSD+HDD).
// - Execute mkfs operations via external tooling wrappers and capture resulting UUIDs/labels.
// Non-goals: partition layout decisions, mount orchestration, device discovery.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: support additional filesystems or tuning flags through Config (e.g., more btrfs/bcachefs options).
// ext: dry-run mode to emit mkfs commands without executing (future).
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: must not run mkfs on non-empty or unexpected partitions; assume prior validation enforced.
// safety: ensure labels follow reserved semantics (ZOSBOOT for ESP, ZOSDATA for all data FS).
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: external mkfs/blkid failures -> crate::Error::Tool with captured stderr.
// errmap: planning mismatches -> crate::Error::Filesystem with context.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement mapping of topology to FsSpec including bcachefs cache/backing composition.
// todo: implement mkfs invocation and UUID capture via util::run_cmd / util::run_cmd_capture.
// REGION: TODO-END
//! Filesystem planning and creation for zosstorage.
//!
//! Maps partition results to concrete filesystems (vfat, btrfs, bcachefs)
//! and executes mkfs operations via external tooling wrappers.
//!
//! See [fn plan_filesystems](plan.rs:1) and
//! [fn make_filesystems](plan.rs:1).
use crate::{
Result,
types::{Config, Topology},
partition::{PartitionResult, PartRole},
util::{run_cmd, run_cmd_capture, which_tool},
Error,
};
use tracing::{debug, warn};
/// Filesystem kinds supported by zosstorage.
#[derive(Debug, Clone, Copy)]
pub enum FsKind {
    /// FAT32 for the EFI System Partition.
    Vfat,
    /// Btrfs for data filesystems in single/dual topologies.
    Btrfs,
    /// Bcachefs for SSD+HDD topology (SSD as cache/promote, HDD backing).
    Bcachefs,
}

/// Declarative specification for creating a filesystem.
///
/// Produced by [plan_filesystems] and consumed by [make_filesystems];
/// it carries no state about whether the mkfs has actually run.
#[derive(Debug, Clone)]
pub struct FsSpec {
    /// Filesystem kind.
    pub kind: FsKind,
    /// Source device(s):
    /// - single path for vfat and btrfs
    /// - two paths for bcachefs (cache + backing)
    pub devices: Vec<String>,
    /// Filesystem label:
    /// - "ZOSBOOT" for ESP
    /// - "ZOSDATA" for all data filesystems
    pub label: String,
}

/// Plan of filesystem creations.
#[derive(Debug, Clone)]
pub struct FsPlan {
    /// All filesystem creation specs, in creation order.
    pub specs: Vec<FsSpec>,
}

/// Result of creating a filesystem.
#[derive(Debug, Clone)]
pub struct FsResult {
    /// Filesystem kind.
    pub kind: FsKind,
    /// Devices the filesystem was created on.
    pub devices: Vec<String>,
    /// Filesystem UUID (string as reported by blkid or related).
    pub uuid: String,
    /// Filesystem label ("ZOSBOOT" or "ZOSDATA").
    pub label: String,
}
/**
Determine which partitions get which filesystem based on topology.
Rules:
- ESP partitions => Vfat with label from cfg.filesystem.vfat.label (reserved "ZOSBOOT")
- Data partitions => Btrfs with label cfg.filesystem.btrfs.label ("ZOSDATA"), unless topology SsdHddBcachefs
- SsdHddBcachefs => pair one Cache partition (SSD) with one Data partition (HDD) into one Bcachefs FsSpec with devices [cache, data] and label cfg.filesystem.bcachefs.label ("ZOSDATA")
- DualIndependent/BtrfsRaid1 => map each Data partition to its own Btrfs FsSpec (raid profile concerns are handled later during mkfs)
*/
pub fn plan_filesystems(
parts: &[PartitionResult],
cfg: &Config,
) -> Result<FsPlan> {
let mut specs: Vec<FsSpec> = Vec::new();
// Always map ESP partitions
for p in parts.iter().filter(|p| matches!(p.role, PartRole::Esp)) {
specs.push(FsSpec {
kind: FsKind::Vfat,
devices: vec![p.device_path.clone()],
label: cfg.filesystem.vfat.label.clone(),
});
}
match cfg.topology {
Topology::SsdHddBcachefs => {
// Expect exactly one cache (SSD) and at least one data (HDD). Use the first data for pairing.
let cache = parts.iter().find(|p| matches!(p.role, PartRole::Cache))
.ok_or_else(|| Error::Filesystem("expected a Cache partition for SsdHddBcachefs topology".to_string()))?;
let data = parts.iter().find(|p| matches!(p.role, PartRole::Data))
.ok_or_else(|| Error::Filesystem("expected a Data partition for SsdHddBcachefs topology".to_string()))?;
specs.push(FsSpec {
kind: FsKind::Bcachefs,
devices: vec![cache.device_path.clone(), data.device_path.clone()],
label: cfg.filesystem.bcachefs.label.clone(),
});
}
Topology::BtrfsRaid1 => {
// Group all Data partitions into a single Btrfs filesystem across multiple devices.
let data_devs: Vec<String> = parts
.iter()
.filter(|p| matches!(p.role, PartRole::Data))
.map(|p| p.device_path.clone())
.collect();
if data_devs.len() < 2 {
return Err(Error::Filesystem(
"BtrfsRaid1 topology requires at least 2 data partitions".to_string(),
));
}
specs.push(FsSpec {
kind: FsKind::Btrfs,
devices: data_devs,
label: cfg.filesystem.btrfs.label.clone(),
});
}
_ => {
// Map each Data partition to individual Btrfs filesystems.
for p in parts.iter().filter(|p| matches!(p.role, PartRole::Data)) {
specs.push(FsSpec {
kind: FsKind::Btrfs,
devices: vec![p.device_path.clone()],
label: cfg.filesystem.btrfs.label.clone(),
});
}
}
}
if specs.is_empty() {
return Err(Error::Filesystem("no filesystems to create from provided partitions".to_string()));
}
Ok(FsPlan { specs })
}
/// Create the filesystems and return identity info (UUIDs, labels).
///
/// Uses external tooling via util wrappers (mkfs.vfat, mkfs.btrfs, bcachefs format).
/// Notes:
/// - This initial implementation applies labels and creates filesystems with minimal flags.
/// - Btrfs RAID profile (e.g., raid1) will be applied in a follow-up by mapping config to mkfs flags.
/// - UUID is captured via blkid -o export on the first device of each spec.
///
/// # Errors
/// Returns [Error::Filesystem] when a required tool is missing from PATH, when a spec
/// carries an unexpected device count, or when blkid reports no UUID after creation.
pub fn make_filesystems(plan: &FsPlan) -> Result<Vec<FsResult>> {
    // Discover required tools up-front so a missing binary aborts the run
    // before any destructive mkfs has been executed.
    let vfat_tool = which_tool("mkfs.vfat")?;
    let btrfs_tool = which_tool("mkfs.btrfs")?;
    let bcachefs_tool = which_tool("bcachefs")?;
    let blkid_tool = which_tool("blkid")?;
    // blkid is mandatory: without it we cannot report filesystem identities.
    if blkid_tool.is_none() {
        return Err(Error::Filesystem("blkid not found in PATH; cannot capture filesystem UUIDs".into()));
    }
    let blkid = blkid_tool.unwrap();
    let mut results: Vec<FsResult> = Vec::new();
    for spec in &plan.specs {
        match spec.kind {
            FsKind::Vfat => {
                // vfat tool is only required if the plan actually contains a vfat spec.
                let Some(ref mkfs) = vfat_tool else {
                    return Err(Error::Filesystem("mkfs.vfat not found in PATH".into()));
                };
                if spec.devices.len() != 1 {
                    return Err(Error::Filesystem("vfat requires exactly one device".into()));
                }
                let dev = &spec.devices[0];
                // mkfs.vfat -n LABEL /dev/...
                run_cmd(&[mkfs.as_str(), "-n", spec.label.as_str(), dev.as_str()])?;
                // Capture UUID
                let uuid = capture_uuid(&blkid, dev)?;
                results.push(FsResult {
                    kind: FsKind::Vfat,
                    devices: vec![dev.clone()],
                    uuid,
                    label: spec.label.clone(),
                });
            }
            FsKind::Btrfs => {
                let Some(ref mkfs) = btrfs_tool else {
                    return Err(Error::Filesystem("mkfs.btrfs not found in PATH".into()));
                };
                if spec.devices.is_empty() {
                    return Err(Error::Filesystem("btrfs requires at least one device".into()));
                }
                // mkfs.btrfs -L LABEL dev1 [dev2 ...]
                let mut args: Vec<String> = vec![mkfs.clone(), "-L".into(), spec.label.clone()];
                args.extend(spec.devices.iter().cloned());
                let args_ref: Vec<&str> = args.iter().map(|s| s.as_str()).collect();
                run_cmd(&args_ref)?;
                // Capture UUID from the first device; for multi-device btrfs all
                // members report the same filesystem UUID.
                let dev0 = &spec.devices[0];
                let uuid = capture_uuid(&blkid, dev0)?;
                results.push(FsResult {
                    kind: FsKind::Btrfs,
                    devices: spec.devices.clone(),
                    uuid,
                    label: spec.label.clone(),
                });
            }
            FsKind::Bcachefs => {
                let Some(ref mkfs) = bcachefs_tool else {
                    return Err(Error::Filesystem("bcachefs not found in PATH".into()));
                };
                if spec.devices.len() < 2 {
                    return Err(Error::Filesystem("bcachefs requires at least two devices (cache + backing)".into()));
                }
                // bcachefs format --label LABEL dev_cache dev_backing ...
                let mut args: Vec<String> = vec![mkfs.clone(), "format".into(), "--label".into(), spec.label.clone()];
                args.extend(spec.devices.iter().cloned());
                let args_ref: Vec<&str> = args.iter().map(|s| s.as_str()).collect();
                run_cmd(&args_ref)?;
                // Capture UUID from the first device.
                // NOTE(review): assumes blkid reports the filesystem UUID on the cache
                // device of a bcachefs set — confirm against target bcachefs version.
                let dev0 = &spec.devices[0];
                let uuid = capture_uuid(&blkid, dev0)?;
                results.push(FsResult {
                    kind: FsKind::Bcachefs,
                    devices: spec.devices.clone(),
                    uuid,
                    label: spec.label.clone(),
                });
            }
        }
    }
    debug!("make_filesystems: created {} filesystems", results.len());
    Ok(results)
}
/// Run `blkid -o export` on `dev` and extract its filesystem UUID.
///
/// Prefers the `ID_FS_UUID` key and falls back to plain `UUID`; errors
/// when blkid reports neither.
fn capture_uuid(blkid: &str, dev: &str) -> Result<String> {
    let out = run_cmd_capture(&[blkid, "-o", "export", dev])?;
    let kv = parse_blkid_export(&out.stdout);
    match kv.get("ID_FS_UUID").or_else(|| kv.get("UUID")) {
        Some(uuid) => Ok(uuid.clone()),
        None => {
            warn!("blkid did not report UUID for {}", dev);
            Err(Error::Filesystem(format!("missing UUID in blkid output for {}", dev)))
        }
    }
}
/// Minimal parser for blkid -o export KEY=VAL lines.
///
/// Lines without an '=' separator are ignored; keys and values are trimmed.
fn parse_blkid_export(s: &str) -> std::collections::HashMap<String, String> {
    s.lines()
        .filter_map(|line| line.split_once('='))
        .map(|(k, v)| (k.trim().to_string(), v.trim().to_string()))
        .collect()
}
#[cfg(test)]
mod tests_parse {
    use super::parse_blkid_export;

    // Verifies KEY=VAL extraction on a representative blkid export output.
    #[test]
    fn parse_export_ok() {
        let s = "ID_FS_UUID=abcd-1234\nUUID=abcd-1234\nTYPE=btrfs\n";
        let m = parse_blkid_export(s);
        assert_eq!(m.get("ID_FS_UUID").unwrap(), "abcd-1234");
        assert_eq!(m.get("TYPE").unwrap(), "btrfs");
    }
}

284
src/idempotency/mod.rs Normal file
View File

@@ -0,0 +1,284 @@
// REGION: API
// api: idempotency::detect_existing_state() -> crate::Result<Option<crate::report::StateReport>>
// api: idempotency::is_empty_disk(disk: &crate::device::Disk) -> crate::Result<bool>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Detect whether the system is already provisioned by probing GPT names and filesystem labels.
// - Provide safe emptiness checks for target disks before any destructive operations.
// Non-goals: performing changes; this module only inspects state.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: add heuristics for partial provisioning detection with guided remediation (future).
// ext: support caching previous successful run state to speed up detection.
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: reads only; must not write to devices. Use blkid and partition table reads where possible.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: probing errors -> crate::Error::Device or crate::Error::Other(anyhow) with context.
// REGION: ERROR_MAPPING-END
//! Idempotency detection and disk emptiness probes.
//!
//! Provides helpers to detect whether the system is already provisioned
//! (based on GPT names and filesystem labels), and to verify that target
//! disks are empty before making any destructive changes.
use crate::{
device::Disk,
report::{StateReport, REPORT_VERSION},
util::{run_cmd_capture, which_tool},
Error, Result,
};
use serde_json::json;
use std::{collections::HashMap, fs, path::Path};
use humantime::format_rfc3339;
use tracing::{debug, warn};
/// Return existing state if system is already provisioned; otherwise None.
///
/// Signals for provisioned state:
/// - Expected GPT partition names present: "zosboot", "zosdata", and optional "zoscache"
/// - Filesystem labels present: "ZOSBOOT" for ESP, "ZOSDATA" for data filesystems
///
/// Implementation notes:
/// - Uses blkid -o export on discovered device nodes from /proc/partitions.
/// - Missing blkid results in Ok(None) (cannot detect safely).
/// - Read-only: probes devices but never writes to them.
pub fn detect_existing_state() -> Result<Option<StateReport>> {
    let Some(blkid) = which_tool("blkid")? else {
        warn!("blkid not found; skipping idempotency detection (assuming not provisioned)");
        return Ok(None);
    };
    let names = read_proc_partitions_names()?;
    if names.is_empty() {
        return Ok(None);
    }
    // Collected evidence for the eventual StateReport.
    let mut partlabel_hits: Vec<serde_json::Value> = Vec::new();
    let mut fslabel_hits: Vec<serde_json::Value> = Vec::new();
    let mut saw_partlabel_zosboot = false;
    let mut saw_partlabel_zosdata = false;
    let mut saw_partlabel_zoscache = false;
    let mut saw_label_zosboot = false;
    let mut saw_label_zosdata = false;
    for name in names {
        let dev_path = format!("/dev/{}", name);
        let args = [blkid.as_str(), "-o", "export", dev_path.as_str()];
        let map_opt = match run_cmd_capture(&args) {
            Ok(out) => Some(parse_blkid_export(&out.stdout)),
            Err(Error::Tool { status, .. }) if status != 0 => {
                // Typical when device has no recognizable signature; ignore.
                None
            }
            Err(e) => {
                // Unexpected failure; log and continue with remaining devices.
                warn!("blkid failed on {}: {:?}", dev_path, e);
                None
            }
        };
        if let Some(map) = map_opt {
            // GPT partition names are compared case-insensitively.
            if let Some(pl) = map.get("PARTLABEL") {
                let pl_lc = pl.to_ascii_lowercase();
                if pl_lc == "zosboot" {
                    saw_partlabel_zosboot = true;
                    partlabel_hits.push(json!({ "device": dev_path, "partlabel": pl }));
                } else if pl_lc == "zosdata" {
                    saw_partlabel_zosdata = true;
                    partlabel_hits.push(json!({ "device": dev_path, "partlabel": pl }));
                } else if pl_lc == "zoscache" {
                    saw_partlabel_zoscache = true;
                    partlabel_hits.push(json!({ "device": dev_path, "partlabel": pl }));
                }
            }
            // Filesystem labels are matched exactly (reserved uppercase names).
            if let Some(lbl) = map.get("LABEL") {
                if lbl == "ZOSBOOT" {
                    saw_label_zosboot = true;
                    fslabel_hits.push(json!({ "device": dev_path, "label": lbl }));
                } else if lbl == "ZOSDATA" {
                    saw_label_zosdata = true;
                    fslabel_hits.push(json!({ "device": dev_path, "label": lbl }));
                }
            }
        }
    }
    // Consider provisioned when we see both boot and data signals
    // (each may come from either a partition name or a filesystem label).
    let boot_ok = saw_partlabel_zosboot || saw_label_zosboot;
    let data_ok = saw_partlabel_zosdata || saw_label_zosdata;
    if boot_ok && data_ok {
        let ts = format_rfc3339(std::time::SystemTime::now()).to_string();
        let report = StateReport {
            version: REPORT_VERSION.to_string(),
            timestamp: ts,
            status: "already_provisioned".to_string(),
            disks: vec![], // can be enriched later
            partitions: partlabel_hits,
            filesystems: fslabel_hits,
            mounts: vec![],
            error: None,
        };
        debug!(
            "idempotency: already provisioned (boot_ok={}, data_ok={}, cache={})",
            boot_ok, data_ok, saw_partlabel_zoscache
        );
        return Ok(Some(report));
    }
    Ok(None)
}
/// Determine if a disk is empty (no partitions and no known filesystem signatures).
///
/// Algorithm:
/// - Parse /proc/partitions for any child partitions of the base device name.
/// - Probe with blkid -p -o export on the whole-disk node:
///   - Exit status 0 => recognized signature (PTTYPE or FS) -> not empty
///   - Exit status 2 (typically "nothing found") -> treat as empty
/// - Missing blkid -> conservative: not empty (return Ok(false))
///
/// Read-only: never writes to the device.
pub fn is_empty_disk(disk: &Disk) -> Result<bool> {
    let base = base_name(&disk.path)
        .ok_or_else(|| Error::Device(format!("invalid disk path: {}", disk.path)))?;
    // Check for any child partitions listed in /proc/partitions.
    let names = read_proc_partitions_names()?;
    if names.iter().any(|n| is_partition_of(&base, n)) {
        debug!("disk {} has child partitions -> not empty", disk.path);
        return Ok(false);
    }
    // Probe with blkid -p (low-level superblock probing, bypassing the cache).
    let Some(blkid) = which_tool("blkid")? else {
        warn!("blkid not found; conservatively treating {} as not empty", disk.path);
        return Ok(false);
    };
    let args = [blkid.as_str(), "-p", "-o", "export", disk.path.as_str()];
    match run_cmd_capture(&args) {
        Ok(_out) => {
            // Some signature recognized (filesystem or partition table)
            debug!("blkid found signatures on {} -> not empty", disk.path);
            Ok(false)
        }
        Err(Error::Tool { status, .. }) => {
            if status == 2 {
                // Nothing recognized by blkid
                debug!("blkid reports no signatures on {} -> empty", disk.path);
                Ok(true)
            } else {
                // Any other non-zero status (e.g. usage or permission error)
                // is surfaced rather than guessed at.
                Err(Error::Device(format!(
                    "blkid unexpected status {} probing {}",
                    status, disk.path
                )))
            }
        }
        Err(e) => Err(Error::Device(format!(
            "blkid probing error on {}: {}",
            disk.path, e
        ))),
    }
}
// =========================
// Helpers (module-private)
// =========================
/// Parse `blkid -o export` output (one KEY=VAL per line) into a map.
/// Lines lacking '=' are skipped; keys and values are trimmed.
fn parse_blkid_export(s: &str) -> HashMap<String, String> {
    s.lines().fold(HashMap::new(), |mut acc, line| {
        if let Some((key, val)) = line.split_once('=') {
            acc.insert(key.trim().to_string(), val.trim().to_string());
        }
        acc
    })
}
/// List device names from /proc/partitions.
///
/// Skips the "major ..." header, blank lines, malformed rows, and pseudo
/// devices (loop, ram, zram, fd) that are never provisioning targets.
fn read_proc_partitions_names() -> Result<Vec<String>> {
    let content = fs::read_to_string("/proc/partitions")
        .map_err(|e| Error::Device(format!("/proc/partitions read error: {}", e)))?;
    let skip_prefixes = ["loop", "ram", "zram", "fd"];
    // Row format: major minor #blocks name — the 4th column is the device name.
    let names = content
        .lines()
        .map(str::trim)
        .filter(|line| !line.is_empty() && !line.starts_with("major"))
        .filter_map(|line| line.split_whitespace().nth(3))
        .filter(|name| !skip_prefixes.iter().any(|p| name.starts_with(p)))
        .map(str::to_string)
        .collect();
    Ok(names)
}
/// Extract the final path component of a device path, e.g. "/dev/sda" -> "sda".
/// Returns None for paths with no file-name component (e.g. "/").
fn base_name(path: &str) -> Option<String> {
    let file = Path::new(path).file_name()?;
    Some(file.to_string_lossy().into_owned())
}
/// Return true when `name` is a partition node of the whole-disk `base`.
///
/// Naming rules:
/// - Disks whose name ends in a digit (e.g. "nvme0n1") use a 'p' separator:
///   children look like "nvme0n1p1", "nvme0n1p10".
/// - Other disks (e.g. "sda") append the partition number directly: "sda1".
///
/// Fix: the digit run after the separator must be non-empty. The previous
/// version accepted a bare trailing 'p' ("nvme0n1p") because an empty
/// iterator satisfies `all()` vacuously.
fn is_partition_of(base: &str, name: &str) -> bool {
    if name == base || !name.starts_with(base) {
        return false;
    }
    let rest = &name[base.len()..];
    // At least one digit is required for a valid partition suffix.
    let all_digits = |s: &str| !s.is_empty() && s.chars().all(|c| c.is_ascii_digit());
    let base_ends_with_digit = base
        .chars()
        .last()
        .map(|c| c.is_ascii_digit())
        .unwrap_or(false);
    if base_ends_with_digit {
        // nvme0n1 -> nvme0n1p1
        rest.strip_prefix('p').map_or(false, all_digits)
    } else {
        // sda -> sda1
        all_digits(rest)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Pure-helper tests only: no device or /proc access required.
    #[test]
    fn parse_blkid_export_basic() {
        let s = "ID_FS_LABEL=ZOSDATA\nPARTLABEL=zosdata\nUUID=1234-ABCD\n";
        let m = parse_blkid_export(s);
        assert_eq!(m.get("ID_FS_LABEL").unwrap(), "ZOSDATA");
        assert_eq!(m.get("PARTLABEL").unwrap(), "zosdata");
        assert_eq!(m.get("UUID").unwrap(), "1234-ABCD");
    }

    #[test]
    fn is_partition_of_cases_sda_style() {
        // sda base: partitions sda1, sda2 are children; sdb is not
        assert!(is_partition_of("sda", "sda1"));
        assert!(is_partition_of("sda", "sda10"));
        assert!(!is_partition_of("sda", "sda"));
        assert!(!is_partition_of("sda", "sdb1"));
    }

    #[test]
    fn is_partition_of_cases_nvme_style() {
        // nvme0n1 base: partitions nvme0n1p1, nvme0n1p10 are children; nvme0n2p1 is not
        assert!(is_partition_of("nvme0n1", "nvme0n1p1"));
        assert!(is_partition_of("nvme0n1", "nvme0n1p10"));
        assert!(!is_partition_of("nvme0n1", "nvme0n1"));
        assert!(!is_partition_of("nvme0n1", "nvme0n2p1"));
    }
}

20
src/lib.rs Normal file
View File

@@ -0,0 +1,20 @@
//! Crate root for zosstorage: one-shot disk provisioning utility for initramfs.
pub mod cli;          // non-interactive flag parsing
pub mod logging;      // tracing init (stderr + optional file)
pub mod config;       // defaults, overlays, validation
pub mod device;       // disk discovery via /proc and /sys
pub mod partition;    // topology-driven partition planning
pub mod fs;           // filesystem planning and mkfs execution
pub mod mount;        // mount planning/application and fstab
pub mod report;       // state/report JSON structures
pub mod orchestrator; // one-shot provisioning flow
pub mod idempotency;  // already-provisioned and emptiness probes
pub mod util;         // process execution helpers
pub mod errors;       // crate-wide Error/Result
pub mod types; // top-level types (moved from config/types.rs for visibility)
pub use errors::{Error, Result};
/// Crate version string from Cargo.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

136
src/logging/mod.rs Normal file
View File

@@ -0,0 +1,136 @@
// REGION: API
// api: logging::LogOptions { level: String, to_file: bool }
// api: logging::LogOptions::from_cli(cli: &crate::cli::Cli) -> Self
// api: logging::init_logging(opts: &LogOptions) -> crate::Result<()>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Provide structured logging initialization via tracing, defaulting to stderr.
// - Optionally enable file logging at /run/zosstorage/zosstorage.log.
// Non-goals: runtime log level reconfiguration or external log forwarders.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: add env-filter support for selective module verbosity (feature-gated).
// ext: add JSON log formatting for machine-readability (feature-gated).
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: initialization must be idempotent; calling twice should not double-install layers.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: IO errors when opening log file -> crate::Error::Other(anyhow)
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement file layer initialization and idempotent guard.
// REGION: TODO-END
//! Logging initialization and options for zosstorage.
//!
//! Provides structured logging via the `tracing` ecosystem. Defaults to stderr,
//! with an optional file target at /run/zosstorage/zosstorage.log.
use crate::Result;
use std::fs::OpenOptions;
use std::io::{self};
use std::sync::OnceLock;
use tracing::Level;
use tracing_subscriber::fmt;
use tracing_subscriber::prelude::*;
use tracing_subscriber::registry::Registry;
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::util::SubscriberInitExt;
/// Logging options resolved from CLI and/or config.
#[derive(Debug, Clone)]
pub struct LogOptions {
    /// Level: "error" | "warn" | "info" | "debug"
    /// (any other value falls back to "info" at init time).
    pub level: String,
    /// When true, also log to /run/zosstorage/zosstorage.log
    pub to_file: bool,
}

impl LogOptions {
    /// Construct options from [struct Cli](src/cli/mod.rs:1).
    ///
    /// Copies the CLI's log level (stringified) and file-logging flag verbatim.
    pub fn from_cli(cli: &crate::cli::Cli) -> Self {
        Self {
            level: cli.log_level.to_string(),
            to_file: cli.log_to_file,
        }
    }
}
fn level_from_str(s: &str) -> Level {
match s {
"error" => Level::ERROR,
"warn" => Level::WARN,
"info" => Level::INFO,
"debug" => Level::DEBUG,
_ => Level::INFO,
}
}
// One-shot guard: set after the first successful initialization so repeat
// calls become no-ops. NOTE(review): this is check-then-set rather than
// OnceLock::get_or_init, so two racing threads could both pass the check;
// try_init would then fail for the loser. Fine for the single-threaded
// initramfs entrypoint — confirm if init_logging is ever called concurrently.
static INIT_GUARD: OnceLock<()> = OnceLock::new();

/// Initialize tracing subscriber according to options.
/// Must be idempotent when called once in process lifetime.
///
/// Installs a stderr layer always, plus a file layer at
/// /run/zosstorage/zosstorage.log when `opts.to_file` is set and the file
/// can be opened (otherwise it silently falls back to stderr-only).
///
/// # Errors
/// Returns [crate::Error::Other] if the global subscriber cannot be installed.
pub fn init_logging(opts: &LogOptions) -> Result<()> {
    if INIT_GUARD.get().is_some() {
        return Ok(());
    }
    let lvl = level_from_str(&opts.level);
    let stderr_layer = fmt::layer()
        .with_writer(io::stderr) // no timestamps by default for initramfs
        .with_ansi(false)
        .with_level(true)
        .with_target(false)
        .with_thread_ids(false)
        .with_thread_names(false)
        .with_file(false)
        .with_line_number(false)
        .with_filter(LevelFilter::from_level(lvl));
    // The three try_init branches below are intentionally duplicated:
    // a Registry with and without the file layer has a different concrete
    // type, so the calls cannot be unified without boxing the layers.
    if opts.to_file {
        let log_path = "/run/zosstorage/zosstorage.log";
        if let Ok(file) = OpenOptions::new()
            .create(true)
            .write(true)
            .append(true)
            .open(log_path)
        {
            // Make a writer that clones the file handle per write to satisfy MakeWriter.
            let make_file = move || file.try_clone().expect("failed to clone log file handle");
            let file_layer = fmt::layer()
                .with_writer(make_file)
                .with_ansi(false)
                .with_level(true)
                .with_target(false)
                .with_thread_ids(false)
                .with_thread_names(false)
                .with_file(false)
                .with_line_number(false)
                .with_filter(LevelFilter::from_level(lvl));
            Registry::default()
                .with(stderr_layer)
                .with(file_layer)
                .try_init()
                .map_err(|e| crate::Error::Other(anyhow::anyhow!("failed to set global logger: {}", e)))?;
        } else {
            // Fall back to stderr-only if file cannot be opened
            Registry::default()
                .with(stderr_layer)
                .try_init()
                .map_err(|e| crate::Error::Other(anyhow::anyhow!("failed to set global logger: {}", e)))?;
        }
    } else {
        Registry::default()
            .with(stderr_layer)
            .try_init()
            .map_err(|e| crate::Error::Other(anyhow::anyhow!("failed to set global logger: {}", e)))?;
    }
    let _ = INIT_GUARD.set(());
    Ok(())
}

56
src/main.rs Normal file
View File

@@ -0,0 +1,56 @@
// REGION: API
// api: binary::main() -> (process exit)
// api: binary::real_main() -> zosstorage::Result<()>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Minimal binary wrapper: parse CLI, init logging, load+validate config, run orchestrator.
// - Emit minimal fatal errors to stderr only; no stdout spam.
// Non-goals: business logic, module orchestration details (delegated to orchestrator).
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: add --version/--help output via clap (already provided by clap derive).
// ext: add build-info banner to logs when debug level (feature-gated).
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: never print secrets; errors are concise. Avoids panics; returns proper exit codes.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: any failure bubbles as crate::Error via real_main() and is printed as a single-line stderr.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: add tracing spans around boot phases once logging init is implemented.
// REGION: TODO-END
//! Binary entrypoint for zosstorage.
//!
//! Initializes logging, parses CLI, loads/validates configuration,
//! and invokes the orchestrator run sequence. Avoids stdout spam.
use zosstorage::{Result, cli, config, logging, orchestrator};
/// Process entrypoint: run the provisioning flow and translate any failure
/// into a single stderr line and a non-zero exit code.
fn main() {
    match real_main() {
        Ok(()) => {}
        Err(e) => {
            // Minimal stderr emission permitted for fatal errors in initramfs.
            eprintln!("error: {e}");
            std::process::exit(1);
        }
    }
}
/// Orchestrates initialization steps and runs the provisioning flow.
///
/// Order matters: CLI is parsed first so logging options can be derived,
/// logging is initialized before config loading so load errors are traced,
/// and the config is validated before the orchestrator runs.
fn real_main() -> Result<()> {
    let cli = cli::from_args();
    let log_opts = logging::LogOptions::from_cli(&cli);
    logging::init_logging(&log_opts)?;
    let cfg = config::load_and_merge(&cli)?;
    config::validate(&cfg)?;
    // Preview flags (--show / --report) are forwarded to the orchestrator context.
    let ctx = orchestrator::Context::new(cfg, log_opts)
        .with_show(cli.show)
        .with_report_path(cli.report.clone());
    orchestrator::run(&ctx)
}

12
src/mount/mod.rs Normal file
View File

@@ -0,0 +1,12 @@
//! Mount module barrel.
//!
//! Re-exports the concrete ops implementation from ops.rs to avoid a large mod.rs.
//! Callers use `crate::mount::plan_mounts` / `apply_mounts` / `maybe_write_fstab`.
//! See [src/mount/ops.rs](ops.rs) for details.
//
// REGION: API
// api: mount::ops::*
// REGION: API-END
pub mod ops;
pub use ops::*;

84
src/mount/ops.rs Normal file
View File

@@ -0,0 +1,84 @@
// REGION: API
// api: mount::MountPlan { entries: Vec<(String /* source */, String /* target */, String /* fstype */, String /* options */)> }
// api: mount::MountResult { source: String, target: String, fstype: String, options: String }
// api: mount::plan_mounts(fs_results: &[crate::fs::FsResult], cfg: &crate::config::types::Config) -> crate::Result<MountPlan>
// api: mount::apply_mounts(plan: &MountPlan) -> crate::Result<Vec<MountResult>>
// api: mount::maybe_write_fstab(mounts: &[MountResult], cfg: &crate::config::types::Config) -> crate::Result<()>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Translate filesystem identities to mount targets, defaulting to /var/cache/<UUID>.
// - Perform mounts using syscalls (nix) and create target directories as needed.
// - Optionally generate /etc/fstab entries in deterministic order.
// Non-goals: filesystem creation, device discovery, partitioning.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: support custom mount scheme mapping beyond per-UUID.
// ext: add configurable mount options per filesystem kind via Config.
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: must ensure target directories exist and avoid overwriting unintended paths.
// safety: ensure options include sensible defaults (e.g., btrfs compress, ssd) when applicable.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: syscall failures -> crate::Error::Mount with context.
// errmap: fstab write IO errors -> crate::Error::Mount with path details.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement option synthesis (e.g., compress=zstd:3 for btrfs) based on Config and device rotational hints.
// todo: implement deterministic fstab ordering and idempotent writes.
// REGION: TODO-END
//! Mount planning and application.
//!
//! Translates filesystem results into mount targets (default under /var/cache/<UUID>)
//! and applies mounts using syscalls (via nix) in later implementation.
//!
//! See [fn plan_mounts](ops.rs:1), [fn apply_mounts](ops.rs:1),
//! and [fn maybe_write_fstab](ops.rs:1).
#![allow(dead_code)]
use crate::{Result, types::Config, fs::FsResult};
/// Mount plan entries: (source, target, fstype, options)
#[derive(Debug, Clone)]
pub struct MountPlan {
    /// Source device path, target directory, filesystem type, and mount options,
    /// in the order the mounts should be applied.
    pub entries: Vec<(String, String, String, String)>,
}

/// Result of applying a single mount entry.
#[derive(Debug, Clone)]
pub struct MountResult {
    /// Source device path (e.g., /dev/nvme0n1p3).
    pub source: String,
    /// Target directory (e.g., /var/cache/<UUID>).
    pub target: String,
    /// Filesystem type (e.g., "btrfs", "vfat").
    pub fstype: String,
    /// Options string (comma-separated).
    pub options: String,
}
/// Build mount plan under /var/cache/<UUID> by default.
///
/// NOTE: not yet implemented — calling this panics via `todo!`.
pub fn plan_mounts(fs_results: &[FsResult], _cfg: &Config) -> Result<MountPlan> {
    let _ = fs_results;
    // Placeholder: map filesystem UUIDs to per-UUID directories and assemble options.
    todo!("create per-UUID directories and mount mapping based on config")
}

/// Apply mounts using syscalls (nix), ensuring directories exist.
///
/// NOTE: not yet implemented — calling this panics via `todo!`.
pub fn apply_mounts(_plan: &MountPlan) -> Result<Vec<MountResult>> {
    // Placeholder: perform mount syscalls and return results.
    todo!("perform mount syscalls and return results")
}

/// Optionally generate /etc/fstab entries in deterministic order.
///
/// NOTE: not yet implemented — calling this panics via `todo!`.
pub fn maybe_write_fstab(_mounts: &[MountResult], _cfg: &Config) -> Result<()> {
    // Placeholder: write fstab when enabled in configuration.
    todo!("when enabled, write fstab entries deterministically")
}

6
src/orchestrator/mod.rs Normal file
View File

@@ -0,0 +1,6 @@
//! Orchestrator module barrel.
//!
//! Re-exports the concrete implementation from run.rs to avoid duplicating types/functions.
//! Callers use `crate::orchestrator::Context` and `crate::orchestrator::run`.
pub mod run;
pub use run::*;

372
src/orchestrator/run.rs Normal file
View File

@@ -0,0 +1,372 @@
// REGION: API
// api: orchestrator::Context { cfg: crate::config::types::Config, log: crate::logging::LogOptions }
// api: orchestrator::Context::new(cfg: crate::config::types::Config, log: crate::logging::LogOptions) -> Self
// api: orchestrator::run(ctx: &Context) -> crate::Result<()>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - High-level one-shot flow controller: idempotency check, device discovery,
// partition planning and application, filesystem creation, mounting, reporting.
// - Enforces abort-on-first-error semantics across subsystems.
// Non-goals: direct device IO or shelling out; delegates to subsystem modules.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: pluggable DeviceProvider for discovery (mocking/testing).
// ext: dry-run mode (future) to emit planned actions without applying.
// ext: hooks before/after each phase for metrics or additional validation.
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: must never proceed to filesystem creation if partition planning/apply failed.
// safety: must exit success without changes when idempotency detection indicates provisioned.
// safety: must ensure reporting only on overall success (no partial-success report).
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: subsystem errors bubble up as crate::Error::* without stringly-typed loss.
// errmap: external tool failures are expected as Error::Tool from util layer.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement orchestration steps in phases with structured logs and timing.
// todo: add per-phase tracing spans and outcome summaries.
// REGION: TODO-END
//! High-level orchestration for zosstorage.
//!
//! Drives the one-shot provisioning flow:
//! - Idempotency detection
//! - Device discovery
//! - Partition planning and application
//! - Filesystem planning and creation
//! - Mount planning and application
//! - Report generation and write
use crate::{
types::Config,
logging::LogOptions,
device::{discover, DeviceFilter, Disk},
idempotency,
partition,
Error, Result,
};
use humantime::format_rfc3339;
use regex::Regex;
use serde_json::{json, to_value};
use std::fs;
use std::time::SystemTime;
use tracing::{debug, info, warn};
/// Execution context holding resolved configuration and environment flags.
///
/// Built with [Context::new] and refined via the `with_*` builder methods
/// before being handed to [run].
#[derive(Debug, Clone)]
pub struct Context {
    /// Validated configuration.
    pub cfg: Config,
    /// Logging options in effect.
    pub log: LogOptions,
    /// When true, print detection and planning summary to stdout (JSON).
    pub show: bool,
    /// Optional report path override (when provided by CLI --report).
    pub report_path_override: Option<String>,
}

impl Context {
    /// Construct a new context from config and logging options.
    /// Preview flags default to off; use the builders below to enable them.
    pub fn new(cfg: Config, log: LogOptions) -> Self {
        Self {
            cfg,
            log,
            show: false,
            report_path_override: None,
        }
    }

    /// Builder: enable showing summary to stdout.
    pub fn with_show(mut self, show: bool) -> Self {
        self.show = show;
        self
    }

    /// Builder: override report path.
    pub fn with_report_path(mut self, path: Option<String>) -> Self {
        self.report_path_override = path;
        self
    }
}
/// Run the one-shot provisioning flow.
///
/// Stages, in order (each may abort with an error):
/// 1. Idempotency pre-flight — exit success early if already provisioned,
///    optionally emitting a summary (stdout and/or report file).
/// 2. Device discovery filtered per config.
/// 3. Empty-disk enforcement — skipped in preview mode (--show/--report).
/// 4. Partition planning (declarative only; nothing is applied yet).
/// 5. Optional JSON summary emission (stdout and/or report file).
///
/// Returns Ok(()) on success and also on success-noop when already provisioned.
/// Any validation or execution failure aborts with an error.
pub fn run(ctx: &Context) -> Result<()> {
    info!("orchestrator: starting run() with topology {:?}", ctx.cfg.topology);
    // 1) Idempotency pre-flight: if already provisioned, optionally emit summary then exit success.
    match idempotency::detect_existing_state()? {
        Some(state) => {
            info!("orchestrator: already provisioned");
            if ctx.show || ctx.report_path_override.is_some() {
                let now = format_rfc3339(SystemTime::now()).to_string();
                let state_json = to_value(&state).map_err(|e| {
                    Error::Report(format!("failed to serialize StateReport: {}", e))
                })?;
                let summary = json!({
                    "version": "v1",
                    "timestamp": now,
                    "status": "already_provisioned",
                    "state": state_json
                });
                if ctx.show {
                    println!("{}", summary);
                }
                if let Some(path) = &ctx.report_path_override {
                    fs::write(path, summary.to_string())
                        .map_err(|e| Error::Report(format!("failed to write report to {}: {}", path, e)))?;
                    info!("orchestrator: wrote idempotency report to {}", path);
                }
            }
            // Success-noop: nothing to provision.
            return Ok(());
        }
        None => {
            debug!("orchestrator: not provisioned; continuing");
        }
    }
    // 2) Device discovery using compiled filter from config.
    let filter = build_device_filter(&ctx.cfg)?;
    let disks = discover(&filter)?;
    info!("orchestrator: discovered {} eligible disk(s)", disks.len());
    // 3) Emptiness enforcement: skip in preview mode (--show/--report) to allow planning output.
    let preview = ctx.show || ctx.report_path_override.is_some();
    if ctx.cfg.partitioning.require_empty_disks && !preview {
        enforce_empty_disks(&disks)?;
        info!("orchestrator: all target disks verified empty");
    } else if ctx.cfg.partitioning.require_empty_disks && preview {
        warn!("orchestrator: preview mode detected (--show/--report); skipping empty-disk enforcement");
    } else {
        warn!("orchestrator: require_empty_disks=false; proceeding without emptiness enforcement");
    }
    // 4) Partition planning (declarative only; application not yet implemented in this step).
    let plan = partition::plan_partitions(&disks, &ctx.cfg)?;
    debug!(
        "orchestrator: partition plan ready (alignment={} MiB, disks={})",
        plan.alignment_mib,
        plan.disks.len()
    );
    for dp in &plan.disks {
        debug!("plan for {}: {} part(s)", dp.disk.path, dp.parts.len());
    }
    // Note:
    // - Applying partitions, creating filesystems, mounting, and reporting
    //   will be wired in subsequent steps. For now this performs pre-flight
    //   checks and planning to exercise real code paths safely.
    info!("orchestrator: pre-flight complete (idempotency checked, devices discovered, plan computed)");
    // Optional: emit JSON summary via --show or write via --report
    if ctx.show || ctx.report_path_override.is_some() {
        let summary = build_summary_json(&disks, &plan, &ctx.cfg)?;
        if ctx.show {
            // Print compact JSON to stdout
            println!("{}", summary);
        }
        if let Some(path) = &ctx.report_path_override {
            // Best-effort write (non-atomic for now, pending report::write_report implementation)
            fs::write(path, summary.to_string()).map_err(|e| {
                Error::Report(format!("failed to write report to {}: {}", path, e))
            })?;
            info!("orchestrator: wrote summary report to {}", path);
        }
    }
    Ok(())
}
fn build_device_filter(cfg: &Config) -> Result<DeviceFilter> {
let mut include = Vec::new();
let mut exclude = Vec::new();
for pat in &cfg.device_selection.include_patterns {
let re = Regex::new(pat).map_err(|e| {
Error::Validation(format!("invalid include regex '{}': {}", pat, e))
})?;
include.push(re);
}
for pat in &cfg.device_selection.exclude_patterns {
let re = Regex::new(pat).map_err(|e| {
Error::Validation(format!("invalid exclude regex '{}': {}", pat, e))
})?;
exclude.push(re);
}
Ok(DeviceFilter {
include,
exclude,
min_size_gib: cfg.device_selection.min_size_gib,
allow_removable: cfg.device_selection.allow_removable,
})
}
fn enforce_empty_disks(disks: &[Disk]) -> Result<()> {
for d in disks {
let empty = idempotency::is_empty_disk(d)?;
if !empty {
return Err(Error::Validation(format!(
"target disk {} is not empty (partitions or signatures present)",
d.path
)));
}
}
Ok(())
}
/// Map a partition role to its snake_case string used in summary JSON.
fn role_str(role: partition::PartRole) -> &'static str {
    use partition::PartRole::*;
    match role {
        BiosBoot => "bios_boot",
        Esp => "esp",
        Data => "data",
        Cache => "cache",
    }
}
/// Assemble the preview summary JSON emitted by --show/--report.
///
/// The payload contains: discovered disks, the spec-level partition plan,
/// the filesystems that *would* be created (inferred from role counts and
/// cfg.topology), and the per-UUID mount scheme. Status is always
/// "planned" — nothing has been applied at this point.
fn build_summary_json(disks: &[Disk], plan: &partition::PartitionPlan, cfg: &Config) -> Result<serde_json::Value> {
    // Disks summary
    let disks_json: Vec<serde_json::Value> = disks
        .iter()
        .map(|d| {
            json!({
                "path": d.path,
                "size_bytes": d.size_bytes,
                "rotational": d.rotational,
                "model": d.model,
                "serial": d.serial,
            })
        })
        .collect();
    // Partition plan summary (spec-level)
    let mut plan_json: Vec<serde_json::Value> = Vec::new();
    for dp in &plan.disks {
        let parts: Vec<serde_json::Value> = dp
            .parts
            .iter()
            .map(|p| {
                json!({
                    "role": role_str(p.role),
                    "size_mib": p.size_mib, // null means "remainder"
                    "gpt_name": p.gpt_name,
                })
            })
            .collect();
        plan_json.push(json!({
            "disk": dp.disk.path,
            "parts": parts
        }));
    }
    // Decide filesystem kinds and planned mountpoints (template) from plan + cfg.topology
    let topo_str = match cfg.topology {
        crate::types::Topology::Single => "single",
        crate::types::Topology::DualIndependent => "dual_independent",
        crate::types::Topology::SsdHddBcachefs => "ssd_hdd_bcachefs",
        crate::types::Topology::BtrfsRaid1 => "btrfs_raid1",
    };
    // Count roles across plan to infer filesystems
    let mut esp_count = 0usize;
    let mut data_count = 0usize;
    let mut cache_count = 0usize;
    for dp in &plan.disks {
        for p in &dp.parts {
            match p.role {
                partition::PartRole::Esp => esp_count += 1,
                partition::PartRole::Data => data_count += 1,
                partition::PartRole::Cache => cache_count += 1,
                // BIOS boot carries no filesystem; nothing to plan for it.
                partition::PartRole::BiosBoot => {}
            }
        }
    }
    let mut filesystems_planned: Vec<serde_json::Value> = Vec::new();
    // ESP -> vfat (typically mounted by bootloader; no runtime target here)
    if esp_count > 0 {
        filesystems_planned.push(json!({
            "kind": "vfat",
            "from_roles": ["esp"],
            "label": cfg.filesystem.vfat.label,
            "planned_mountpoint": null
        }));
    }
    // Data/cache-driven FS + mount targets. Mount scheme is per-UUID under base_dir.
    // "{UUID}" is a literal placeholder to be substituted after mkfs.
    let target_template = format!("{}/{{UUID}}", cfg.mount.base_dir);
    match cfg.topology {
        crate::types::Topology::SsdHddBcachefs => {
            // Single bcachefs spanning SSD cache + HDD backing partitions.
            if cache_count > 0 && data_count > 0 {
                filesystems_planned.push(json!({
                    "kind": "bcachefs",
                    "from_roles": ["cache", "data"],
                    "label": cfg.filesystem.bcachefs.label,
                    "planned_mountpoint_template": target_template,
                }));
            }
        }
        crate::types::Topology::BtrfsRaid1 => {
            // One multi-device btrfs across all data partitions
            if data_count >= 2 {
                filesystems_planned.push(json!({
                    "kind": "btrfs",
                    "from_roles": ["data"],
                    "devices_planned": data_count,
                    "label": cfg.filesystem.btrfs.label,
                    "planned_mountpoint_template": target_template,
                }));
            } else if data_count == 1 {
                // Degenerate case surfaced in the summary rather than erroring.
                filesystems_planned.push(json!({
                    "kind": "btrfs",
                    "from_roles": ["data"],
                    "label": cfg.filesystem.btrfs.label,
                    "planned_mountpoint_template": target_template,
                    "note": "only one data partition present; raid1 requires >= 2",
                }));
            }
        }
        _ => {
            // One btrfs per data partition
            for _ in 0..data_count {
                filesystems_planned.push(json!({
                    "kind": "btrfs",
                    "from_roles": ["data"],
                    "label": cfg.filesystem.btrfs.label,
                    "planned_mountpoint_template": target_template,
                }));
            }
        }
    }
    let mount_scheme = json!({
        "scheme": "per_uuid",
        "base_dir": cfg.mount.base_dir,
        "fstab_enabled": cfg.mount.fstab_enabled,
        "target_template": target_template,
    });
    let now = format_rfc3339(SystemTime::now()).to_string();
    let summary = json!({
        "version": "v1",
        "timestamp": now,
        "status": "planned",
        "topology": topo_str,
        "alignment_mib": plan.alignment_mib,
        "require_empty_disks": plan.require_empty_disks,
        "disks": disks_json,
        "partition_plan": plan_json,
        "filesystems_planned": filesystems_planned,
        "mount": mount_scheme
    });
    Ok(summary)
}

12
src/partition/mod.rs Normal file
View File

@@ -0,0 +1,12 @@
//! Partition module barrel.
//!
//! Re-exports the concrete planning/apply implementation from plan.rs to avoid a large mod.rs.
//! Callers use `partition::plan_partitions` / `partition::apply_partitions` directly.
//! See [src/partition/plan.rs](plan.rs) for details.
//
// REGION: API
// api: partition::plan::*
// REGION: API-END
pub mod plan;
pub use plan::*;

290
src/partition/plan.rs Normal file
View File

@@ -0,0 +1,290 @@
// REGION: API
// api: partition::PartRole { BiosBoot, Esp, Data, Cache }
// api: partition::PartitionSpec { role: PartRole, size_mib: Option<u64>, gpt_name: String }
// api: partition::DiskPlan { disk: crate::device::Disk, parts: Vec<PartitionSpec> }
// api: partition::PartitionPlan { alignment_mib: u64, disks: Vec<DiskPlan>, require_empty_disks: bool }
// api: partition::PartitionResult { disk: String, part_number: u32, role: PartRole, gpt_name: String, uuid: String, start_mib: u64, size_mib: u64, device_path: String }
// api: partition::plan_partitions(disks: &[crate::device::Disk], cfg: &crate::config::types::Config) -> crate::Result<PartitionPlan>
// api: partition::apply_partitions(plan: &PartitionPlan) -> crate::Result<Vec<PartitionResult>>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Compute a declarative GPT partitioning plan per topology with 1 MiB alignment.
// - Apply the plan safely via system tools (sgdisk) using util wrappers.
// Non-goals: filesystem creation, mounting, reporting.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: support additional partition roles (e.g., metadata) via PartRole extension.
// ext: device-specific alignment or reserved areas configurable via cfg in the future.
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: must verify require_empty_disks before any modification.
// safety: must ensure unique partition GUIDs; identical labels are allowed when expected (e.g., ESP ZOSBOOT).
// safety: must call udev settle after partition table writes.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: external tool failure -> crate::Error::Tool { tool, status, stderr }.
// errmap: validation and planning errors -> crate::Error::Partition with clear context.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement topology-aware layout including SSD/HDD cache/backing with gpt_name zoscache.
// todo: integrate blkid probing to confirm absence of FS signatures prior to changes.
// REGION: TODO-END
//! GPT partition planning and application.
//!
//! Provides declarative planning APIs and an apply step that will later
//! shell out to system tools (sgdisk) wrapped via util helpers.
//!
//! See [fn plan_partitions](plan.rs:1) and
//! [fn apply_partitions](plan.rs:1).
use crate::{types::{Config, Topology}, device::Disk, Error, Result};
/// Partition roles supported by zosstorage.
///
/// Derives `PartialEq`/`Eq` in addition to `Copy` so planners and summary
/// builders can compare roles directly instead of matching each time.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PartRole {
    /// Tiny BIOS boot partition (no filesystem).
    BiosBoot,
    /// EFI System Partition (vfat, label ZOSBOOT).
    Esp,
    /// Primary data partition.
    Data,
    /// Cache partition (for bcachefs SSD roles).
    Cache,
}
/// Declarative spec for a partition on a disk.
///
/// Specs are ordered: planners emit them in on-disk order, and at most one
/// per disk uses `size_mib: None` (the trailing remainder partition).
#[derive(Debug, Clone)]
pub struct PartitionSpec {
    /// Role of this partition.
    pub role: PartRole,
    /// Explicit size in MiB; None means "use remainder".
    pub size_mib: Option<u64>,
    /// GPT partition name (zosboot, zosdata, zoscache).
    pub gpt_name: String,
}
/// Plan for a single disk.
#[derive(Debug, Clone)]
pub struct DiskPlan {
    /// Target disk.
    pub disk: Disk,
    /// Ordered partition specs for the disk.
    pub parts: Vec<PartitionSpec>,
}
/// Full partitioning plan across all target disks.
///
/// Produced by [plan_partitions] and consumed by [apply_partitions].
#[derive(Debug, Clone)]
pub struct PartitionPlan {
    /// Alignment in MiB (1 by default).
    pub alignment_mib: u64,
    /// Plans per disk.
    pub disks: Vec<DiskPlan>,
    /// When true, abort if any target disk is not empty.
    pub require_empty_disks: bool,
}
/// Result of applying partitioning on a particular disk.
#[derive(Debug, Clone)]
pub struct PartitionResult {
    /// Parent disk path (e.g., /dev/nvme0n1).
    pub disk: String,
    /// Partition index number (1-based).
    pub part_number: u32,
    /// Role assigned to this partition.
    pub role: PartRole,
    /// GPT partition name used.
    pub gpt_name: String,
    /// Partition GUID.
    pub uuid: String,
    /// Start offset in MiB.
    pub start_mib: u64,
    /// Size in MiB.
    pub size_mib: u64,
    /// Partition device path (e.g., /dev/nvme0n1p2).
    pub device_path: String,
}
/**
Compute GPT-only plan per topology and constraints.
Layout defaults:
- BIOS boot: cfg.partitioning.bios_boot if enabled (size_mib)
- ESP: cfg.partitioning.esp.size_mib, GPT name cfg.partitioning.esp.gpt_name (typically "zosboot")
- Data: remainder, GPT name cfg.partitioning.data.gpt_name ("zosdata")
- Cache (only for SSD/HDD topology): remainder on SSD after boot/ESP, GPT name cfg.partitioning.cache.gpt_name ("zoscache")
Topology mapping:
- Single: use first eligible disk; create BIOS (opt) + ESP + Data
- DualIndependent: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
- BtrfsRaid1: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
- SsdHddBcachefs: need >=1 SSD (rotational=false) and >=1 HDD (rotational=true);
SSD: BIOS (opt) + ESP + Cache; HDD: Data
*/
pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
let align = cfg.partitioning.alignment_mib;
let require_empty = cfg.partitioning.require_empty_disks;
if disks.is_empty() {
return Err(Error::Partition("no disks provided to partition planner".into()));
}
let mut plans: Vec<DiskPlan> = Vec::new();
match cfg.topology {
Topology::Single => {
let d0 = &disks[0];
let mut parts = Vec::new();
if cfg.partitioning.bios_boot.enabled {
parts.push(PartitionSpec {
role: PartRole::BiosBoot,
size_mib: Some(cfg.partitioning.bios_boot.size_mib),
gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
});
}
parts.push(PartitionSpec {
role: PartRole::Esp,
size_mib: Some(cfg.partitioning.esp.size_mib),
gpt_name: cfg.partitioning.esp.gpt_name.clone(),
});
parts.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d0.clone(), parts });
}
Topology::DualIndependent => {
if disks.len() < 2 {
return Err(Error::Partition("DualIndependent topology requires at least 2 disks".into()));
}
let d0 = &disks[0];
let d1 = &disks[1];
// Disk 0: BIOS (opt) + ESP + Data
let mut parts0 = Vec::new();
if cfg.partitioning.bios_boot.enabled {
parts0.push(PartitionSpec {
role: PartRole::BiosBoot,
size_mib: Some(cfg.partitioning.bios_boot.size_mib),
gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
});
}
parts0.push(PartitionSpec {
role: PartRole::Esp,
size_mib: Some(cfg.partitioning.esp.size_mib),
gpt_name: cfg.partitioning.esp.gpt_name.clone(),
});
parts0.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d0.clone(), parts: parts0 });
// Disk 1: Data only
let mut parts1 = Vec::new();
parts1.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d1.clone(), parts: parts1 });
}
Topology::BtrfsRaid1 => {
if disks.len() < 2 {
return Err(Error::Partition("BtrfsRaid1 topology requires at least 2 disks".into()));
}
let d0 = &disks[0];
let d1 = &disks[1];
// Disk 0: BIOS (opt) + ESP + Data
let mut parts0 = Vec::new();
if cfg.partitioning.bios_boot.enabled {
parts0.push(PartitionSpec {
role: PartRole::BiosBoot,
size_mib: Some(cfg.partitioning.bios_boot.size_mib),
gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
});
}
parts0.push(PartitionSpec {
role: PartRole::Esp,
size_mib: Some(cfg.partitioning.esp.size_mib),
gpt_name: cfg.partitioning.esp.gpt_name.clone(),
});
parts0.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d0.clone(), parts: parts0 });
// Disk 1: Data only (for RAID1)
let mut parts1 = Vec::new();
parts1.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: d1.clone(), parts: parts1 });
}
Topology::SsdHddBcachefs => {
// Choose SSD (rotational=false) and HDD (rotational=true)
let ssd = disks.iter().find(|d| !d.rotational)
.ok_or_else(|| Error::Partition("SsdHddBcachefs requires an SSD (non-rotational) disk".into()))?;
let hdd = disks.iter().find(|d| d.rotational)
.ok_or_else(|| Error::Partition("SsdHddBcachefs requires an HDD (rotational) disk".into()))?;
// SSD: BIOS (opt) + ESP + Cache remainder
let mut parts_ssd = Vec::new();
if cfg.partitioning.bios_boot.enabled {
parts_ssd.push(PartitionSpec {
role: PartRole::BiosBoot,
size_mib: Some(cfg.partitioning.bios_boot.size_mib),
gpt_name: cfg.partitioning.bios_boot.gpt_name.clone(),
});
}
parts_ssd.push(PartitionSpec {
role: PartRole::Esp,
size_mib: Some(cfg.partitioning.esp.size_mib),
gpt_name: cfg.partitioning.esp.gpt_name.clone(),
});
parts_ssd.push(PartitionSpec {
role: PartRole::Cache,
size_mib: None,
gpt_name: cfg.partitioning.cache.gpt_name.clone(),
});
plans.push(DiskPlan { disk: ssd.clone(), parts: parts_ssd });
// HDD: Data remainder
let mut parts_hdd = Vec::new();
parts_hdd.push(PartitionSpec {
role: PartRole::Data,
size_mib: None,
gpt_name: cfg.partitioning.data.gpt_name.clone(),
});
plans.push(DiskPlan { disk: hdd.clone(), parts: parts_hdd });
}
}
Ok(PartitionPlan {
alignment_mib: align,
disks: plans,
require_empty_disks: require_empty,
})
}
/// Apply the partition plan using system utilities (sgdisk) via util wrappers.
///
/// Safety:
/// - Must verify target disks are empty when required.
/// - Must ensure unique partition GUIDs.
/// - Should call udev settle after changes.
///
/// # Panics
/// Currently unimplemented: always panics via `todo!`. Callers must not
/// invoke this until the sgdisk orchestration lands.
pub fn apply_partitions(_plan: &PartitionPlan) -> Result<Vec<PartitionResult>> {
    // To be implemented: sgdisk orchestration + udev settle + GUID collection
    todo!("shell out to sgdisk, trigger udev settle, collect partition GUIDs")
}

12
src/report/mod.rs Normal file
View File

@@ -0,0 +1,12 @@
//! Report module barrel.
//!
//! Re-exports the concrete implementation from state.rs to avoid a large mod.rs.
//! Callers use `report::build_report` / `report::write_report` directly.
//! See [src/report/state.rs](state.rs) for details.
//
// REGION: API
// api: report::state::*
// REGION: API-END
pub mod state;
pub use state::*;

80
src/report/state.rs Normal file
View File

@@ -0,0 +1,80 @@
// REGION: API
// api: report::REPORT_VERSION: &str
// api: report::StateReport { version: String, timestamp: String, status: String, disks: Vec<serde_json::Value>, partitions: Vec<serde_json::Value>, filesystems: Vec<serde_json::Value>, mounts: Vec<serde_json::Value>, error: Option<String> }
// api: report::build_report(disks: &[serde_json::Value], parts: &[serde_json::Value], fs: &[serde_json::Value], mounts: &[serde_json::Value], status: &str) -> StateReport
// api: report::write_report(report: &StateReport, path: &str) -> crate::Result<()>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Construct and persist a machine-readable JSON describing the provisioning outcome.
// - Maintain a versioned schema via REPORT_VERSION and ensure forward compatibility guidance.
// Non-goals: orchestrating actions or mutating system state beyond writing the report file.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: add typed sub-structures for disks/partitions/filesystems/mounts when schema stabilizes.
// ext: emit an additional compact/summary report for boot logs.
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: write atomically (tempfile + rename) to avoid partial report reads.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: IO/serialization errors -> crate::Error::Report with clear path/context.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement atomic write using tempfile and fs::rename.
// todo: include monotonic timestamps or sequence numbers if required.
// REGION: TODO-END
//! Machine-readable state reporting for zosstorage.
//!
//! Emits a JSON report describing discovered disks, partitions, filesystems,
//! mounts, overall status, and timestamp. The schema is versioned.
use crate::Result;
use serde::{Deserialize, Serialize};
/// Report payload version string.
///
/// Bump when the StateReport shape changes incompatibly; consumers should
/// check this before interpreting the payload.
pub const REPORT_VERSION: &str = "v1";
/// State report structure (versioned).
///
/// Serialized to JSON by [write_report]; the collection fields are kept as
/// untyped `serde_json::Value` until the schema stabilizes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StateReport {
    /// Payload version (e.g., "v1").
    pub version: String,
    /// RFC3339 timestamp.
    pub timestamp: String,
    /// "success" | "already_provisioned" | "error"
    pub status: String,
    /// Disks (shape defined by implementation; kept flexible for now).
    pub disks: Vec<serde_json::Value>,
    /// Partitions (shape defined by implementation; kept flexible for now).
    pub partitions: Vec<serde_json::Value>,
    /// Filesystems (shape defined by implementation; kept flexible for now).
    pub filesystems: Vec<serde_json::Value>,
    /// Mounts (shape defined by implementation; kept flexible for now).
    pub mounts: Vec<serde_json::Value>,
    /// Optional error message when status == "error".
    pub error: Option<String>,
}
/// Build the machine-readable state report from inputs.
///
/// The concrete shapes for disks/partitions/filesystems/mounts are intentionally
/// flexible (serde_json::Value) in this skeleton and will be formalized later.
///
/// # Panics
/// Currently unimplemented: always panics via `todo!`.
pub fn build_report(
    _disks: &[serde_json::Value],
    _parts: &[serde_json::Value],
    _fs: &[serde_json::Value],
    _mounts: &[serde_json::Value],
    _status: &str,
) -> StateReport {
    todo!("assemble structured report in v1 format with timestamp and inputs")
}
/// Write the state report JSON to disk (default path in config: /run/zosstorage/state.json).
pub fn write_report(_report: &StateReport, _path: &str) -> Result<()> {
todo!("serialize to JSON and persist atomically via tempfile and rename")
}

170
src/types.rs Normal file
View File

@@ -0,0 +1,170 @@
//! Typed configuration schema for zosstorage.
//!
//! Mirrors docs in [docs/SCHEMA.md](docs/SCHEMA.md) and is loaded/validated by
//! [fn load_and_merge()](src/config/loader.rs:1) and [fn validate()](src/config/loader.rs:1).
use serde::{Deserialize, Serialize};
/// Logging configuration (verbosity and optional file sink).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoggingConfig {
    /// Log level: "error" | "warn" | "info" | "debug"
    pub level: String, // default "info"
    /// When true, also log to /run/zosstorage/zosstorage.log
    pub to_file: bool, // default false
}
/// Device selection and filtering rules applied during discovery.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeviceSelection {
    /// Regex patterns to include device paths (Rust regex).
    pub include_patterns: Vec<String>,
    /// Regex patterns to exclude device paths.
    pub exclude_patterns: Vec<String>,
    /// Whether to include removable devices (future).
    pub allow_removable: bool,
    /// Minimum device size (GiB) to consider eligible.
    pub min_size_gib: u64,
}
/// Desired storage topology.
///
/// Serialized as snake_case strings ("single", "dual_independent", …).
/// Derives `Copy`/`PartialEq`/`Eq` so the mode can be compared and passed
/// by value without cloning (it is matched on throughout planning code).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum Topology {
    /// Single eligible disk; btrfs on remainder.
    Single,
    /// Two eligible disks; independent btrfs on each data partition.
    DualIndependent,
    /// SSD + HDD; bcachefs with SSD cache/promote and HDD backing.
    SsdHddBcachefs,
    /// Optional mirrored btrfs across two disks when explicitly requested.
    BtrfsRaid1,
}
/// BIOS boot partition parameters (legacy-boot compatibility stub).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BiosBootSpec {
    /// Whether to create a tiny BIOS boot partition.
    pub enabled: bool,
    /// Size in MiB (default 1).
    pub size_mib: u64,
    /// GPT partition name (e.g., "zosboot").
    pub gpt_name: String,
}
/// EFI System Partition parameters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EspSpec {
    /// ESP size in MiB (default 512).
    pub size_mib: u64,
    /// Filesystem label for ESP (ZOSBOOT).
    pub label: String,
    /// GPT partition name (e.g., "zosboot").
    pub gpt_name: String,
}
/// Data partition parameters (sized as the disk remainder).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DataSpec {
    /// GPT partition name for data (e.g., "zosdata").
    pub gpt_name: String,
}
/// Cache partition parameters (sized as the SSD remainder).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheSpec {
    /// GPT partition name for cache partitions (e.g., "zoscache").
    pub gpt_name: String,
}
/// Aggregate partitioning parameters consumed by the planner.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Partitioning {
    /// Alignment in MiB (default 1 MiB).
    pub alignment_mib: u64,
    /// Abort if any target disk is not empty (default true).
    pub require_empty_disks: bool,
    /// BIOS boot partition spec.
    pub bios_boot: BiosBootSpec,
    /// ESP partition spec.
    pub esp: EspSpec,
    /// Data partition spec.
    pub data: DataSpec,
    /// Cache partition spec (only in ssd_hdd_bcachefs).
    pub cache: CacheSpec,
}
/// btrfs creation/tuning options.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BtrfsOptions {
    /// Filesystem label (ZOSDATA).
    pub label: String,
    /// Compression string (e.g., "zstd:3").
    pub compression: String,
    /// "none" | "raid1"
    pub raid_profile: String,
}
/// bcachefs creation/tuning options.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BcachefsOptions {
    /// Filesystem label (ZOSDATA).
    pub label: String,
    /// "promote" | "writeback" (if supported).
    pub cache_mode: String,
    /// Compression algorithm (e.g., "zstd").
    pub compression: String,
    /// Checksum algorithm (e.g., "crc32c").
    pub checksum: String,
}
/// vfat creation options for the ESP.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VfatOptions {
    /// Filesystem label (ZOSBOOT).
    pub label: String,
}
/// Per-filesystem tuning bundle referenced by Config.filesystem.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FsOptions {
    /// btrfs tuning.
    pub btrfs: BtrfsOptions,
    /// bcachefs tuning.
    pub bcachefs: BcachefsOptions,
    /// vfat tuning for ESP.
    pub vfat: VfatOptions,
}
/// How data filesystems are mapped to mountpoints.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum MountSchemeKind {
    /// Mount under /var/cache/<UUID>
    PerUuid,
    /// Reserved for future custom mappings.
    Custom,
}
/// Mount scheme and fstab policy.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MountScheme {
    /// Base directory (default: /var/cache).
    pub base_dir: String,
    /// Scheme kind (PerUuid | Custom (reserved)).
    pub scheme: MountSchemeKind,
    /// When true, write /etc/fstab entries (disabled by default).
    pub fstab_enabled: bool,
}
/// Report output configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportOptions {
    /// Path for JSON state report (default: /run/zosstorage/state.json).
    pub path: String,
}
/// Top-level zosstorage configuration (schema documented in docs/SCHEMA.md).
///
/// Loaded and validated by the config loader; treated as read-only by the
/// rest of the pipeline.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// Schema version (start at 1).
    pub version: u32,
    /// Logging configuration.
    pub logging: LoggingConfig,
    /// Device selection and filtering rules.
    pub device_selection: DeviceSelection,
    /// Desired topology mode.
    pub topology: Topology,
    /// Partitioning parameters.
    pub partitioning: Partitioning,
    /// Filesystem options and tuning.
    pub filesystem: FsOptions,
    /// Mount scheme and fstab policy.
    pub mount: MountScheme,
    /// Report output configuration.
    pub report: ReportOptions,
}

197
src/util/mod.rs Normal file
View File

@@ -0,0 +1,197 @@
// REGION: API
// api: util::CmdOutput { status: i32, stdout: String, stderr: String }
// api: util::which_tool(name: &str) -> crate::Result&lt;Option&lt;String&gt;&gt;
// api: util::run_cmd(args: &[&str]) -> crate::Result&lt;()&gt;
// api: util::run_cmd_capture(args: &[&str]) -> crate::Result&lt;CmdOutput&gt;
// api: util::udev_settle(timeout_ms: u64) -> crate::Result&lt;()&gt;
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - Centralize external tool discovery and invocation (sgdisk, blkid, mkfs.*, udevadm).
// - Provide capture and error mapping to crate::Error consistently.
// Non-goals: business logic (planning/validation), direct parsing of complex outputs beyond what callers need.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: pluggable command runner for tests/dry-run; inject via cfg(test) or trait in future.
// ext: backoff/retry policies for transient tool failures (feature-gated).
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: never mutate state here except invoking intended external tools; callers enforce preconditions.
// safety: capture stderr/stdout to aid diagnostics without leaking sensitive data.
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: process::ExitStatus non-zero -> crate::Error::Tool { tool, status, stderr }.
// errmap: IO/spawn errors -> crate::Error::Other(anyhow) with context.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement which_tool via 'which' crate; consider PATH overrides in initramfs.
// todo: implement run_cmd and run_cmd_capture with tracing spans.
// todo: implement udev_settle to no-op if udevadm missing, with warn-level log.
// REGION: TODO-END
//! Utility helpers for external tool invocation and system integration.
//!
//! All shell-outs are centralized here to enable testing, structured logging,
//! and consistent error handling.
use crate::{Error, Result};
use std::process::Command;
use tracing::{debug, warn};
/// Captured output from an external tool invocation.
#[derive(Debug, Clone)]
pub struct CmdOutput {
/// Process exit status code.
pub status: i32,
/// Captured stdout as UTF-8 (lossy if needed).
pub stdout: String,
/// Captured stderr as UTF-8 (lossy if needed).
pub stderr: String,
}
/// Locate the absolute path to a required tool if available in PATH.
///
/// Returns Ok(Some(path)) when found, Ok(None) when missing.
/// "Missing" maps only from CannotFindBinaryPath; any other `which` error
/// (e.g., unreadable PATH entries) is surfaced as Error::Other.
pub fn which_tool(name: &str) -> Result<Option<String>> {
    match which::which(name) {
        Ok(path) => Ok(Some(path.to_string_lossy().into_owned())),
        Err(which::Error::CannotFindBinaryPath) => Ok(None),
        Err(e) => Err(Error::Other(anyhow::anyhow!("which({name}) failed: {e}"))),
    }
}
/// Run a command and return Ok if the exit status is zero.
///
/// args[0] must be the program binary path; the rest are arguments.
/// On non-zero exit, returns Error::Tool with captured stderr.
pub fn run_cmd(args: &[&str]) -> Result<()> {
if args.is_empty() {
return Err(Error::Other(anyhow::anyhow!(
"run_cmd requires at least one arg (the program)"
)));
}
debug!(target: "util.run_cmd", "exec: {:?}", args);
let output = Command::new(args[0]).args(&args[1..]).output().map_err(|e| {
Error::Other(anyhow::anyhow!("failed to spawn {:?}: {}", args, e))
})?;
let status_code = output.status.code().unwrap_or(-1);
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr).to_string();
return Err(Error::Tool {
tool: args[0].to_string(),
status: status_code,
stderr,
});
}
Ok(())
}
/// Run a command and capture stdout/stderr for parsing (e.g., blkid).
///
/// On non-zero exit, returns Error::Tool with captured stderr and status.
pub fn run_cmd_capture(args: &[&str]) -> Result<CmdOutput> {
if args.is_empty() {
return Err(Error::Other(anyhow::anyhow!(
"run_cmd_capture requires at least one arg (the program)"
)));
}
debug!(target: "util.run_cmd_capture", "exec: {:?}", args);
let output = Command::new(args[0]).args(&args[1..]).output().map_err(|e| {
Error::Other(anyhow::anyhow!("failed to spawn {:?}: {}", args, e))
})?;
let status_code = output.status.code().unwrap_or(-1);
let stdout = String::from_utf8_lossy(&output.stdout).to_string();
let stderr = String::from_utf8_lossy(&output.stderr).to_string();
if !output.status.success() {
return Err(Error::Tool {
tool: args[0].to_string(),
status: status_code,
stderr,
});
}
Ok(CmdOutput {
status: status_code,
stdout,
stderr,
})
}
/// Call udevadm settle with a timeout; warn if unavailable, then no-op.
///
/// Ensures the system has settled after partition table changes.
///
/// `timeout_ms` is rounded *up* to whole seconds, because `udevadm settle
/// --timeout` takes seconds. The previous flooring division turned any
/// sub-second timeout (e.g. 500ms) into `--timeout=0`, which returns
/// immediately without waiting at all.
///
/// Spawn failures and non-zero exits are deliberately tolerated (logged,
/// then Ok) since some initramfs environments run without udev.
pub fn udev_settle(timeout_ms: u64) -> Result<()> {
    // Locate udevadm; a missing tool is non-fatal in minimal environments.
    let Some(udevadm) = which_tool("udevadm")? else {
        warn!("udevadm not found; skipping udev settle");
        return Ok(());
    };
    // Ceiling division: any positive timeout waits at least 1 second.
    let timeout_secs = (timeout_ms + 999) / 1000;
    let timeout_arg = format!("--timeout={}", timeout_secs);
    // We intentionally ignore non-zero exit as some initramfs may not have udev running.
    match Command::new(&udevadm).arg("settle").arg(&timeout_arg).status() {
        Ok(_status) => {
            debug!("udevadm settle invoked");
            Ok(())
        }
        Err(e) => {
            warn!("failed to invoke udevadm settle: {}", e);
            Ok(())
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // These tests rely only on POSIX 'sh' being present, which holds on
    // all target initramfs/build environments.
    #[test]
    fn which_tool_finds_sh() {
        // 'sh' should exist on virtually all Linux systems
        let p = which_tool("sh").expect("which_tool failed");
        assert!(p.is_some(), "expected to find 'sh' in PATH");
    }
    #[test]
    fn which_tool_missing() {
        let p = which_tool("definitely-not-a-cmd-xyz").expect("which_tool failed");
        assert!(p.is_none());
    }
    #[test]
    fn run_cmd_true_ok() {
        // Use sh -c true to ensure availability
        run_cmd(&["sh", "-c", "true"]).expect("true should succeed");
    }
    #[test]
    fn run_cmd_false_err() {
        // Non-zero exit must surface as Error::Tool with the program name.
        let err = run_cmd(&["sh", "-c", "false"]).unwrap_err();
        match err {
            Error::Tool { tool, status, .. } => {
                assert_eq!(tool, "sh");
                assert_ne!(status, 0);
            }
            other => panic!("expected Error::Tool, got: {:?}", other),
        }
    }
    #[test]
    fn run_cmd_capture_echo_stdout() {
        // printf avoids the trailing newline echo would add.
        let out = run_cmd_capture(&["sh", "-c", "printf hello"]).expect("capture ok");
        assert_eq!(out.stdout, "hello");
        assert_eq!(out.status, 0);
    }
    #[test]
    fn udev_settle_ok_even_if_missing() {
        // Should never fail even if udevadm is missing.
        udev_settle(1000).expect("udev_settle should be non-fatal");
    }
}