feat: first-draft preview-capable zosstorage

- CLI: add topology selection (-t/--topology), preview flags (--show/--report), and removable policy override (--allow-removable) (src/cli/args.rs)
- Config: built-in sensible defaults; deterministic overlays for logging, fstab, removable, topology (src/config/loader.rs)
- Device: discovery via /proc + /sys with include/exclude regex and removable policy (src/device/discovery.rs)
- Idempotency: detection via blkid; safe emptiness checks (src/idempotency/mod.rs)
- Partition: topology-driven planning (Single, DualIndependent, BtrfsRaid1, SsdHddBcachefs) (src/partition/plan.rs)
- FS: planning + creation (mkfs.vfat, mkfs.btrfs, bcachefs format) and UUID capture via blkid (src/fs/plan.rs)
- Orchestrator: pre-flight with preview JSON (disks, partition_plan, filesystems_planned, mount scheme). Skips emptiness in preview; supports stdout+file (src/orchestrator/run.rs)
- Util/Logging/Types/Errors: process execution, tracing, shared types (src/util/mod.rs, src/logging/mod.rs, src/types.rs, src/errors.rs)
- Docs: add README with exhaustive usage and preview JSON shape (README.md)

Builds and unit tests pass: discovery, util, idempotency helpers, and fs parser tests.
This commit is contained in:
2025-09-29 11:37:07 +02:00
commit 507bc172c2
38 changed files with 6558 additions and 0 deletions

372
src/orchestrator/run.rs Normal file
View File

@@ -0,0 +1,372 @@
// REGION: API
// api: orchestrator::Context { cfg: crate::config::types::Config, log: crate::logging::LogOptions }
// api: orchestrator::Context::new(cfg: crate::config::types::Config, log: crate::logging::LogOptions) -> Self
// api: orchestrator::run(ctx: &Context) -> crate::Result<()>
// REGION: API-END
//
// REGION: RESPONSIBILITIES
// - High-level one-shot flow controller: idempotency check, device discovery,
// partition planning and application, filesystem creation, mounting, reporting.
// - Enforces abort-on-first-error semantics across subsystems.
// Non-goals: direct device IO or shelling out; delegates to subsystem modules.
// REGION: RESPONSIBILITIES-END
//
// REGION: EXTENSION_POINTS
// ext: pluggable DeviceProvider for discovery (mocking/testing).
// ext: dry-run mode (future) to emit planned actions without applying.
// ext: hooks before/after each phase for metrics or additional validation.
// REGION: EXTENSION_POINTS-END
//
// REGION: SAFETY
// safety: must never proceed to filesystem creation if partition planning/apply failed.
// safety: must exit success without changes when idempotency detection indicates provisioned.
// safety: must ensure reporting only on overall success (no partial-success report).
// REGION: SAFETY-END
//
// REGION: ERROR_MAPPING
// errmap: subsystem errors bubble up as crate::Error::* without stringly-typed loss.
// errmap: external tool failures are expected as Error::Tool from util layer.
// REGION: ERROR_MAPPING-END
//
// REGION: TODO
// todo: implement orchestration steps in phases with structured logs and timing.
// todo: add per-phase tracing spans and outcome summaries.
// REGION: TODO-END
//! High-level orchestration for zosstorage.
//!
//! Drives the one-shot provisioning flow:
//! - Idempotency detection
//! - Device discovery
//! - Partition planning and application
//! - Filesystem planning and creation
//! - Mount planning and application
//! - Report generation and write
use crate::{
types::Config,
logging::LogOptions,
device::{discover, DeviceFilter, Disk},
idempotency,
partition,
Error, Result,
};
use humantime::format_rfc3339;
use regex::Regex;
use serde_json::{json, to_value};
use std::fs;
use std::time::SystemTime;
use tracing::{debug, info, warn};
/// Execution context holding resolved configuration and environment flags.
///
/// Built once by the caller (CLI layer) and passed by reference to [`run`].
#[derive(Debug, Clone)]
pub struct Context {
    /// Validated configuration driving discovery, planning, and reporting.
    pub cfg: Config,
    /// Logging options in effect for this run.
    pub log: LogOptions,
    /// When true, print detection and planning summary to stdout (JSON).
    pub show: bool,
    /// Optional report path override (when provided by CLI --report).
    /// When set, the JSON summary is also written to this file.
    pub report_path_override: Option<String>,
}
impl Context {
    /// Construct a new context from config and logging options.
    ///
    /// Preview output starts disabled: `show` is false and no report path is
    /// set. Use the builder methods below to opt in.
    pub fn new(cfg: Config, log: LogOptions) -> Self {
        Self {
            cfg,
            log,
            show: false,
            report_path_override: None,
        }
    }

    /// Builder: enable (or disable) printing the summary JSON to stdout.
    pub fn with_show(self, show: bool) -> Self {
        Self { show, ..self }
    }

    /// Builder: set (or clear) the report path override.
    pub fn with_report_path(self, path: Option<String>) -> Self {
        Self {
            report_path_override: path,
            ..self
        }
    }
}
/// Run the one-shot provisioning flow.
///
/// Returns Ok(()) on success and also on success-noop when already provisioned.
/// Any validation or execution failure aborts with an error.
pub fn run(ctx: &Context) -> Result<()> {
info!("orchestrator: starting run() with topology {:?}", ctx.cfg.topology);
// 1) Idempotency pre-flight: if already provisioned, optionally emit summary then exit success.
match idempotency::detect_existing_state()? {
Some(state) => {
info!("orchestrator: already provisioned");
if ctx.show || ctx.report_path_override.is_some() {
let now = format_rfc3339(SystemTime::now()).to_string();
let state_json = to_value(&state).map_err(|e| {
Error::Report(format!("failed to serialize StateReport: {}", e))
})?;
let summary = json!({
"version": "v1",
"timestamp": now,
"status": "already_provisioned",
"state": state_json
});
if ctx.show {
println!("{}", summary);
}
if let Some(path) = &ctx.report_path_override {
fs::write(path, summary.to_string())
.map_err(|e| Error::Report(format!("failed to write report to {}: {}", path, e)))?;
info!("orchestrator: wrote idempotency report to {}", path);
}
}
return Ok(());
}
None => {
debug!("orchestrator: not provisioned; continuing");
}
}
// 2) Device discovery using compiled filter from config.
let filter = build_device_filter(&ctx.cfg)?;
let disks = discover(&filter)?;
info!("orchestrator: discovered {} eligible disk(s)", disks.len());
// 3) Emptiness enforcement: skip in preview mode (--show/--report) to allow planning output.
let preview = ctx.show || ctx.report_path_override.is_some();
if ctx.cfg.partitioning.require_empty_disks && !preview {
enforce_empty_disks(&disks)?;
info!("orchestrator: all target disks verified empty");
} else if ctx.cfg.partitioning.require_empty_disks && preview {
warn!("orchestrator: preview mode detected (--show/--report); skipping empty-disk enforcement");
} else {
warn!("orchestrator: require_empty_disks=false; proceeding without emptiness enforcement");
}
// 4) Partition planning (declarative only; application not yet implemented in this step).
let plan = partition::plan_partitions(&disks, &ctx.cfg)?;
debug!(
"orchestrator: partition plan ready (alignment={} MiB, disks={})",
plan.alignment_mib,
plan.disks.len()
);
for dp in &plan.disks {
debug!("plan for {}: {} part(s)", dp.disk.path, dp.parts.len());
}
// Note:
// - Applying partitions, creating filesystems, mounting, and reporting
// will be wired in subsequent steps. For now this performs pre-flight
// checks and planning to exercise real code paths safely.
info!("orchestrator: pre-flight complete (idempotency checked, devices discovered, plan computed)");
// Optional: emit JSON summary via --show or write via --report
if ctx.show || ctx.report_path_override.is_some() {
let summary = build_summary_json(&disks, &plan, &ctx.cfg)?;
if ctx.show {
// Print compact JSON to stdout
println!("{}", summary);
}
if let Some(path) = &ctx.report_path_override {
// Best-effort write (non-atomic for now, pending report::write_report implementation)
fs::write(path, summary.to_string()).map_err(|e| {
Error::Report(format!("failed to write report to {}: {}", path, e))
})?;
info!("orchestrator: wrote summary report to {}", path);
}
}
Ok(())
}
/// Compile the config's include/exclude patterns into a [`DeviceFilter`].
///
/// Any malformed regex is surfaced as `Error::Validation` naming the
/// offending pattern; compilation stops at the first failure.
fn build_device_filter(cfg: &Config) -> Result<DeviceFilter> {
    let include = cfg
        .device_selection
        .include_patterns
        .iter()
        .map(|pat| {
            Regex::new(pat).map_err(|e| {
                Error::Validation(format!("invalid include regex '{}': {}", pat, e))
            })
        })
        .collect::<Result<Vec<_>>>()?;
    let exclude = cfg
        .device_selection
        .exclude_patterns
        .iter()
        .map(|pat| {
            Regex::new(pat).map_err(|e| {
                Error::Validation(format!("invalid exclude regex '{}': {}", pat, e))
            })
        })
        .collect::<Result<Vec<_>>>()?;
    Ok(DeviceFilter {
        include,
        exclude,
        min_size_gib: cfg.device_selection.min_size_gib,
        allow_removable: cfg.device_selection.allow_removable,
    })
}
/// Fail with `Error::Validation` if any target disk is non-empty.
///
/// Delegates the actual emptiness probe to `idempotency::is_empty_disk`;
/// aborts on the first disk that still carries partitions or signatures.
fn enforce_empty_disks(disks: &[Disk]) -> Result<()> {
    for disk in disks {
        if !idempotency::is_empty_disk(disk)? {
            return Err(Error::Validation(format!(
                "target disk {} is not empty (partitions or signatures present)",
                disk.path
            )));
        }
    }
    Ok(())
}
/// Map a partition role to its stable snake_case identifier used in reports.
fn role_str(role: partition::PartRole) -> &'static str {
    use partition::PartRole::{BiosBoot, Cache, Data, Esp};
    match role {
        BiosBoot => "bios_boot",
        Esp => "esp",
        Data => "data",
        Cache => "cache",
    }
}
fn build_summary_json(disks: &[Disk], plan: &partition::PartitionPlan, cfg: &Config) -> Result<serde_json::Value> {
// Disks summary
let disks_json: Vec<serde_json::Value> = disks
.iter()
.map(|d| {
json!({
"path": d.path,
"size_bytes": d.size_bytes,
"rotational": d.rotational,
"model": d.model,
"serial": d.serial,
})
})
.collect();
// Partition plan summary (spec-level)
let mut plan_json: Vec<serde_json::Value> = Vec::new();
for dp in &plan.disks {
let parts: Vec<serde_json::Value> = dp
.parts
.iter()
.map(|p| {
json!({
"role": role_str(p.role),
"size_mib": p.size_mib, // null means "remainder"
"gpt_name": p.gpt_name,
})
})
.collect();
plan_json.push(json!({
"disk": dp.disk.path,
"parts": parts
}));
}
// Decide filesystem kinds and planned mountpoints (template) from plan + cfg.topology
let topo_str = match cfg.topology {
crate::types::Topology::Single => "single",
crate::types::Topology::DualIndependent => "dual_independent",
crate::types::Topology::SsdHddBcachefs => "ssd_hdd_bcachefs",
crate::types::Topology::BtrfsRaid1 => "btrfs_raid1",
};
// Count roles across plan to infer filesystems
let mut esp_count = 0usize;
let mut data_count = 0usize;
let mut cache_count = 0usize;
for dp in &plan.disks {
for p in &dp.parts {
match p.role {
partition::PartRole::Esp => esp_count += 1,
partition::PartRole::Data => data_count += 1,
partition::PartRole::Cache => cache_count += 1,
partition::PartRole::BiosBoot => {}
}
}
}
let mut filesystems_planned: Vec<serde_json::Value> = Vec::new();
// ESP -> vfat (typically mounted by bootloader; no runtime target here)
if esp_count > 0 {
filesystems_planned.push(json!({
"kind": "vfat",
"from_roles": ["esp"],
"label": cfg.filesystem.vfat.label,
"planned_mountpoint": null
}));
}
// Data/cache-driven FS + mount targets. Mount scheme is per-UUID under base_dir.
let target_template = format!("{}/{{UUID}}", cfg.mount.base_dir);
match cfg.topology {
crate::types::Topology::SsdHddBcachefs => {
if cache_count > 0 && data_count > 0 {
filesystems_planned.push(json!({
"kind": "bcachefs",
"from_roles": ["cache", "data"],
"label": cfg.filesystem.bcachefs.label,
"planned_mountpoint_template": target_template,
}));
}
}
crate::types::Topology::BtrfsRaid1 => {
// One multi-device btrfs across all data partitions
if data_count >= 2 {
filesystems_planned.push(json!({
"kind": "btrfs",
"from_roles": ["data"],
"devices_planned": data_count,
"label": cfg.filesystem.btrfs.label,
"planned_mountpoint_template": target_template,
}));
} else if data_count == 1 {
filesystems_planned.push(json!({
"kind": "btrfs",
"from_roles": ["data"],
"label": cfg.filesystem.btrfs.label,
"planned_mountpoint_template": target_template,
"note": "only one data partition present; raid1 requires >= 2",
}));
}
}
_ => {
// One btrfs per data partition
for _ in 0..data_count {
filesystems_planned.push(json!({
"kind": "btrfs",
"from_roles": ["data"],
"label": cfg.filesystem.btrfs.label,
"planned_mountpoint_template": target_template,
}));
}
}
}
let mount_scheme = json!({
"scheme": "per_uuid",
"base_dir": cfg.mount.base_dir,
"fstab_enabled": cfg.mount.fstab_enabled,
"target_template": target_template,
});
let now = format_rfc3339(SystemTime::now()).to_string();
let summary = json!({
"version": "v1",
"timestamp": now,
"status": "planned",
"topology": topo_str,
"alignment_mib": plan.alignment_mib,
"require_empty_disks": plan.require_empty_disks,
"disks": disks_json,
"partition_plan": plan_json,
"filesystems_planned": filesystems_planned,
"mount": mount_scheme
});
Ok(summary)
}