Compare commits
8 Commits
d374176c0b
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 4cd8c54c44 | |||
| 224adf06d8 | |||
| 69370a2f53 | |||
| 3d14f77516 | |||
| 5746e285b2 | |||
| cc126d77b4 | |||
| 285adeead4 | |||
| c8b76a2a3d |
38
README.md
38
README.md
@@ -20,14 +20,14 @@ Key modules
|
||||
- [src/partition/plan.rs](src/partition/plan.rs)
|
||||
- Filesystem planning/creation and mkfs integration:
|
||||
- [src/fs/plan.rs](src/fs/plan.rs)
|
||||
- Mount planning and application (skeleton):
|
||||
- Mount planning and application:
|
||||
- [src/mount/ops.rs](src/mount/ops.rs)
|
||||
|
||||
Features at a glance
|
||||
- Topology-driven planning with built-in defaults: BtrfsSingle, BcachefsSingle, DualIndependent, Bcachefs2Copy, BtrfsRaid1, SsdHddBcachefs
|
||||
- Non-destructive preview: --show/--report outputs JSON summary (disks, partition plan, filesystems, planned mountpoints)
|
||||
- Topology auto-selection with built-in defaults; optional kernel cmdline override via `zosstorage.topology=` (see ADR-0002)
|
||||
- Non-destructive preview: `--show`/`--report` outputs JSON summary (disks, partition plan, filesystems, planned mountpoints)
|
||||
- Safe discovery: excludes removable media by default (USB sticks) unless explicitly allowed
|
||||
- Config-optional: the tool runs without any YAML; sensible defaults are always present and may be overridden/merged by config
|
||||
- No external YAML configuration; defaults-only per ADR-0002 (sane built-ins, topology may be overridden by kernel cmdline)
|
||||
|
||||
Requirements
|
||||
- Linux with /proc and /sys mounted (initramfs friendly)
|
||||
@@ -45,8 +45,6 @@ Install and build
|
||||
Binary is target/release/zosstorage.
|
||||
|
||||
CLI usage
|
||||
- Topology selection (config optional):
|
||||
-t, --topology btrfs-single|bcachefs-single|dual-independent|bcachefs-2copy|btrfs-raid1|ssd-hdd-bcachefs
|
||||
- Preview (non-destructive):
|
||||
--show Print JSON summary to stdout
|
||||
--report PATH Write JSON summary to a file
|
||||
@@ -56,22 +54,30 @@ CLI usage
|
||||
-l, --log-level LEVEL error|warn|info|debug (default: info)
|
||||
-L, --log-to-file Also write logs to /run/zosstorage/zosstorage.log
|
||||
- Other:
|
||||
-c, --config PATH Merge a YAML config file (overrides defaults)
|
||||
-s, --fstab Enable writing /etc/fstab entries (when mounts are applied)
|
||||
-a, --apply Perform partitioning, filesystem creation, and mounts (destructive)
|
||||
-f, --force Present but not implemented (returns an error)
|
||||
|
||||
Deprecated (ignored with warning; see ADR-0002)
|
||||
-t, --topology VALUE Ignored; use kernel cmdline `zosstorage.topology=` instead
|
||||
-c, --config PATH Ignored; external YAML configuration is not used at runtime
|
||||
|
||||
Examples
|
||||
- Single disk plan with debug logs:
|
||||
sudo ./zosstorage --show -t btrfs-single -l debug
|
||||
- RAID1 btrfs across two disks; print and write summary:
|
||||
sudo ./zosstorage --show --report /run/zosstorage/plan.json -t btrfs-raid1 -l debug -L
|
||||
- SSD+HDD bcachefs plan, include removable devices (for lab cases):
|
||||
sudo ./zosstorage --show -t ssd-hdd-bcachefs --allow-removable -l debug
|
||||
- Single disk plan with debug logs (defaults to btrfs_single automatically):
|
||||
sudo ./zosstorage --show -l debug
|
||||
- Two-disk plan (defaults to dual_independent automatically), write summary:
|
||||
sudo ./zosstorage --show --report /run/zosstorage/plan.json -l debug -L
|
||||
- Include removable devices for lab scenarios:
|
||||
sudo ./zosstorage --show --allow-removable -l debug
|
||||
- Quiet plan to file:
|
||||
sudo ./zosstorage --report /run/zosstorage/plan.json -t dual-independent
|
||||
- Apply single-disk btrfs (DESTRUCTIVE; wipes target disk):
|
||||
sudo ./zosstorage --apply -t btrfs-single
|
||||
sudo ./zosstorage --report /run/zosstorage/plan.json
|
||||
- Apply single-disk plan (DESTRUCTIVE; wipes target disk; defaults select topology automatically):
|
||||
sudo ./zosstorage --apply
|
||||
|
||||
Kernel cmdline override (at boot)
|
||||
- To force a topology, pass one of:
|
||||
zosstorage.topology=btrfs-single | bcachefs-single | dual-independent | btrfs-raid1 | ssd-hdd-bcachefs | bcachefs-2copy
|
||||
- The override affects only topology; all other settings use sane built-in defaults.
|
||||
|
||||
Preview JSON shape (examples)
|
||||
1) Already provisioned (idempotency success):
|
||||
|
||||
@@ -1,185 +0,0 @@
|
||||
# zosstorage example configuration (full surface)
|
||||
# Copy to /etc/zosstorage/config.yaml on the target system, or pass with:
|
||||
# - CLI: --config /path/to/your.yaml
|
||||
# - Kernel cmdline: zosstorage.config=/path/to/your.yaml
|
||||
# Precedence (highest to lowest):
|
||||
# kernel cmdline > CLI flags > CLI --config file > /etc/zosstorage/config.yaml > built-in defaults
|
||||
|
||||
version: 1
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Logging
|
||||
# -----------------------------------------------------------------------------
|
||||
logging:
|
||||
# one of: error, warn, info, debug
|
||||
level: info
|
||||
# when true, also logs to /run/zosstorage/zosstorage.log in initramfs
|
||||
to_file: false
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Device selection rules
|
||||
# - include_patterns: device paths that are considered
|
||||
# - exclude_patterns: device paths to filter out
|
||||
# - allow_removable: future toggle for removable media (kept false by default)
|
||||
# - min_size_gib: ignore devices smaller than this size
|
||||
# -----------------------------------------------------------------------------
|
||||
device_selection:
|
||||
include_patterns:
|
||||
- "^/dev/sd\\w+$"
|
||||
- "^/dev/nvme\\w+n\\d+$"
|
||||
- "^/dev/vd\\w+$"
|
||||
exclude_patterns:
|
||||
- "^/dev/ram\\d+$"
|
||||
- "^/dev/zram\\d+$"
|
||||
- "^/dev/loop\\d+$"
|
||||
- "^/dev/fd\\d+$"
|
||||
allow_removable: false
|
||||
min_size_gib: 10
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Desired topology (choose ONE)
|
||||
# single : Single eligible disk; btrfs on data
|
||||
# dual_independent : Two disks; independent btrfs on each
|
||||
# ssd_hdd_bcachefs : SSD + HDD; bcachefs with SSD as cache/promote and HDD backing
|
||||
# btrfs_raid1 : Optional mirrored btrfs across two disks (only when explicitly requested)
|
||||
# -----------------------------------------------------------------------------
|
||||
topology:
|
||||
mode: single
|
||||
# mode: dual_independent
|
||||
# mode: ssd_hdd_bcachefs
|
||||
# mode: btrfs_raid1
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Partitioning (GPT only)
|
||||
# Reserved GPT names:
|
||||
# - bios boot : "zosboot" (tiny BIOS boot partition, non-FS)
|
||||
# - ESP : "zosboot" (FAT32)
|
||||
# - Data : "zosdata"
|
||||
# - Cache : "zoscache" (only for ssd_hdd_bcachefs)
|
||||
# Reserved filesystem labels:
|
||||
# - ESP : ZOSBOOT
|
||||
# - Data (all filesystems including bcachefs): ZOSDATA
|
||||
# -----------------------------------------------------------------------------
|
||||
partitioning:
|
||||
# 1 MiB alignment
|
||||
alignment_mib: 1
|
||||
|
||||
# Abort if any target disk is not empty (required for safety)
|
||||
require_empty_disks: true
|
||||
|
||||
bios_boot:
|
||||
enabled: true
|
||||
size_mib: 1
|
||||
gpt_name: zosboot
|
||||
|
||||
esp:
|
||||
size_mib: 512
|
||||
label: ZOSBOOT
|
||||
gpt_name: zosboot
|
||||
|
||||
data:
|
||||
gpt_name: zosdata
|
||||
|
||||
# Only used in ssd_hdd_bcachefs
|
||||
cache:
|
||||
gpt_name: zoscache
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Filesystem options and tuning
|
||||
# All data filesystems (btrfs or bcachefs) use label ZOSDATA
|
||||
# ESP uses label ZOSBOOT
|
||||
# -----------------------------------------------------------------------------
|
||||
filesystem:
|
||||
btrfs:
|
||||
# Reserved; must be "ZOSDATA"
|
||||
label: ZOSDATA
|
||||
# e.g., "zstd:3", "zstd:5"
|
||||
compression: zstd:3
|
||||
# "none" | "raid1" (raid1 typically when topology.mode == btrfs_raid1)
|
||||
raid_profile: none
|
||||
|
||||
bcachefs:
|
||||
# Reserved; must be "ZOSDATA"
|
||||
label: ZOSDATA
|
||||
# "promote" (default) or "writeback" if supported by environment
|
||||
cache_mode: promote
|
||||
# Compression algorithm, e.g., "zstd"
|
||||
compression: zstd
|
||||
# Checksum algorithm, e.g., "crc32c"
|
||||
checksum: crc32c
|
||||
|
||||
vfat:
|
||||
# Reserved; must be "ZOSBOOT"
|
||||
label: ZOSBOOT
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Mount scheme and optional fstab
|
||||
# Default behavior mounts data filesystems under /var/cache/<UUID>
|
||||
# -----------------------------------------------------------------------------
|
||||
mount:
|
||||
# Base directory for mounts
|
||||
base_dir: /var/cache
|
||||
# Scheme: per_uuid | custom (custom reserved for future)
|
||||
scheme: per_uuid
|
||||
# When true, zosstorage will generate /etc/fstab entries in deterministic order
|
||||
fstab_enabled: false
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Report output
|
||||
# JSON report is written after successful provisioning
|
||||
# -----------------------------------------------------------------------------
|
||||
report:
|
||||
path: /run/zosstorage/state.json
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Examples for different topologies (uncomment and set topology.mode accordingly)
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
# Example: single disk (uses btrfs on data)
|
||||
# topology:
|
||||
# mode: single
|
||||
# filesystem:
|
||||
# btrfs:
|
||||
# label: ZOSDATA
|
||||
# compression: zstd:3
|
||||
# raid_profile: none
|
||||
|
||||
# Example: dual independent btrfs (two disks)
|
||||
# topology:
|
||||
# mode: dual_independent
|
||||
# filesystem:
|
||||
# btrfs:
|
||||
# label: ZOSDATA
|
||||
# compression: zstd:5
|
||||
# raid_profile: none
|
||||
|
||||
# Example: SSD + HDD with bcachefs
|
||||
# topology:
|
||||
# mode: ssd_hdd_bcachefs
|
||||
# partitioning:
|
||||
# cache:
|
||||
# gpt_name: zoscache
|
||||
# filesystem:
|
||||
# bcachefs:
|
||||
# label: ZOSDATA
|
||||
# cache_mode: promote
|
||||
# compression: zstd
|
||||
# checksum: crc32c
|
||||
|
||||
# Example: btrfs RAID1 (two disks)
|
||||
# topology:
|
||||
# mode: btrfs_raid1
|
||||
# filesystem:
|
||||
# btrfs:
|
||||
# label: ZOSDATA
|
||||
# compression: zstd:3
|
||||
# raid_profile: raid1
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Notes:
|
||||
# - Never modify devices outside include_patterns or inside exclude_patterns.
|
||||
# - Idempotency: if expected GPT names and filesystem labels are already present,
|
||||
# zosstorage exits success without making changes.
|
||||
# - --force flag is reserved and not implemented; will return an "unimplemented" error.
|
||||
# - Kernel cmdline data: URLs for zosstorage.config= are currently unimplemented.
|
||||
# -----------------------------------------------------------------------------
|
||||
@@ -75,7 +75,7 @@ Configuration types
|
||||
- [struct Config](../src/types.rs:1)
|
||||
- The validated configuration used by the orchestrator, containing logging, device selection rules, topology, partitioning, filesystem options, mount scheme, and report path.
|
||||
- [enum Topology](../src/types.rs:1)
|
||||
- Values: btrfs_single, bcachefs_single, dual_independent, bcachefs2_copy, ssd_hdd_bcachefs, btrfs_raid1 (opt-in).
|
||||
- Values: btrfs_single, bcachefs_single, dual_independent, bcachefs-2copy, ssd_hdd_bcachefs, btrfs_raid1 (opt-in).
|
||||
- [struct DeviceSelection](../src/types.rs:1)
|
||||
- Include and exclude regex patterns, minimum size, removable policy.
|
||||
- [struct Partitioning](../src/types.rs:1)
|
||||
@@ -201,7 +201,7 @@ Behavioral notes and contracts
|
||||
- btrfs_single: one data filesystem (btrfs) on the sole disk.
|
||||
- bcachefs_single: one data filesystem (bcachefs) on the sole disk.
|
||||
- dual_independent: independent btrfs filesystems on each eligible disk (one or more).
|
||||
- bcachefs2_copy: multi-device bcachefs across two or more data partitions with `--replicas=2` (data and metadata).
|
||||
- bcachefs-2copy: multi-device bcachefs across two or more data partitions with `--replicas=2` (data and metadata).
|
||||
- ssd_hdd_bcachefs: bcachefs spanning SSD (cache/promote) and HDD (backing), labeled ZOSDATA.
|
||||
- btrfs_raid1: only when explicitly requested; otherwise default to independent btrfs.
|
||||
- UEFI vs BIOS: when running under UEFI (`/sys/firmware/efi` present), the BIOS boot partition is suppressed.
|
||||
|
||||
1716
docs/Callgraph.svg
Normal file
1716
docs/Callgraph.svg
Normal file
File diff suppressed because it is too large
Load Diff
|
After Width: | Height: | Size: 122 KiB |
294
docs/FUNCTION_LIST.md
Normal file
294
docs/FUNCTION_LIST.md
Normal file
@@ -0,0 +1,294 @@
|
||||
# Function Reference - Call Graph Analysis
|
||||
|
||||
> This documentation is automatically derived from [`Callgraph.svg`](Callgraph.svg) and provides a comprehensive overview of all functions in the zosstorage project, organized by module.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Main Entry Points](#main-entry-points)
|
||||
- [CLI & Configuration](#cli--configuration)
|
||||
- [Orchestration](#orchestration)
|
||||
- [Device Discovery](#device-discovery)
|
||||
- [Partition Management](#partition-management)
|
||||
- [Filesystem Operations](#filesystem-operations)
|
||||
- [Mount Operations](#mount-operations)
|
||||
- [Idempotency & State](#idempotency--state)
|
||||
- [Reporting](#reporting)
|
||||
- [Utilities](#utilities)
|
||||
- [Logging](#logging)
|
||||
- [Type Definitions](#type-definitions)
|
||||
|
||||
---
|
||||
|
||||
## Main Entry Points
|
||||
|
||||
### [`src/main.rs`](../src/main.rs)
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `main()` | Application entry point; initializes the program and handles top-level errors |
|
||||
| `real_main()` | Core application logic; orchestrates the main workflow after initialization |
|
||||
|
||||
---
|
||||
|
||||
## CLI & Configuration
|
||||
|
||||
### [`src/cli/args.rs`](../src/cli/args.rs)
|
||||
|
||||
**Structs:** `Cli`, `LogLevelArg` (enum)
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `from_args()` | Parses command-line arguments and returns a `Cli` configuration object |
|
||||
|
||||
### [`src/config/loader.rs`](../src/config/loader.rs)
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `load_and_merge()` | Loads configuration from multiple sources and merges them into a unified config |
|
||||
| `validate()` | Validates the merged configuration for correctness and completeness |
|
||||
| `to_value()` | Converts configuration structures to internal value representation |
|
||||
| `merge_value()` | Recursively merges configuration values, handling conflicts appropriately |
|
||||
| `cli_overlay_value()` | Overlays CLI-provided values onto existing configuration |
|
||||
| `kernel_cmdline_topology()` | Extracts topology information from kernel command line parameters |
|
||||
| `parse_topology_token()` | Parses individual topology tokens from kernel cmdline |
|
||||
| `default_config()` | Generates default configuration values when no config file is present |
|
||||
|
||||
---
|
||||
|
||||
## Orchestration
|
||||
|
||||
### [`src/orchestrator/run.rs`](../src/orchestrator/run.rs)
|
||||
|
||||
**Structs:** `Context`
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `Context::new()` | Creates a new orchestration context with default settings |
|
||||
| `Context::with_show()` | Builder method to enable show/dry-run mode |
|
||||
| `Context::with_apply()` | Builder method to enable apply mode (actual execution) |
|
||||
| `Context::with_report_path()` | Builder method to set the report output path |
|
||||
| `Context::with_mount_existing()` | Builder method to configure mounting of existing filesystems |
|
||||
| `Context::with_report_current()` | Builder method to enable reporting of current system state |
|
||||
| `Context::with_topology_from_cli()` | Builder method to set topology from CLI arguments |
|
||||
| `Context::with_topology_from_cmdline()` | Builder method to set topology from kernel cmdline |
|
||||
| `run()` | Main orchestration function; coordinates all storage operations |
|
||||
| `build_device_filter()` | Constructs device filter based on configuration and user input |
|
||||
| `enforce_empty_disks()` | Validates that target disks are empty before proceeding |
|
||||
| `role_str()` | Converts partition role enum to human-readable string |
|
||||
| `build_summary_json()` | Builds a JSON summary of operations performed |
|
||||
|
||||
---
|
||||
|
||||
## Device Discovery
|
||||
|
||||
### [`src/device/discovery.rs`](../src/device/discovery.rs)
|
||||
|
||||
**Structs:** `Disk`, `DeviceFilter`, `SysProvider`
|
||||
**Traits:** `DeviceProvider`
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `DeviceFilter::matches()` | Checks if a device matches the configured filter criteria |
|
||||
| `SysProvider::new()` | Creates a new sysfs-based device provider |
|
||||
| `SysProvider::list_block_devices()` | Lists all block devices found via sysfs |
|
||||
| `SysProvider::probe_properties()` | Probes detailed properties of a specific device |
|
||||
| `discover()` | Entry point for device discovery using default provider |
|
||||
| `discover_with_provider()` | Device discovery with custom provider (for testing/flexibility) |
|
||||
| `is_ignored_name()` | Checks if device name should be ignored (loop, ram, etc.) |
|
||||
| `sys_block_path()` | Constructs sysfs path for a given block device |
|
||||
| `base_name()` | Extracts base device name from path |
|
||||
| `is_removable_sysfs()` | Checks if device is removable via sysfs |
|
||||
| `is_partition_sysfs()` | Checks if device is a partition via sysfs |
|
||||
| `read_disk_size_bytes()` | Reads disk size in bytes from sysfs |
|
||||
| `read_rotational()` | Determines if disk is rotational (HDD) or not (SSD) |
|
||||
| `read_model_serial()` | Reads device model and serial number |
|
||||
| `read_optional_string()` | Utility to safely read optional string values from sysfs |
|
||||
|
||||
---
|
||||
|
||||
## Partition Management
|
||||
|
||||
### [`src/partition/plan.rs`](../src/partition/plan.rs)
|
||||
|
||||
**Structs:** `PartitionSpec`, `DiskPlan`, `PartitionPlan`, `PartitionResult`
|
||||
**Enums:** `PartRole`
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `plan_partitions()` | Creates partition plans for all target disks based on topology |
|
||||
| `apply_partitions()` | Executes partition plans using sgdisk tool |
|
||||
| `type_code()` | Returns GPT partition type code for a given partition role |
|
||||
| `part_dev_path()` | Constructs device path for a partition (e.g., /dev/sda1) |
|
||||
| `sector_size_bytes()` | Reads logical sector size of disk |
|
||||
| `parse_sgdisk_info()` | Parses output from sgdisk to extract partition information |
|
||||
|
||||
---
|
||||
|
||||
## Filesystem Operations
|
||||
|
||||
### [`src/fs/plan.rs`](../src/fs/plan.rs)
|
||||
|
||||
**Structs:** `FsSpec`, `FsPlan`, `FsResult`
|
||||
**Enums:** `FsKind`
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `plan_filesystems()` | Plans filesystem creation for all partitions |
|
||||
| `make_filesystems()` | Creates filesystems according to plan (mkfs.* tools) |
|
||||
| `capture_uuid()` | Captures UUID of newly created filesystem |
|
||||
| `parse_blkid_export()` | Parses blkid export format to extract filesystem metadata |
|
||||
| `probe_existing_filesystems()` | Detects existing filesystems on partitions |
|
||||
|
||||
---
|
||||
|
||||
## Mount Operations
|
||||
|
||||
### [`src/mount/ops.rs`](../src/mount/ops.rs)
|
||||
|
||||
**Structs:** `PlannedMount`, `PlannedSubvolMount`, `MountPlan`, `MountResult`
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `fstype_str()` | Converts FsKind enum to mount filesystem type string |
|
||||
| `plan_mounts()` | Creates mount plans for all filesystems |
|
||||
| `apply_mounts()` | Executes mount operations and creates mount points |
|
||||
| `maybe_write_fstab()` | Conditionally writes /etc/fstab entries for persistent mounts |
|
||||
|
||||
---
|
||||
|
||||
## Idempotency & State
|
||||
|
||||
### [`src/idempotency/mod.rs`](../src/idempotency/mod.rs)
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `detect_existing_state()` | Detects existing partitions and filesystems to avoid destructive operations |
|
||||
| `is_empty_disk()` | Checks if a disk has no partition table or filesystems |
|
||||
| `parse_blkid_export()` | Parses blkid output to identify existing filesystems |
|
||||
| `read_proc_partitions_names()` | Reads partition names from /proc/partitions |
|
||||
| `base_name()` | Extracts base name from device path |
|
||||
| `is_partition_of()` | Checks if one device is a partition of another |
|
||||
|
||||
---
|
||||
|
||||
## Reporting
|
||||
|
||||
### [`src/report/state.rs`](../src/report/state.rs)
|
||||
|
||||
**Structs:** `StateReport`
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `build_report()` | Builds comprehensive state report of operations performed |
|
||||
| `write_report()` | Writes report to specified output path (JSON format) |
|
||||
|
||||
---
|
||||
|
||||
## Utilities
|
||||
|
||||
### [`src/util/mod.rs`](../src/util/mod.rs)
|
||||
|
||||
**Structs:** `CmdOutput`
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `which_tool()` | Locates external tool in PATH (sgdisk, mkfs.*, etc.) |
|
||||
| `run_cmd()` | Executes shell command and returns exit status |
|
||||
| `run_cmd_capture()` | Executes command and captures stdout/stderr |
|
||||
| `udev_settle()` | Waits for udev to process device events |
|
||||
| `is_efi_boot()` | Detects if system booted in EFI mode |
|
||||
|
||||
---
|
||||
|
||||
## Logging
|
||||
|
||||
### [`src/logging/mod.rs`](../src/logging/mod.rs)
|
||||
|
||||
**Structs:** `LogOptions`
|
||||
|
||||
| Function | Purpose |
|
||||
|----------|---------|
|
||||
| `LogOptions::from_cli()` | Creates logging configuration from CLI arguments |
|
||||
| `level_from_str()` | Converts string to log level enum |
|
||||
| `init_logging()` | Initializes logging subsystem with configured options |
|
||||
|
||||
---
|
||||
|
||||
## Type Definitions
|
||||
|
||||
### [`src/types.rs`](../src/types.rs)
|
||||
|
||||
**Core Configuration Structures:**
|
||||
|
||||
- `Config` - Top-level configuration structure
|
||||
- `LoggingConfig` - Logging configuration
|
||||
- `DeviceSelection` - Device selection criteria
|
||||
- `Topology` - Storage topology definition (enum)
|
||||
- `Partitioning` - Partition layout specification
|
||||
- `BiosBootSpec`, `EspSpec`, `DataSpec`, `CacheSpec` - Partition type specifications
|
||||
- `FsOptions`, `BtrfsOptions`, `BcachefsOptions`, `VfatOptions` - Filesystem options
|
||||
- `MountScheme`, `MountSchemeKind` - Mount configuration
|
||||
- `ReportOptions` - Report generation configuration
|
||||
|
||||
### [`src/errors.rs`](../src/errors.rs)
|
||||
|
||||
**Error Types:**
|
||||
|
||||
- `Error` - Main error enum for all error conditions
|
||||
- `Result<T>` - Type alias for `std::result::Result<T, Error>`
|
||||
|
||||
---
|
||||
|
||||
## Call Graph Relationships
|
||||
|
||||
### Main Execution Flow
|
||||
|
||||
```
|
||||
main() → real_main() → orchestrator::run()
|
||||
↓
|
||||
├─→ cli::from_args()
|
||||
├─→ config::load_and_merge()
|
||||
├─→ logging::init_logging()
|
||||
├─→ device::discover()
|
||||
├─→ partition::plan_partitions()
|
||||
├─→ partition::apply_partitions()
|
||||
├─→ fs::plan_filesystems()
|
||||
├─→ fs::make_filesystems()
|
||||
├─→ mount::plan_mounts()
|
||||
├─→ mount::apply_mounts()
|
||||
└─→ report::build_report() / write_report()
|
||||
```
|
||||
|
||||
### Key Dependencies
|
||||
|
||||
- **Orchestrator** (`run()`) calls: All major subsystems
|
||||
- **Device Discovery** uses: Utilities for system probing
|
||||
- **Partition/FS/Mount** operations use: Utilities for command execution
|
||||
- **All operations** call: `util::run_cmd()` or `util::run_cmd_capture()`
|
||||
- **Idempotency checks** called by: Orchestrator before destructive operations
|
||||
|
||||
---
|
||||
|
||||
## Function Count Summary
|
||||
|
||||
- **Main Entry**: 2 functions
|
||||
- **CLI & Config**: 9 functions
|
||||
- **Orchestration**: 13 functions
|
||||
- **Device Discovery**: 15 functions
|
||||
- **Partition Management**: 6 functions
|
||||
- **Filesystem Operations**: 5 functions
|
||||
- **Mount Operations**: 4 functions
|
||||
- **Idempotency**: 6 functions
|
||||
- **Reporting**: 2 functions
|
||||
- **Utilities**: 5 functions
|
||||
- **Logging**: 3 functions
|
||||
|
|
||||
**Total: 70 documented functions** across 15 source files
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- Original call graph visualization: [`docs/Callgraph.svg`](Callgraph.svg)
|
||||
- Architecture documentation: [`docs/ARCHITECTURE.md`](ARCHITECTURE.md)
|
||||
- API documentation: [`docs/API.md`](API.md)
|
||||
@@ -1,27 +1,16 @@
|
||||
# zosstorage Configuration Schema
|
||||
# zosstorage Configuration (Deprecated schema)
|
||||
|
||||
This document defines the YAML configuration for the initramfs-only disk provisioning utility and the exact precedence rules between configuration sources. It complements [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md).
|
||||
This schema document is deprecated per [docs/adr/0002-defaults-only-no-external-config.md](docs/adr/0002-defaults-only-no-external-config.md). Runtime now uses defaults-only with a single optional kernel cmdline override. The YAML configuration file is not read at boot.
|
||||
|
||||
Canonical paths and keys
|
||||
- Kernel cmdline key: zosstorage.config=
|
||||
- Default config file path: /etc/zosstorage/config.yaml
|
||||
- JSON state report path: /run/zosstorage/state.json
|
||||
- Optional log file path: /run/zosstorage/zosstorage.log
|
||||
- fstab generation: disabled by default
|
||||
- Reserved filesystem labels: ZOSBOOT (ESP), ZOSDATA (all data filesystems)
|
||||
- GPT partition names: zosboot, zosdata, zoscache
|
||||
Active behavior (ADR-0002)
|
||||
- Defaults-only: all settings are defined in code. No /etc/zosstorage/config.yaml is read.
|
||||
- Optional kernel cmdline override: `zosstorage.topology=VALUE` can override only the topology. Legacy alias `zosstorage.topo=` is accepted.
|
||||
- CLI: `--config` and `--topology` are deprecated and ignored (warnings emitted). Operational flags remain (`--apply`, `--show`, `--report`, `--fstab`, logging).
|
||||
- Report path: `/run/zosstorage/state.json`. Optional log file: `/run/zosstorage/zosstorage.log`.
|
||||
- Reserved labels: `ZOSBOOT` (ESP), `ZOSDATA` (data). GPT names: `zosboot`, `zosdata`, `zoscache`.
|
||||
|
||||
Precedence and merge strategy
|
||||
1. Start from built-in defaults documented here.
|
||||
2. Merge in the on-disk config file if present at /etc/zosstorage/config.yaml.
|
||||
3. Merge CLI flags next; these override file values.
|
||||
4. Merge kernel cmdline last; zosstorage.config= overrides CLI and file.
|
||||
5. No interactive prompts are permitted.
|
||||
|
||||
The kernel cmdline key zosstorage.config= accepts:
|
||||
- A path to a YAML file inside the initramfs root (preferred).
|
||||
- A file: absolute path (e.g., file:/run/config/zos.yaml).
|
||||
- A data: URL containing base64 YAML (optional extension).
|
||||
Historical reference (original YAML-based schema, no longer used at runtime)
|
||||
The remainder of this document preserves the previous YAML schema for archival purposes only.
|
||||
|
||||
Top-level YAML structure
|
||||
|
||||
@@ -43,7 +32,7 @@ device_selection:
|
||||
allow_removable: false # future option; default false
|
||||
min_size_gib: 10 # ignore devices smaller than this (default 10)
|
||||
topology: # desired overall layout; see values below
|
||||
mode: btrfs_single # btrfs_single | bcachefs_single | dual_independent | bcachefs2_copy | ssd_hdd_bcachefs | btrfs_raid1
|
||||
mode: btrfs_single # btrfs_single | bcachefs_single | dual_independent | bcachefs-2copy | ssd_hdd_bcachefs | btrfs_raid1
|
||||
partitioning:
|
||||
alignment_mib: 1 # GPT alignment in MiB
|
||||
require_empty_disks: true # abort if any partition or FS signatures exist
|
||||
@@ -84,7 +73,7 @@ Topology modes
|
||||
- btrfs_single: One eligible disk. Create BIOS boot (if enabled), ESP 512 MiB, remainder as data. Create a btrfs filesystem labeled ZOSDATA on the data partition.
|
||||
- bcachefs_single: One eligible disk. Create BIOS boot (if enabled), ESP 512 MiB, remainder as data. Create a bcachefs filesystem labeled ZOSDATA on the data partition.
|
||||
- dual_independent: One or more eligible disks. On each disk, create BIOS boot (if enabled) + ESP + data. Create an independent btrfs filesystem labeled ZOSDATA on each data partition. No RAID by default.
|
||||
- bcachefs2_copy: Two or more eligible disks (minimum 2). Create data partitions and then a single multi-device bcachefs labeled ZOSDATA spanning those data partitions. The mkfs step uses `--replicas=2` (data and metadata).
|
||||
- bcachefs-2copy: Two or more eligible disks (minimum 2). Create data partitions and then a single multi-device bcachefs labeled ZOSDATA spanning those data partitions. The mkfs step uses `--replicas=2` (data and metadata).
|
||||
- ssd_hdd_bcachefs: One SSD/NVMe and one HDD. Create BIOS boot (if enabled) + ESP on both as required. Create cache (on SSD) and data/backing (on HDD) partitions named zoscache and zosdata respectively. Create a bcachefs labeled ZOSDATA across SSD and HDD per policy (SSD cache/promote; HDD backing).
|
||||
- btrfs_raid1: Optional mode if explicitly requested. Create mirrored btrfs across two disks for the data role with raid1 profile. Not enabled by default.
|
||||
|
||||
|
||||
@@ -184,7 +184,7 @@ Per-topology specifics
|
||||
- btrfs_single: All roles on the single disk; data formatted as btrfs.
|
||||
- bcachefs_single: All roles on the single disk; data formatted as bcachefs.
|
||||
- dual_independent: On each eligible disk (one or more), create BIOS boot (if applicable), ESP, and data.
|
||||
- bcachefs_2copy: Create data partitions on two or more disks; later formatted as one multi-device bcachefs spanning all data partitions.
|
||||
- bcachefs-2copy: Create data partitions on two or more disks; later formatted as one multi-device bcachefs spanning all data partitions.
|
||||
- ssd_hdd_bcachefs: SSD gets BIOS boot + ESP + zoscache; HDD gets BIOS boot + ESP + zosdata; combined later into one bcachefs.
|
||||
- btrfs_raid1: Two disks minimum; data partitions mirrored via btrfs RAID1.
|
||||
|
||||
@@ -203,12 +203,12 @@ Application
|
||||
Kinds
|
||||
- Vfat for ESP, label ZOSBOOT.
|
||||
- Btrfs for data in btrfs_single, dual_independent, and btrfs_raid1 (with RAID1 profile).
|
||||
- Bcachefs for data in bcachefs_single, ssd_hdd_bcachefs (SSD cache + HDD backing), and bcachefs_2copy (multi-device).
|
||||
- Bcachefs for data in bcachefs_single, ssd_hdd_bcachefs (SSD cache + HDD backing), and bcachefs-2copy (multi-device).
|
||||
- All data filesystems use label ZOSDATA.
|
||||
|
||||
Defaults
|
||||
- btrfs: compression zstd:3, raid_profile none unless explicitly set; for btrfs_raid1 use -m raid1 -d raid1.
|
||||
- bcachefs: cache_mode promote, compression zstd, checksum crc32c; for bcachefs_2copy use `--replicas=2` (data and metadata).
|
||||
- bcachefs: cache_mode promote, compression zstd, checksum crc32c; for bcachefs-2copy use `--replicas=2` (data and metadata).
|
||||
- vfat: ESP label ZOSBOOT.
|
||||
|
||||
Planning and execution
|
||||
@@ -267,7 +267,7 @@ Kernel cmdline
|
||||
Help text sections
|
||||
- NAME, SYNOPSIS, DESCRIPTION
|
||||
- CONFIG PRECEDENCE
|
||||
- TOPOLOGIES: btrfs_single, bcachefs_single, dual_independent, bcachefs_2copy, ssd_hdd_bcachefs, btrfs_raid1
|
||||
- TOPOLOGIES: btrfs_single, bcachefs_single, dual_independent, bcachefs-2copy, ssd_hdd_bcachefs, btrfs_raid1
|
||||
- SAFETY AND IDEMPOTENCY
|
||||
- REPORTS
|
||||
- EXIT CODES: 0 success or already_provisioned, non-zero on error
|
||||
@@ -280,7 +280,7 @@ Scenarios to scaffold in [tests/](tests/)
|
||||
- Single disk 40 GiB virtio: validates btrfs_single topology end-to-end smoke.
|
||||
- Dual NVMe 40 GiB each: validates dual_independent topology (independent btrfs per disk).
|
||||
- SSD NVMe + HDD virtio: validates ssd_hdd_bcachefs topology (bcachefs with SSD cache/promote, HDD backing).
|
||||
- Three disks: validates bcachefs_2copy across data partitions using `--replicas=2`.
|
||||
- Three disks: validates bcachefs-2copy across data partitions using `--replicas=2`.
|
||||
- Negative: no eligible disks, or non-empty disk should abort.
|
||||
|
||||
Test strategy
|
||||
|
||||
109
docs/adr/0002-defaults-only-no-external-config.md
Normal file
109
docs/adr/0002-defaults-only-no-external-config.md
Normal file
@@ -0,0 +1,109 @@
|
||||
# ADR 0002: Defaults-Only Configuration; Remove External YAML Config
|
||||
|
||||
Status
|
||||
- Accepted
|
||||
- Date: 2025-10-06
|
||||
|
||||
Context
|
||||
- Running from initramfs at first boot provides no reliable access to an on-disk configuration file (e.g., /etc/zosstorage/config.yaml). An external file cannot be assumed to exist or be mounted.
|
||||
- The previous design added precedence and merge complexity across file, CLI, and kernel cmdline as documented in [docs/SCHEMA.md](../SCHEMA.md) and implemented via [fn load_and_merge()](../../src/config/loader.rs:1), increasing maintenance burden and risks of drift.
|
||||
- YAML introduces misconfiguration risk in early boot, adds I/O, and complicates idempotency guarantees without meaningful benefits for the intended minimal-first initializer.
|
||||
- The desired model is to ship with sane built-in defaults, selected automatically from detected hardware topology; optional kernel cmdline may override only the topology choice for VM/lab scenarios.
|
||||
|
||||
Decision
|
||||
- Remove all dependency on an on-disk configuration file:
|
||||
- Do not read /etc/zosstorage/config.yaml or any file-based config.
|
||||
- Deprecate and ignore repository-local config files for runtime (e.g., config/zosstorage.yaml). The example file [config/zosstorage.example.yaml](../../config/zosstorage.example.yaml) remains as historical reference only and may be removed later.
|
||||
- Deprecate the --config CLI flag in [struct Cli](../../src/cli/args.rs:1). If present, emit a deprecation warning and ignore it.
|
||||
- Retain operational CLI flags and logging controls for usability:
|
||||
- --apply, --show, --report PATH, --fstab, --log-level LEVEL, --log-to-file
|
||||
- Replace the prior file/CLI/kernel precedence with a defaults-only policy plus a single optional kernel cmdline override:
|
||||
- Recognized key: zosstorage.topology=VALUE
|
||||
- The key may override only the topology selection; all other settings use built-in defaults.
|
||||
- Topology defaults and override policy:
|
||||
- 1 eligible disk:
|
||||
- Default: btrfs_single
|
||||
- Allowed cmdline overrides: btrfs_single, bcachefs_single
|
||||
- 2 eligible disks:
|
||||
- Default: dual_independent
|
||||
- Allowed cmdline overrides: dual_independent, ssd_hdd_bcachefs, btrfs_raid1, bcachefs-2copy
|
||||
- >2 eligible disks:
|
||||
- Default: btrfs_raid1
|
||||
- Allowed cmdline overrides: btrfs_raid1, bcachefs-2copy
|
||||
- Accept both snake_case and hyphenated forms for VALUE; canonical for two-copy bcachefs is bcachefs-2copy; normalize to [enum Topology](../../src/types.rs:1):
|
||||
- btrfs_single | btrfs-single
|
||||
- bcachefs_single | bcachefs-single
|
||||
- dual_independent | dual-independent
|
||||
- ssd_hdd_bcachefs | ssd-hdd-bcachefs
|
||||
- btrfs_raid1 | btrfs-raid1
|
||||
- bcachefs-2copy
|
||||
- Kernel cmdline parsing beyond topology is deferred; future extensions for VM workflows may be proposed separately.
|
||||
|
||||
Rationale
|
||||
- Eliminates unreachable configuration paths at first boot and simplifies the mental model.
|
||||
- Reduces maintenance overhead by removing schema and precedence logic.
|
||||
- Minimizes early-boot I/O and failure modes while preserving a targeted override for lab/VMs.
|
||||
- Keeps the tool safe-by-default and fully idempotent without depending on external files.
|
||||
|
||||
Consequences
|
||||
- Documentation:
|
||||
- Mark [docs/SCHEMA.md](../SCHEMA.md) as deprecated for runtime behavior; retain only as historical reference.
|
||||
- Update [docs/ARCHITECTURE.md](../ARCHITECTURE.md) and [docs/SPECS.md](../SPECS.md) to reflect defaults-only configuration.
|
||||
- Update [docs/API.md](../API.md) and [docs/API-SKELETONS.md](../API-SKELETONS.md) where they reference file-based config.
|
||||
- CLI:
|
||||
- [struct Cli](../../src/cli/args.rs:1) keeps operational flags; --config becomes a no-op with a deprecation warning.
|
||||
- Code:
|
||||
- Replace [fn load_and_merge()](../../src/config/loader.rs:1) with a minimal loader that:
|
||||
- Builds a [struct Config](../../src/types.rs:1) entirely from baked-in defaults.
|
||||
- Reads /proc/cmdline to optionally parse zosstorage.topology and normalize to [enum Topology](../../src/types.rs:1).
|
||||
- Removes YAML parsing, file reads, and merge logic.
|
||||
- Tests:
|
||||
- Remove tests that depend on external YAML; add tests for cmdline override normalization and disk-count defaults.
|
||||
|
||||
Defaults (authoritative)
|
||||
- Partitioning:
|
||||
- GPT only, 1 MiB alignment, BIOS boot 1 MiB first unless UEFI detected via [fn is_efi_boot()](../../src/util/mod.rs:1).
|
||||
- ESP 512 MiB labeled ZOSBOOT (GPT name: zosboot), data uses GPT name zosdata.
|
||||
- Filesystems:
|
||||
- ESP: vfat labeled ZOSBOOT
|
||||
- Data: label ZOSDATA
|
||||
- Backend per topology (btrfs for btrfs_*; bcachefs for ssd_hdd_bcachefs and bcachefs-2copy)
|
||||
- Mount scheme:
|
||||
- Root-mount all data filesystems under /var/mounts/{UUID}; final subvolume/subdir mounts from the primary data FS to /var/cache/{system,etc,modules,vm-meta}; fstab remains optional.
|
||||
- Idempotency:
|
||||
- Unchanged: already-provisioned signals exit success-without-changes via [fn detect_existing_state()](../../src/idempotency/mod.rs:1).
|
||||
|
||||
Implementation Plan
|
||||
1) Introduce a minimal defaults loader in [src/config/loader.rs](../../src/config/loader.rs:1):
|
||||
- new internal fn parse_topology_from_cmdline() -> Option<Topology>
|
||||
- new internal fn normalize_topology(s: &str) -> Option<Topology>
|
||||
- refactor load to construct Config from constants + optional topology override
|
||||
2) CLI:
|
||||
- Emit deprecation warning when --config is provided; ignore its value.
|
||||
3) Docs:
|
||||
- Add deprecation banner to [docs/SCHEMA.md](../SCHEMA.md).
|
||||
- Adjust [README.md](../../README.md) to describe defaults and the zosstorage.topology override.
|
||||
4) Tests:
|
||||
- Add unit tests for normalization and disk-count policy; remove YAML-based tests.
|
||||
|
||||
Backward Compatibility
|
||||
- External YAML configuration is no longer supported at runtime.
|
||||
- Kernel cmdline key zosstorage.config= is removed. Only zosstorage.topology remains recognized.
|
||||
- The JSON report, labels, GPT names, and mount behavior remain unchanged.
|
||||
|
||||
Security and Safety
|
||||
- By eliminating external configuration input, we reduce attack surface and misconfiguration risk in early boot.
|
||||
- The emptiness and idempotency checks continue to gate destructive operations.
|
||||
|
||||
Open Items
|
||||
- Decide whether to accept additional synonyms (e.g., “bcachefs-raid1”) and map them to existing [enum Topology](../../src/types.rs:1) variants; default is to reject unknown values with a clear error.
|
||||
- Potential future kernel cmdline keys (e.g., logging level) may be explored via a separate ADR.
|
||||
|
||||
Links
|
||||
- Architecture: [docs/ARCHITECTURE.md](../ARCHITECTURE.md)
|
||||
- API Index: [docs/API-SKELETONS.md](../API-SKELETONS.md)
|
||||
- Specs: [docs/SPECS.md](../SPECS.md)
|
||||
- CLI: [src/cli/args.rs](../../src/cli/args.rs)
|
||||
- Config loader: [src/config/loader.rs](../../src/config/loader.rs)
|
||||
- Types: [src/types.rs](../../src/types.rs)
|
||||
- Util: [src/util/mod.rs](../../src/util/mod.rs)
|
||||
2932
docs/callgraph.html
Normal file
2932
docs/callgraph.html
Normal file
File diff suppressed because it is too large
Load Diff
@@ -58,8 +58,8 @@ impl std::fmt::Display for LogLevelArg {
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(name = "zosstorage", disable_help_subcommand = true)]
|
||||
pub struct Cli {
|
||||
/// Path to YAML configuration (mirrors kernel cmdline key 'zosstorage.config=')
|
||||
#[arg(short = 'c', long = "config")]
|
||||
/// DEPRECATED: external YAML configuration is not used at runtime (ADR-0002). Ignored with a warning.
|
||||
#[arg(short = 'c', long = "config", hide = true)]
|
||||
pub config: Option<String>,
|
||||
|
||||
/// Log level: error, warn, info, debug
|
||||
@@ -74,7 +74,7 @@ pub struct Cli {
|
||||
#[arg(short = 's', long = "fstab", default_value_t = false)]
|
||||
pub fstab: bool,
|
||||
|
||||
/// Select topology (overrides config topology)
|
||||
/// Select topology (CLI has precedence over kernel cmdline)
|
||||
#[arg(short = 't', long = "topology", value_enum)]
|
||||
pub topology: Option<crate::types::Topology>,
|
||||
|
||||
@@ -82,28 +82,27 @@ pub struct Cli {
|
||||
#[arg(short = 'f', long = "force")]
|
||||
pub force: bool,
|
||||
|
||||
/// Allow removable devices (e.g., USB sticks) to be considered during discovery
|
||||
/// Overrides config.device_selection.allow_removable when provided
|
||||
/// Include removable devices (e.g., USB sticks) during discovery (default: false)
|
||||
#[arg(long = "allow-removable", default_value_t = false)]
|
||||
pub allow_removable: bool,
|
||||
|
||||
|
||||
/// Attempt to mount existing filesystems based on on-disk headers; no partitioning or mkfs.
|
||||
/// Non-destructive mounting flow; uses UUID= sources and policy from config.
|
||||
#[arg(long = "mount-existing", default_value_t = false)]
|
||||
pub mount_existing: bool,
|
||||
|
||||
|
||||
/// Report current initialized filesystems and mounts without performing changes.
|
||||
#[arg(long = "report-current", default_value_t = false)]
|
||||
pub report_current: bool,
|
||||
|
||||
|
||||
/// Print detection and planning summary as JSON to stdout (non-default)
|
||||
#[arg(long = "show", default_value_t = false)]
|
||||
pub show: bool,
|
||||
|
||||
/// Write detection/planning JSON report to the given path (overrides config.report.path)
|
||||
|
||||
/// Write detection/planning JSON report to the given path
|
||||
#[arg(long = "report")]
|
||||
pub report: Option<String>,
|
||||
|
||||
|
||||
/// Execute destructive actions (apply mode). When false, runs preview-only.
|
||||
#[arg(long = "apply", default_value_t = false)]
|
||||
pub apply: bool,
|
||||
@@ -112,4 +111,4 @@ pub struct Cli {
|
||||
/// Parse CLI arguments (non-interactive; suitable for initramfs).
|
||||
pub fn from_args() -> Cli {
|
||||
Cli::parse()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,4 +10,4 @@
|
||||
|
||||
pub mod args;
|
||||
|
||||
pub use args::*;
|
||||
pub use args::*;
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
//! Configuration loading, merging, and validation (loader).
|
||||
//!
|
||||
//! Precedence (highest to lowest):
|
||||
//! - CLI flags (and optional `--config PATH` when provided)
|
||||
//! - Kernel cmdline key `zosstorage.topo=`
|
||||
//! - Built-in defaults
|
||||
//!
|
||||
//! See [docs/SCHEMA.md](../../docs/SCHEMA.md) for the schema details.
|
||||
//// Precedence and policy (ADR-0002):
|
||||
//// - Built-in sane defaults for all settings.
|
||||
//// - Kernel cmdline key `zosstorage.topology=` (legacy alias `zosstorage.topo=`) may override topology only.
|
||||
//// - CLI flags control operational toggles only (logging, fstab, allow-removable).
|
||||
//// - `--config` and `--topology` are deprecated and ignored (warnings emitted).
|
||||
////
|
||||
//// Note: [docs/SCHEMA.md](../../docs/SCHEMA.md) is deprecated for runtime configuration; defaults are code-defined.
|
||||
//
|
||||
// REGION: API
|
||||
// api: config::load_and_merge(cli: &crate::cli::Cli) -> crate::Result<crate::config::types::Config>
|
||||
@@ -40,23 +41,21 @@
|
||||
// REGION: TODO-END
|
||||
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
use crate::{cli::Cli, Error, Result};
|
||||
use crate::types::*;
|
||||
use crate::{Error, Result, cli::Cli};
|
||||
use serde_json::{Map, Value, json};
|
||||
use base64::Engine as _;
|
||||
use tracing::warn;
|
||||
|
||||
//// Load defaults, merge optional CLI --config, overlay CLI flags (highest precedence),
|
||||
//// then consider kernel cmdline topology only if CLI omitted it.
|
||||
//// Build configuration from built-in defaults and minimal operational CLI overlays.
|
||||
/// Returns a validated Config on success.
|
||||
///
|
||||
/// Behavior:
|
||||
/// - Starts from built-in defaults (documented in docs/SCHEMA.md)
|
||||
/// - Skips implicit /etc reads in initramfs
|
||||
/// - If CLI --config is provided, merge that (overrides defaults)
|
||||
/// - If kernel cmdline provides `zosstorage.topo=...` and CLI did NOT specify `--topology`, apply it
|
||||
/// - Returns Error::Unimplemented when --force is used
|
||||
/// Behavior (ADR-0002):
|
||||
/// - Start from built-in defaults (code-defined).
|
||||
/// - Ignore on-disk YAML and `--config` (deprecated); emit a warning if provided.
|
||||
/// - CLI `--topology` is supported and has precedence when provided.
|
||||
/// - If CLI does not provide topology, apply kernel cmdline `zosstorage.topology=` (or legacy `zosstorage.topo=`).
|
||||
/// - Returns Error::Unimplemented when --force is used.
|
||||
pub fn load_and_merge(cli: &Cli) -> Result<Config> {
|
||||
if cli.force {
|
||||
return Err(Error::Unimplemented("--force flag is not implemented"));
|
||||
@@ -68,27 +67,27 @@ pub fn load_and_merge(cli: &Cli) -> Result<Config> {
|
||||
// 2) (initramfs) Skipped reading default on-disk config to avoid dependency on /etc.
|
||||
// If a config is needed, pass it via --config PATH or kernel cmdline `zosstorage.config=...`.
|
||||
|
||||
// 3) Merge CLI referenced config (if any)
|
||||
if let Some(cfg_path) = &cli.config {
|
||||
let v = load_yaml_value(cfg_path)?;
|
||||
merge_value(&mut merged, v);
|
||||
// 3) Deprecated config file flag: warn and ignore
|
||||
if cli.config.is_some() {
|
||||
warn!("--config is deprecated and ignored (ADR-0002: defaults-only)");
|
||||
}
|
||||
// (no file merge)
|
||||
|
||||
// 4) Overlay CLI flags (non-path flags)
|
||||
let cli_overlay = cli_overlay_value(cli);
|
||||
merge_value(&mut merged, cli_overlay);
|
||||
|
||||
// 5) Kernel cmdline topology (only if CLI did not specify topology), e.g., `zosstorage.topo=dual-independent`
|
||||
if cli.topology.is_none() {
|
||||
if let Some(topo) = kernel_cmdline_topology() {
|
||||
merge_value(&mut merged, json!({"topology": topo.to_string()}));
|
||||
}
|
||||
}
|
||||
// 5) Kernel cmdline topology override only when CLI did not provide topology
|
||||
if cli.topology.is_none() {
|
||||
if let Some(topo) = kernel_cmdline_topology() {
|
||||
merge_value(&mut merged, json!({"topology": topo.to_string()}));
|
||||
}
|
||||
}
|
||||
|
||||
// Finalize
|
||||
let cfg: Config = serde_json::from_value(merged).map_err(|e| Error::Other(e.into()))?;
|
||||
validate(&cfg)?;
|
||||
Ok(cfg)
|
||||
// Finalize
|
||||
let cfg: Config = serde_json::from_value(merged).map_err(|e| Error::Other(e.into()))?;
|
||||
validate(&cfg)?;
|
||||
Ok(cfg)
|
||||
}
|
||||
|
||||
/// Validate semantic correctness of the configuration.
|
||||
@@ -129,43 +128,50 @@ pub fn validate(cfg: &Config) -> Result<()> {
|
||||
}
|
||||
|
||||
// Reserved GPT names
|
||||
if cfg.partitioning.esp.gpt_name != "zosboot" {
|
||||
return Err(Error::Validation(
|
||||
"partitioning.esp.gpt_name must be 'zosboot'".into(),
|
||||
));
|
||||
if cfg.partitioning.esp.gpt_name != GPT_NAME_ZOSBOOT {
|
||||
return Err(Error::Validation(format!(
|
||||
"partitioning.esp.gpt_name must be '{}'",
|
||||
GPT_NAME_ZOSBOOT
|
||||
)));
|
||||
}
|
||||
if cfg.partitioning.data.gpt_name != "zosdata" {
|
||||
return Err(Error::Validation(
|
||||
"partitioning.data.gpt_name must be 'zosdata'".into(),
|
||||
));
|
||||
if cfg.partitioning.data.gpt_name != GPT_NAME_ZOSDATA {
|
||||
return Err(Error::Validation(format!(
|
||||
"partitioning.data.gpt_name must be '{}'",
|
||||
GPT_NAME_ZOSDATA
|
||||
)));
|
||||
}
|
||||
if cfg.partitioning.cache.gpt_name != "zoscache" {
|
||||
return Err(Error::Validation(
|
||||
"partitioning.cache.gpt_name must be 'zoscache'".into(),
|
||||
));
|
||||
if cfg.partitioning.cache.gpt_name != GPT_NAME_ZOSCACHE {
|
||||
return Err(Error::Validation(format!(
|
||||
"partitioning.cache.gpt_name must be '{}'",
|
||||
GPT_NAME_ZOSCACHE
|
||||
)));
|
||||
}
|
||||
// BIOS boot name is also 'zosboot' per current assumption
|
||||
if cfg.partitioning.bios_boot.gpt_name != "zosboot" {
|
||||
return Err(Error::Validation(
|
||||
"partitioning.bios_boot.gpt_name must be 'zosboot'".into(),
|
||||
));
|
||||
if cfg.partitioning.bios_boot.gpt_name != GPT_NAME_ZOSBOOT {
|
||||
return Err(Error::Validation(format!(
|
||||
"partitioning.bios_boot.gpt_name must be '{}'",
|
||||
GPT_NAME_ZOSBOOT
|
||||
)));
|
||||
}
|
||||
|
||||
// Reserved filesystem labels
|
||||
if cfg.filesystem.vfat.label != "ZOSBOOT" {
|
||||
return Err(Error::Validation(
|
||||
"filesystem.vfat.label must be 'ZOSBOOT'".into(),
|
||||
));
|
||||
if cfg.filesystem.vfat.label != LABEL_ZOSBOOT {
|
||||
return Err(Error::Validation(format!(
|
||||
"filesystem.vfat.label must be '{}'",
|
||||
LABEL_ZOSBOOT
|
||||
)));
|
||||
}
|
||||
if cfg.filesystem.btrfs.label != "ZOSDATA" {
|
||||
return Err(Error::Validation(
|
||||
"filesystem.btrfs.label must be 'ZOSDATA'".into(),
|
||||
));
|
||||
if cfg.filesystem.btrfs.label != LABEL_ZOSDATA {
|
||||
return Err(Error::Validation(format!(
|
||||
"filesystem.btrfs.label must be '{}'",
|
||||
LABEL_ZOSDATA
|
||||
)));
|
||||
}
|
||||
if cfg.filesystem.bcachefs.label != "ZOSDATA" {
|
||||
return Err(Error::Validation(
|
||||
"filesystem.bcachefs.label must be 'ZOSDATA'".into(),
|
||||
));
|
||||
if cfg.filesystem.bcachefs.label != LABEL_ZOSDATA {
|
||||
return Err(Error::Validation(format!(
|
||||
"filesystem.bcachefs.label must be '{}'",
|
||||
LABEL_ZOSDATA
|
||||
)));
|
||||
}
|
||||
|
||||
// Mount scheme
|
||||
@@ -182,7 +188,9 @@ pub fn validate(cfg: &Config) -> Result<()> {
|
||||
Topology::Bcachefs2Copy => {}
|
||||
Topology::BtrfsRaid1 => {
|
||||
// No enforced requirement here beyond presence of two disks at runtime.
|
||||
if cfg.filesystem.btrfs.raid_profile != "raid1" && cfg.filesystem.btrfs.raid_profile != "none" {
|
||||
if cfg.filesystem.btrfs.raid_profile != "raid1"
|
||||
&& cfg.filesystem.btrfs.raid_profile != "none"
|
||||
{
|
||||
return Err(Error::Validation(
|
||||
"filesystem.btrfs.raid_profile must be 'none' or 'raid1'".into(),
|
||||
));
|
||||
@@ -204,15 +212,6 @@ fn to_value<T: serde::Serialize>(t: T) -> Result<Value> {
|
||||
serde_json::to_value(t).map_err(|e| Error::Other(e.into()))
|
||||
}
|
||||
|
||||
fn load_yaml_value(path: &str) -> Result<Value> {
|
||||
let s = fs::read_to_string(path)
|
||||
.map_err(|e| Error::Config(format!("failed to read config file {}: {}", path, e)))?;
|
||||
// Load as generic serde_json::Value for merging flexibility
|
||||
let v: serde_json::Value = serde_yaml::from_str(&s)
|
||||
.map_err(|e| Error::Config(format!("failed to parse YAML {}: {}", path, e)))?;
|
||||
Ok(v)
|
||||
}
|
||||
|
||||
/// Merge b into a in-place:
|
||||
/// - Objects are merged key-by-key (recursively)
|
||||
/// - Arrays and scalars replace
|
||||
@@ -259,72 +258,29 @@ fn cli_overlay_value(cli: &Cli) -> Value {
|
||||
root.insert("device_selection".into(), Value::Object(device_selection));
|
||||
}
|
||||
|
||||
// topology override via --topology (avoid moving out of borrowed field)
|
||||
// topology override via --topology (takes precedence over kernel cmdline)
|
||||
if let Some(t) = cli.topology.as_ref() {
|
||||
root.insert("topology".into(), Value::String(t.to_string()));
|
||||
}
|
||||
|
||||
|
||||
Value::Object(root)
|
||||
}
|
||||
|
||||
enum KernelConfigSource {
|
||||
Path(String),
|
||||
/// Raw YAML from a data: URL payload after decoding (if base64-encoded).
|
||||
Data(String),
|
||||
}
|
||||
|
||||
/// Resolve a config from kernel cmdline key `zosstorage.config=`.
|
||||
/// Supports:
|
||||
/// - absolute paths (e.g., /run/zos.yaml)
|
||||
/// - file:/absolute/path
|
||||
/// - data:application/x-yaml;base64,BASE64CONTENT
|
||||
/// Returns Ok(None) when key absent.
|
||||
fn kernel_cmdline_config_source() -> Result<Option<KernelConfigSource>> {
|
||||
//// Parse kernel cmdline for topology override.
|
||||
//// Accepts `zosstorage.topology=` and legacy alias `zosstorage.topo=`.
|
||||
pub fn kernel_cmdline_topology() -> Option<Topology> {
|
||||
let cmdline = fs::read_to_string("/proc/cmdline").unwrap_or_default();
|
||||
for token in cmdline.split_whitespace() {
|
||||
if let Some(rest) = token.strip_prefix("zosstorage.config=") {
|
||||
let mut val = rest.to_string();
|
||||
// Trim surrounding quotes if any
|
||||
if (val.starts_with('"') && val.ends_with('"')) || (val.starts_with('\'') && val.ends_with('\'')) {
|
||||
val = val[1..val.len() - 1].to_string();
|
||||
}
|
||||
if let Some(path) = val.strip_prefix("file:") {
|
||||
return Ok(Some(KernelConfigSource::Path(path.to_string())));
|
||||
}
|
||||
if let Some(data_url) = val.strip_prefix("data:") {
|
||||
// data:[<mediatype>][;base64],<data>
|
||||
// Find comma separating the header and payload
|
||||
if let Some(idx) = data_url.find(',') {
|
||||
let (header, payload) = data_url.split_at(idx);
|
||||
let payload = &payload[1..]; // skip the comma
|
||||
let is_base64 = header.split(';').any(|seg| seg.eq_ignore_ascii_case("base64"));
|
||||
let yaml = if is_base64 {
|
||||
let decoded = base64::engine::general_purpose::STANDARD
|
||||
.decode(payload.as_bytes())
|
||||
.map_err(|e| Error::Config(format!("invalid base64 in data: URL: {}", e)))?;
|
||||
String::from_utf8(decoded)
|
||||
.map_err(|e| Error::Config(format!("data: URL payload not UTF-8: {}", e)))?
|
||||
} else {
|
||||
payload.to_string()
|
||||
};
|
||||
return Ok(Some(KernelConfigSource::Data(yaml)));
|
||||
} else {
|
||||
return Err(Error::Config("malformed data: URL (missing comma)".into()));
|
||||
}
|
||||
}
|
||||
// Treat as direct path
|
||||
return Ok(Some(KernelConfigSource::Path(val)));
|
||||
let mut val_opt = None;
|
||||
if let Some(v) = token.strip_prefix("zosstorage.topology=") {
|
||||
val_opt = Some(v);
|
||||
} else if let Some(v) = token.strip_prefix("zosstorage.topo=") {
|
||||
val_opt = Some(v);
|
||||
}
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn kernel_cmdline_topology() -> Option<Topology> {
|
||||
let cmdline = fs::read_to_string("/proc/cmdline").unwrap_or_default();
|
||||
for token in cmdline.split_whitespace() {
|
||||
if let Some(mut val) = token.strip_prefix("zosstorage.topo=") {
|
||||
// Trim surrounding quotes if any
|
||||
if (val.starts_with('"') && val.ends_with('"')) || (val.starts_with('\'') && val.ends_with('\'')) {
|
||||
if let Some(mut val) = val_opt {
|
||||
if (val.starts_with('"') && val.ends_with('"'))
|
||||
|| (val.starts_with('\'') && val.ends_with('\''))
|
||||
{
|
||||
val = &val[1..val.len() - 1];
|
||||
}
|
||||
let val_norm = val.trim();
|
||||
@@ -336,16 +292,17 @@ fn kernel_cmdline_topology() -> Option<Topology> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Helper to parse known topology tokens in kebab- or snake-case.
|
||||
//// Helper to parse known topology tokens (canonical names only).
|
||||
//// Note: underscores are normalized to hyphens prior to matching.
|
||||
fn parse_topology_token(s: &str) -> Option<Topology> {
|
||||
// Normalize underscores to hyphens for simpler matching.
|
||||
let k = s.trim().to_ascii_lowercase().replace('_', "-");
|
||||
match k.as_str() {
|
||||
"btrfs-single" => Some(Topology::BtrfsSingle),
|
||||
"bcachefs-single" => Some(Topology::BcachefsSingle),
|
||||
"dual-independent" => Some(Topology::DualIndependent),
|
||||
"ssd-hdd-bcachefs" => Some(Topology::SsdHddBcachefs),
|
||||
"bcachefs2-copy" | "bcachefs-2copy" | "bcachefs-2-copy" => Some(Topology::Bcachefs2Copy),
|
||||
// Canonical single notation for two-copy bcachefs topology
|
||||
"bcachefs-2copy" => Some(Topology::Bcachefs2Copy),
|
||||
"btrfs-raid1" => Some(Topology::BtrfsRaid1),
|
||||
_ => None,
|
||||
}
|
||||
@@ -420,4 +377,4 @@ fn default_config() -> Config {
|
||||
path: "/run/zosstorage/state.json".into(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,5 +11,5 @@
|
||||
|
||||
pub mod loader;
|
||||
|
||||
pub use loader::{load_and_merge, validate};
|
||||
pub use crate::types::*;
|
||||
pub use loader::{load_and_merge, validate};
|
||||
|
||||
@@ -186,7 +186,10 @@ pub fn discover(filter: &DeviceFilter) -> Result<Vec<Disk>> {
|
||||
discover_with_provider(&provider, filter)
|
||||
}
|
||||
|
||||
fn discover_with_provider<P: DeviceProvider>(provider: &P, filter: &DeviceFilter) -> Result<Vec<Disk>> {
|
||||
fn discover_with_provider<P: DeviceProvider>(
|
||||
provider: &P,
|
||||
filter: &DeviceFilter,
|
||||
) -> Result<Vec<Disk>> {
|
||||
let mut candidates = provider.list_block_devices()?;
|
||||
// Probe properties if provider needs to enrich
|
||||
for d in &mut candidates {
|
||||
@@ -210,10 +213,15 @@ fn discover_with_provider<P: DeviceProvider>(provider: &P, filter: &DeviceFilter
|
||||
.collect();
|
||||
|
||||
if filtered.is_empty() {
|
||||
return Err(Error::Device("no eligible disks found after applying filters".to_string()));
|
||||
return Err(Error::Device(
|
||||
"no eligible disks found after applying filters".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
debug!("eligible disks: {:?}", filtered.iter().map(|d| &d.path).collect::<Vec<_>>());
|
||||
debug!(
|
||||
"eligible disks: {:?}",
|
||||
filtered.iter().map(|d| &d.path).collect::<Vec<_>>()
|
||||
);
|
||||
Ok(filtered)
|
||||
}
|
||||
|
||||
@@ -259,9 +267,10 @@ fn read_disk_size_bytes(name: &str) -> Result<u64> {
|
||||
let p = sys_block_path(name).join("size");
|
||||
let sectors = fs::read_to_string(&p)
|
||||
.map_err(|e| Error::Device(format!("read {} failed: {}", p.display(), e)))?;
|
||||
let sectors: u64 = sectors.trim().parse().map_err(|e| {
|
||||
Error::Device(format!("parse sectors for {} failed: {}", name, e))
|
||||
})?;
|
||||
let sectors: u64 = sectors
|
||||
.trim()
|
||||
.parse()
|
||||
.map_err(|e| Error::Device(format!("parse sectors for {} failed: {}", name, e)))?;
|
||||
Ok(sectors.saturating_mul(512))
|
||||
}
|
||||
|
||||
@@ -287,11 +296,7 @@ fn read_optional_string(p: PathBuf) -> Option<String> {
|
||||
while s.ends_with('\n') || s.ends_with('\r') {
|
||||
s.pop();
|
||||
}
|
||||
if s.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(s)
|
||||
}
|
||||
if s.is_empty() { None } else { Some(s) }
|
||||
}
|
||||
Err(_) => None,
|
||||
}
|
||||
@@ -324,9 +329,27 @@ mod tests {
|
||||
fn filter_by_size_and_include_exclude() {
|
||||
let provider = MockProvider {
|
||||
disks: vec![
|
||||
Disk { path: "/dev/sda".into(), size_bytes: 500 * 1024 * 1024 * 1024, rotational: true, model: None, serial: None }, // 500 GiB
|
||||
Disk { path: "/dev/nvme0n1".into(), size_bytes: 128 * 1024 * 1024 * 1024, rotational: false, model: None, serial: None }, // 128 GiB
|
||||
Disk { path: "/dev/loop0".into(), size_bytes: 8 * 1024 * 1024 * 1024, rotational: false, model: None, serial: None }, // 8 GiB pseudo (but mock provider supplies it)
|
||||
Disk {
|
||||
path: "/dev/sda".into(),
|
||||
size_bytes: 500 * 1024 * 1024 * 1024,
|
||||
rotational: true,
|
||||
model: None,
|
||||
serial: None,
|
||||
}, // 500 GiB
|
||||
Disk {
|
||||
path: "/dev/nvme0n1".into(),
|
||||
size_bytes: 128 * 1024 * 1024 * 1024,
|
||||
rotational: false,
|
||||
model: None,
|
||||
serial: None,
|
||||
}, // 128 GiB
|
||||
Disk {
|
||||
path: "/dev/loop0".into(),
|
||||
size_bytes: 8 * 1024 * 1024 * 1024,
|
||||
rotational: false,
|
||||
model: None,
|
||||
serial: None,
|
||||
}, // 8 GiB pseudo (but mock provider supplies it)
|
||||
],
|
||||
};
|
||||
|
||||
@@ -346,7 +369,13 @@ mod tests {
|
||||
fn no_match_returns_error() {
|
||||
let provider = MockProvider {
|
||||
disks: vec![
|
||||
Disk { path: "/dev/sdb".into(), size_bytes: 50 * 1024 * 1024 * 1024, rotational: true, model: None, serial: None }, // 50 GiB
|
||||
Disk {
|
||||
path: "/dev/sdb".into(),
|
||||
size_bytes: 50 * 1024 * 1024 * 1024,
|
||||
rotational: true,
|
||||
model: None,
|
||||
serial: None,
|
||||
}, // 50 GiB
|
||||
],
|
||||
};
|
||||
|
||||
@@ -363,4 +392,4 @@ mod tests {
|
||||
other => panic!("unexpected error: {:?}", other),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,4 +9,4 @@
|
||||
|
||||
pub mod discovery;
|
||||
|
||||
pub use discovery::*;
|
||||
pub use discovery::*;
|
||||
|
||||
@@ -53,4 +53,4 @@ pub enum Error {
|
||||
}
|
||||
|
||||
/// Crate-wide result alias.
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
@@ -9,4 +9,4 @@
|
||||
|
||||
pub mod plan;
|
||||
|
||||
pub use plan::*;
|
||||
pub use plan::*;
|
||||
|
||||
169
src/fs/plan.rs
169
src/fs/plan.rs
@@ -18,21 +18,21 @@
|
||||
// ext: dry-run mode to emit mkfs commands without executing (future).
|
||||
// REGION: EXTENSION_POINTS-END
|
||||
//
|
||||
// REGION: SAFETY
|
||||
// safety: must not run mkfs on non-empty or unexpected partitions; assume prior validation enforced.
|
||||
// safety: ensure labels follow reserved semantics (ZOSBOOT for ESP, ZOSDATA for all data FS).
|
||||
// safety: mkfs.btrfs uses -f in apply path immediately after partitioning to handle leftover signatures.
|
||||
// REGION: SAFETY-END
|
||||
// REGION: SAFETY
|
||||
// safety: must not run mkfs on non-empty or unexpected partitions; assume prior validation enforced.
|
||||
// safety: ensure labels follow reserved semantics (ZOSBOOT for ESP, ZOSDATA for all data FS).
|
||||
// safety: mkfs.btrfs uses -f in apply path immediately after partitioning to handle leftover signatures.
|
||||
// REGION: SAFETY-END
|
||||
//
|
||||
// REGION: ERROR_MAPPING
|
||||
// errmap: external mkfs/blkid failures -> crate::Error::Tool with captured stderr.
|
||||
// errmap: planning mismatches -> crate::Error::Filesystem with context.
|
||||
// REGION: ERROR_MAPPING-END
|
||||
//
|
||||
// REGION: TODO
|
||||
// todo: bcachefs tuning flags mapping from config (compression/checksum/cache_mode) deferred
|
||||
// todo: add UUID consistency checks across multi-device filesystems
|
||||
// REGION: TODO-END
|
||||
// REGION: TODO
|
||||
// todo: bcachefs tuning flags mapping from config (compression/checksum/cache_mode) deferred
|
||||
// todo: add UUID consistency checks across multi-device filesystems
|
||||
// REGION: TODO-END
|
||||
//! Filesystem planning and creation for zosstorage.
|
||||
//!
|
||||
//! Maps partition results to concrete filesystems (vfat, btrfs, bcachefs)
|
||||
@@ -42,14 +42,13 @@
|
||||
//! [fn make_filesystems](plan.rs:1).
|
||||
|
||||
use crate::{
|
||||
Result,
|
||||
Error, Result,
|
||||
partition::{PartRole, PartitionResult},
|
||||
types::{Config, Topology},
|
||||
partition::{PartitionResult, PartRole},
|
||||
util::{run_cmd, run_cmd_capture, which_tool},
|
||||
Error,
|
||||
};
|
||||
use tracing::{debug, warn};
|
||||
use std::fs;
|
||||
use tracing::{debug, warn};
|
||||
|
||||
/// Filesystem kinds supported by zosstorage.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
@@ -97,17 +96,14 @@ pub struct FsResult {
|
||||
pub label: String,
|
||||
}
|
||||
|
||||
/// Determine which partitions get which filesystem based on topology.
|
||||
///
|
||||
/// Rules:
|
||||
/// - ESP partitions => Vfat with label from cfg.filesystem.vfat.label (reserved "ZOSBOOT")
|
||||
/// - Data partitions => Btrfs with label cfg.filesystem.btrfs.label ("ZOSDATA"), unless topology SsdHddBcachefs
|
||||
/// - SsdHddBcachefs => pair one Cache partition (SSD) with one Data partition (HDD) into one Bcachefs FsSpec with devices [cache, data] and label cfg.filesystem.bcachefs.label ("ZOSDATA")
|
||||
/// - DualIndependent/BtrfsRaid1 => map each Data partition to its own Btrfs FsSpec (raid profile concerns are handled later during mkfs)
|
||||
pub fn plan_filesystems(
|
||||
parts: &[PartitionResult],
|
||||
cfg: &Config,
|
||||
) -> Result<FsPlan> {
|
||||
/// Determine which partitions get which filesystem based on topology.
|
||||
///
|
||||
/// Rules:
|
||||
/// - ESP partitions => Vfat with label from cfg.filesystem.vfat.label (reserved "ZOSBOOT")
|
||||
/// - Data partitions => Btrfs with label cfg.filesystem.btrfs.label ("ZOSDATA"), unless topology SsdHddBcachefs
|
||||
/// - SsdHddBcachefs => pair one Cache partition (SSD) with one Data partition (HDD) into one Bcachefs FsSpec with devices [cache, data] and label cfg.filesystem.bcachefs.label ("ZOSDATA")
|
||||
/// - DualIndependent/BtrfsRaid1 => map each Data partition to its own Btrfs FsSpec (raid profile concerns are handled later during mkfs)
|
||||
pub fn plan_filesystems(parts: &[PartitionResult], cfg: &Config) -> Result<FsPlan> {
|
||||
let mut specs: Vec<FsSpec> = Vec::new();
|
||||
|
||||
// Always map ESP partitions
|
||||
@@ -122,10 +118,22 @@ pub fn plan_filesystems(
|
||||
match cfg.topology {
|
||||
Topology::SsdHddBcachefs => {
|
||||
// Expect exactly one cache (SSD) and at least one data (HDD). Use the first data for pairing.
|
||||
let cache = parts.iter().find(|p| matches!(p.role, PartRole::Cache))
|
||||
.ok_or_else(|| Error::Filesystem("expected a Cache partition for SsdHddBcachefs topology".to_string()))?;
|
||||
let data = parts.iter().find(|p| matches!(p.role, PartRole::Data))
|
||||
.ok_or_else(|| Error::Filesystem("expected a Data partition for SsdHddBcachefs topology".to_string()))?;
|
||||
let cache = parts
|
||||
.iter()
|
||||
.find(|p| matches!(p.role, PartRole::Cache))
|
||||
.ok_or_else(|| {
|
||||
Error::Filesystem(
|
||||
"expected a Cache partition for SsdHddBcachefs topology".to_string(),
|
||||
)
|
||||
})?;
|
||||
let data = parts
|
||||
.iter()
|
||||
.find(|p| matches!(p.role, PartRole::Data))
|
||||
.ok_or_else(|| {
|
||||
Error::Filesystem(
|
||||
"expected a Data partition for SsdHddBcachefs topology".to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
specs.push(FsSpec {
|
||||
kind: FsKind::Bcachefs,
|
||||
@@ -173,8 +181,14 @@ pub fn plan_filesystems(
|
||||
}
|
||||
Topology::BcachefsSingle => {
|
||||
// Single-device bcachefs on the sole Data partition.
|
||||
let data = parts.iter().find(|p| matches!(p.role, PartRole::Data))
|
||||
.ok_or_else(|| Error::Filesystem("expected a Data partition for BcachefsSingle topology".to_string()))?;
|
||||
let data = parts
|
||||
.iter()
|
||||
.find(|p| matches!(p.role, PartRole::Data))
|
||||
.ok_or_else(|| {
|
||||
Error::Filesystem(
|
||||
"expected a Data partition for BcachefsSingle topology".to_string(),
|
||||
)
|
||||
})?;
|
||||
specs.push(FsSpec {
|
||||
kind: FsKind::Bcachefs,
|
||||
devices: vec![data.device_path.clone()],
|
||||
@@ -194,7 +208,9 @@ pub fn plan_filesystems(
|
||||
}
|
||||
|
||||
if specs.is_empty() {
|
||||
return Err(Error::Filesystem("no filesystems to create from provided partitions".to_string()));
|
||||
return Err(Error::Filesystem(
|
||||
"no filesystems to create from provided partitions".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(FsPlan { specs })
|
||||
@@ -215,7 +231,9 @@ pub fn make_filesystems(plan: &FsPlan, cfg: &Config) -> Result<Vec<FsResult>> {
|
||||
let blkid_tool = which_tool("blkid")?;
|
||||
|
||||
if blkid_tool.is_none() {
|
||||
return Err(Error::Filesystem("blkid not found in PATH; cannot capture filesystem UUIDs".into()));
|
||||
return Err(Error::Filesystem(
|
||||
"blkid not found in PATH; cannot capture filesystem UUIDs".into(),
|
||||
));
|
||||
}
|
||||
let blkid = blkid_tool.unwrap();
|
||||
|
||||
@@ -248,7 +266,9 @@ pub fn make_filesystems(plan: &FsPlan, cfg: &Config) -> Result<Vec<FsResult>> {
|
||||
return Err(Error::Filesystem("mkfs.btrfs not found in PATH".into()));
|
||||
};
|
||||
if spec.devices.is_empty() {
|
||||
return Err(Error::Filesystem("btrfs requires at least one device".into()));
|
||||
return Err(Error::Filesystem(
|
||||
"btrfs requires at least one device".into(),
|
||||
));
|
||||
}
|
||||
// mkfs.btrfs -L LABEL [ -m raid1 -d raid1 (when multi-device/raid1) ] dev1 [dev2 ...]
|
||||
let mut args: Vec<String> = vec![mkfs.clone(), "-L".into(), spec.label.clone()];
|
||||
@@ -288,11 +308,18 @@ pub fn make_filesystems(plan: &FsPlan, cfg: &Config) -> Result<Vec<FsResult>> {
|
||||
return Err(Error::Filesystem("bcachefs not found in PATH".into()));
|
||||
};
|
||||
if spec.devices.is_empty() {
|
||||
return Err(Error::Filesystem("bcachefs requires at least one device".into()));
|
||||
return Err(Error::Filesystem(
|
||||
"bcachefs requires at least one device".into(),
|
||||
));
|
||||
}
|
||||
// bcachefs format --label LABEL [--replicas=2] dev1 [dev2 ...]
|
||||
// Apply replicas policy for Bcachefs2Copy topology (data+metadata replicas = 2)
|
||||
let mut args: Vec<String> = vec![mkfs.clone(), "format".into(), "--label".into(), spec.label.clone()];
|
||||
let mut args: Vec<String> = vec![
|
||||
mkfs.clone(),
|
||||
"format".into(),
|
||||
"--label".into(),
|
||||
spec.label.clone(),
|
||||
];
|
||||
if matches!(cfg.topology, Topology::Bcachefs2Copy) {
|
||||
args.push("--replicas=2".into());
|
||||
}
|
||||
@@ -318,29 +345,32 @@ pub fn make_filesystems(plan: &FsPlan, cfg: &Config) -> Result<Vec<FsResult>> {
|
||||
}
|
||||
|
||||
fn capture_uuid(blkid: &str, dev: &str) -> Result<String> {
|
||||
// blkid -o export /dev/...
|
||||
let out = run_cmd_capture(&[blkid, "-o", "export", dev])?;
|
||||
let map = parse_blkid_export(&out.stdout);
|
||||
// Prefer ID_FS_UUID if present, fall back to UUID
|
||||
if let Some(u) = map.get("ID_FS_UUID") {
|
||||
return Ok(u.clone());
|
||||
}
|
||||
if let Some(u) = map.get("UUID") {
|
||||
return Ok(u.clone());
|
||||
}
|
||||
warn!("blkid did not report UUID for {}", dev);
|
||||
Err(Error::Filesystem(format!("missing UUID in blkid output for {}", dev)))
|
||||
// blkid -o export /dev/...
|
||||
let out = run_cmd_capture(&[blkid, "-o", "export", dev])?;
|
||||
let map = parse_blkid_export(&out.stdout);
|
||||
// Prefer ID_FS_UUID if present, fall back to UUID
|
||||
if let Some(u) = map.get("ID_FS_UUID") {
|
||||
return Ok(u.clone());
|
||||
}
|
||||
if let Some(u) = map.get("UUID") {
|
||||
return Ok(u.clone());
|
||||
}
|
||||
warn!("blkid did not report UUID for {}", dev);
|
||||
Err(Error::Filesystem(format!(
|
||||
"missing UUID in blkid output for {}",
|
||||
dev
|
||||
)))
|
||||
}
|
||||
|
||||
/// Minimal parser for blkid -o export KEY=VAL lines.
|
||||
fn parse_blkid_export(s: &str) -> std::collections::HashMap<String, String> {
|
||||
let mut map = std::collections::HashMap::new();
|
||||
for line in s.lines() {
|
||||
if let Some((k, v)) = line.split_once('=') {
|
||||
map.insert(k.trim().to_string(), v.trim().to_string());
|
||||
}
|
||||
}
|
||||
map
|
||||
let mut map = std::collections::HashMap::new();
|
||||
for line in s.lines() {
|
||||
if let Some((k, v)) = line.split_once('=') {
|
||||
map.insert(k.trim().to_string(), v.trim().to_string());
|
||||
}
|
||||
}
|
||||
map
|
||||
}
|
||||
|
||||
/// Probe existing filesystems on the system and return their identities (kind, uuid, label).
|
||||
@@ -354,13 +384,16 @@ fn parse_blkid_export(s: &str) -> std::collections::HashMap<String, String> {
|
||||
/// - Vec<FsResult> with at most one entry per filesystem UUID.
|
||||
pub fn probe_existing_filesystems() -> Result<Vec<FsResult>> {
|
||||
let Some(blkid) = which_tool("blkid")? else {
|
||||
return Err(Error::Filesystem("blkid not found in PATH; cannot probe existing filesystems".into()));
|
||||
return Err(Error::Filesystem(
|
||||
"blkid not found in PATH; cannot probe existing filesystems".into(),
|
||||
));
|
||||
};
|
||||
|
||||
let content = fs::read_to_string("/proc/partitions")
|
||||
.map_err(|e| Error::Filesystem(format!("/proc/partitions read error: {}", e)))?;
|
||||
|
||||
let mut results_by_uuid: std::collections::HashMap<String, FsResult> = std::collections::HashMap::new();
|
||||
let mut results_by_uuid: std::collections::HashMap<String, FsResult> =
|
||||
std::collections::HashMap::new();
|
||||
|
||||
for line in content.lines() {
|
||||
let line = line.trim();
|
||||
@@ -399,11 +432,13 @@ pub fn probe_existing_filesystems() -> Result<Vec<FsResult>> {
|
||||
let map = parse_blkid_export(&out.stdout);
|
||||
let ty = map.get("TYPE").cloned().unwrap_or_default();
|
||||
let label = map
|
||||
.get("ID_FS_LABEL").cloned()
|
||||
.get("ID_FS_LABEL")
|
||||
.cloned()
|
||||
.or_else(|| map.get("LABEL").cloned())
|
||||
.unwrap_or_default();
|
||||
let uuid = map
|
||||
.get("ID_FS_UUID").cloned()
|
||||
.get("ID_FS_UUID")
|
||||
.cloned()
|
||||
.or_else(|| map.get("UUID").cloned());
|
||||
|
||||
let (kind_opt, expected_label) = match ty.as_str() {
|
||||
@@ -434,13 +469,13 @@ pub fn probe_existing_filesystems() -> Result<Vec<FsResult>> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests_parse {
|
||||
use super::parse_blkid_export;
|
||||
use super::parse_blkid_export;
|
||||
|
||||
#[test]
|
||||
fn parse_export_ok() {
|
||||
let s = "ID_FS_UUID=abcd-1234\nUUID=abcd-1234\nTYPE=btrfs\n";
|
||||
let m = parse_blkid_export(s);
|
||||
assert_eq!(m.get("ID_FS_UUID").unwrap(), "abcd-1234");
|
||||
assert_eq!(m.get("TYPE").unwrap(), "btrfs");
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn parse_export_ok() {
|
||||
let s = "ID_FS_UUID=abcd-1234\nUUID=abcd-1234\nTYPE=btrfs\n";
|
||||
let m = parse_blkid_export(s);
|
||||
assert_eq!(m.get("ID_FS_UUID").unwrap(), "abcd-1234");
|
||||
assert_eq!(m.get("TYPE").unwrap(), "btrfs");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,14 +28,14 @@
|
||||
//! disks are empty before making any destructive changes.
|
||||
|
||||
use crate::{
|
||||
device::Disk,
|
||||
report::{StateReport, REPORT_VERSION},
|
||||
util::{run_cmd_capture, which_tool},
|
||||
Error, Result,
|
||||
device::Disk,
|
||||
report::{REPORT_VERSION, StateReport},
|
||||
util::{run_cmd_capture, which_tool},
|
||||
};
|
||||
use humantime::format_rfc3339;
|
||||
use serde_json::json;
|
||||
use std::{collections::HashMap, fs, path::Path};
|
||||
use humantime::format_rfc3339;
|
||||
use tracing::{debug, warn};
|
||||
|
||||
/// Return existing state if system is already provisioned; otherwise None.
|
||||
@@ -155,7 +155,10 @@ pub fn is_empty_disk(disk: &Disk) -> Result<bool> {
|
||||
|
||||
// Probe with blkid -p
|
||||
let Some(blkid) = which_tool("blkid")? else {
|
||||
warn!("blkid not found; conservatively treating {} as not empty", disk.path);
|
||||
warn!(
|
||||
"blkid not found; conservatively treating {} as not empty",
|
||||
disk.path
|
||||
);
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
@@ -237,7 +240,11 @@ fn is_partition_of(base: &str, name: &str) -> bool {
|
||||
if name == base {
|
||||
return false;
|
||||
}
|
||||
let ends_with_digit = base.chars().last().map(|c| c.is_ascii_digit()).unwrap_or(false);
|
||||
let ends_with_digit = base
|
||||
.chars()
|
||||
.last()
|
||||
.map(|c| c.is_ascii_digit())
|
||||
.unwrap_or(false);
|
||||
if ends_with_digit {
|
||||
// nvme0n1 -> nvme0n1p1
|
||||
if name.starts_with(base) {
|
||||
@@ -281,4 +288,4 @@ mod tests {
|
||||
assert!(!is_partition_of("nvme0n1", "nvme0n1"));
|
||||
assert!(!is_partition_of("nvme0n1", "nvme0n2p1"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
20
src/lib.rs
20
src/lib.rs
@@ -1,20 +1,20 @@
|
||||
//! Crate root for zosstorage: one-shot disk provisioning utility for initramfs.
|
||||
|
||||
pub mod cli;
|
||||
pub mod logging;
|
||||
pub mod config;
|
||||
pub mod device;
|
||||
pub mod partition;
|
||||
pub mod fs;
|
||||
pub mod mount;
|
||||
pub mod report;
|
||||
pub mod orchestrator;
|
||||
pub mod idempotency;
|
||||
pub mod util;
|
||||
pub mod errors;
|
||||
pub mod types; // top-level types (moved from config/types.rs for visibility)
|
||||
pub mod fs;
|
||||
pub mod idempotency;
|
||||
pub mod logging;
|
||||
pub mod mount;
|
||||
pub mod orchestrator;
|
||||
pub mod partition;
|
||||
pub mod report;
|
||||
pub mod types;
|
||||
pub mod util; // top-level types (moved from config/types.rs for visibility)
|
||||
|
||||
pub use errors::{Error, Result};
|
||||
|
||||
/// Crate version string from Cargo.
|
||||
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
|
||||
@@ -36,10 +36,10 @@ use std::fs::OpenOptions;
|
||||
use std::io::{self};
|
||||
use std::sync::OnceLock;
|
||||
use tracing::Level;
|
||||
use tracing_subscriber::filter::LevelFilter;
|
||||
use tracing_subscriber::fmt;
|
||||
use tracing_subscriber::prelude::*;
|
||||
use tracing_subscriber::registry::Registry;
|
||||
use tracing_subscriber::filter::LevelFilter;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
|
||||
/// Logging options resolved from CLI and/or config.
|
||||
@@ -116,21 +116,27 @@ pub fn init_logging(opts: &LogOptions) -> Result<()> {
|
||||
.with(stderr_layer)
|
||||
.with(file_layer)
|
||||
.try_init()
|
||||
.map_err(|e| crate::Error::Other(anyhow::anyhow!("failed to set global logger: {}", e)))?;
|
||||
.map_err(|e| {
|
||||
crate::Error::Other(anyhow::anyhow!("failed to set global logger: {}", e))
|
||||
})?;
|
||||
} else {
|
||||
// Fall back to stderr-only if file cannot be opened
|
||||
Registry::default()
|
||||
.with(stderr_layer)
|
||||
.try_init()
|
||||
.map_err(|e| crate::Error::Other(anyhow::anyhow!("failed to set global logger: {}", e)))?;
|
||||
.map_err(|e| {
|
||||
crate::Error::Other(anyhow::anyhow!("failed to set global logger: {}", e))
|
||||
})?;
|
||||
}
|
||||
} else {
|
||||
Registry::default()
|
||||
.with(stderr_layer)
|
||||
.try_init()
|
||||
.map_err(|e| crate::Error::Other(anyhow::anyhow!("failed to set global logger: {}", e)))?;
|
||||
.map_err(|e| {
|
||||
crate::Error::Other(anyhow::anyhow!("failed to set global logger: {}", e))
|
||||
})?;
|
||||
}
|
||||
|
||||
let _ = INIT_GUARD.set(());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,6 +54,10 @@ fn real_main() -> Result<()> {
|
||||
.with_apply(cli.apply)
|
||||
.with_mount_existing(cli.mount_existing)
|
||||
.with_report_current(cli.report_current)
|
||||
.with_report_path(cli.report.clone());
|
||||
.with_report_path(cli.report.clone())
|
||||
.with_topology_from_cli(cli.topology.is_some())
|
||||
.with_topology_from_cmdline(
|
||||
config::loader::kernel_cmdline_topology().is_some() && cli.topology.is_none(),
|
||||
);
|
||||
orchestrator::run(&ctx)
|
||||
}
|
||||
|
||||
@@ -9,4 +9,4 @@
|
||||
|
||||
pub mod ops;
|
||||
|
||||
pub use ops::*;
|
||||
pub use ops::*;
|
||||
|
||||
283
src/mount/ops.rs
283
src/mount/ops.rs
@@ -7,13 +7,13 @@
|
||||
// REGION: API-END
|
||||
//
|
||||
// REGION: RESPONSIBILITIES
|
||||
// - Implement mount phase only: plan root mounts under /var/mounts/{UUID}, ensure/plan subvols, and mount subvols to /var/cache/*.
|
||||
// - Implement mount phase only: plan root mounts under /var/mounts/{UUID} for data, mount ESP at /boot, ensure/plan subvols, and mount subvols to /var/cache/*.
|
||||
// - Use UUID= sources, deterministic primary selection (first FsResult) for dual_independent.
|
||||
// - Generate fstab entries only for four subvol targets; exclude runtime root mounts.
|
||||
// - Generate fstab entries covering runtime roots (/var/mounts/{UUID}, /boot when present) followed by the four subvol targets.
|
||||
// REGION: RESPONSIBILITIES-END
|
||||
//
|
||||
// REGION: SAFETY
|
||||
// - Never mount ESP; only Btrfs/Bcachefs data FS. Root btrfs mounts use subvolid=5 (top-level).
|
||||
// - Mount ESP (VFAT) read-write at /boot once; data roots use subvolid=5 (btrfs) or plain (bcachefs).
|
||||
// - Create-if-missing subvolumes prior to subvol mounts; ensure directories exist.
|
||||
// - Always use UUID= sources; no device paths.
|
||||
// - Bcachefs subvolume mounts use option key 'X-mount.subdir={name}' (not 'subvol=').
|
||||
@@ -36,37 +36,123 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use crate::{
|
||||
Error, Result,
|
||||
fs::{FsKind, FsResult},
|
||||
types::Config,
|
||||
util::{run_cmd, run_cmd_capture, which_tool},
|
||||
Error, Result,
|
||||
};
|
||||
use std::fs::{create_dir_all, File};
|
||||
use std::collections::HashMap;
|
||||
use std::fs::{File, create_dir_all};
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
use tracing::info;
|
||||
|
||||
const ROOT_BASE: &str = "/var/mounts";
|
||||
const BOOT_TARGET: &str = "/boot";
|
||||
const TARGET_SYSTEM: &str = "/var/cache/system";
|
||||
const TARGET_ETC: &str = "/var/cache/etc";
|
||||
const TARGET_MODULES: &str = "/var/cache/modules";
|
||||
const TARGET_VM_META: &str = "/var/cache/vm-meta";
|
||||
const SUBVOLS: &[&str] = &["system", "etc", "modules", "vm-meta"];
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct ExistingMount {
|
||||
source: String,
|
||||
fstype: String,
|
||||
options: String,
|
||||
}
|
||||
|
||||
fn current_mounts() -> HashMap<String, ExistingMount> {
|
||||
let mut map = HashMap::new();
|
||||
if let Ok(content) = std::fs::read_to_string("/proc/self/mountinfo") {
|
||||
for line in content.lines() {
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if parts.len() < 7 {
|
||||
continue;
|
||||
}
|
||||
let target = parts[4].to_string();
|
||||
let mount_options = parts[5].to_string();
|
||||
if let Some(idx) = parts.iter().position(|p| *p == "-") {
|
||||
if idx + 2 < parts.len() {
|
||||
let fstype = parts[idx + 1].to_string();
|
||||
let source = parts[idx + 2].to_string();
|
||||
let super_opts = if idx + 3 < parts.len() {
|
||||
parts[idx + 3].to_string()
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
let combined_options = if super_opts.is_empty() {
|
||||
mount_options.clone()
|
||||
} else {
|
||||
format!("{mount_options},{super_opts}")
|
||||
};
|
||||
map.insert(
|
||||
target,
|
||||
ExistingMount {
|
||||
source,
|
||||
fstype,
|
||||
options: combined_options,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
map
|
||||
}
|
||||
|
||||
fn source_matches_uuid(existing_source: &str, uuid: &str) -> bool {
|
||||
if existing_source == format!("UUID={}", uuid) {
|
||||
return true;
|
||||
}
|
||||
if let Some(existing_uuid) = existing_source.strip_prefix("UUID=") {
|
||||
return existing_uuid == uuid;
|
||||
}
|
||||
if existing_source.starts_with("/dev/") {
|
||||
let uuid_path = Path::new("/dev/disk/by-uuid").join(uuid);
|
||||
if let (Ok(existing_canon), Ok(uuid_canon)) = (
|
||||
std::fs::canonicalize(existing_source),
|
||||
std::fs::canonicalize(&uuid_path),
|
||||
) {
|
||||
return existing_canon == uuid_canon;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn disk_of_device(dev: &str) -> Option<String> {
|
||||
let path = Path::new(dev);
|
||||
let name = path.file_name()?.to_str()?;
|
||||
let mut cutoff = name.len();
|
||||
while cutoff > 0 && name.as_bytes()[cutoff - 1].is_ascii_digit() {
|
||||
cutoff -= 1;
|
||||
}
|
||||
if cutoff == name.len() {
|
||||
return Some(dev.to_string());
|
||||
}
|
||||
let mut disk = name[..cutoff].to_string();
|
||||
if disk.ends_with('p') {
|
||||
disk.pop();
|
||||
}
|
||||
let parent = path.parent()?.to_str().unwrap_or("/dev");
|
||||
Some(format!("{}/{}", parent, disk))
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PlannedMount {
|
||||
pub uuid: String, // UUID string without prefix
|
||||
pub target: String, // absolute path
|
||||
pub fstype: String, // "btrfs" | "bcachefs"
|
||||
pub options: String, // e.g., "rw,noatime,subvolid=5"
|
||||
pub uuid: String, // UUID string without prefix
|
||||
pub target: String, // absolute path
|
||||
pub fstype: String, // "btrfs" | "bcachefs"
|
||||
pub options: String, // e.g., "rw,noatime,subvolid=5"
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PlannedSubvolMount {
|
||||
pub uuid: String, // UUID of primary FS
|
||||
pub name: String, // subvol name (system/etc/modules/vm-meta)
|
||||
pub target: String, // absolute final target
|
||||
pub fstype: String, // "btrfs" | "bcachefs"
|
||||
pub options: String, // e.g., "rw,noatime,subvol=system"
|
||||
pub uuid: String, // UUID of primary FS
|
||||
pub name: String, // subvol name (system/etc/modules/vm-meta)
|
||||
pub target: String, // absolute final target
|
||||
pub fstype: String, // "btrfs" | "bcachefs"
|
||||
pub options: String, // e.g., "rw,noatime,subvol=system"
|
||||
}
|
||||
|
||||
/// Mount plan per policy.
|
||||
@@ -134,11 +220,36 @@ pub fn plan_mounts(fs_results: &[FsResult], _cfg: &Config) -> Result<MountPlan>
|
||||
});
|
||||
}
|
||||
|
||||
// Determine primary UUID
|
||||
let primary_uuid = Some(data[0].uuid.clone());
|
||||
let primary = data[0];
|
||||
let primary_uuid = Some(primary.uuid.clone());
|
||||
let primary_disk = primary.devices.first().and_then(|dev| disk_of_device(dev));
|
||||
|
||||
let mut chosen_esp: Option<&FsResult> = None;
|
||||
let mut fallback_esp: Option<&FsResult> = None;
|
||||
for esp in fs_results.iter().filter(|r| matches!(r.kind, FsKind::Vfat)) {
|
||||
if fallback_esp.is_none() {
|
||||
fallback_esp = Some(esp);
|
||||
}
|
||||
if let (Some(ref disk), Some(esp_disk)) = (
|
||||
primary_disk.as_ref(),
|
||||
esp.devices.first().and_then(|dev| disk_of_device(dev)),
|
||||
) {
|
||||
if esp_disk == **disk {
|
||||
chosen_esp = Some(esp);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(esp) = chosen_esp.or(fallback_esp) {
|
||||
root_mounts.push(PlannedMount {
|
||||
uuid: esp.uuid.clone(),
|
||||
target: BOOT_TARGET.to_string(),
|
||||
fstype: fstype_str(esp.kind).to_string(),
|
||||
options: "rw".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
// Subvol mounts only from primary FS
|
||||
let primary = data[0];
|
||||
let mut subvol_mounts: Vec<PlannedSubvolMount> = Vec::new();
|
||||
let fstype = fstype_str(primary.kind).to_string();
|
||||
// Option key differs per filesystem: btrfs uses subvol=, bcachefs uses X-mount.subdir=
|
||||
@@ -189,11 +300,37 @@ pub fn apply_mounts(plan: &MountPlan) -> Result<Vec<MountResult>> {
|
||||
.map_err(|e| Error::Mount(format!("failed to create dir {}: {}", sm.target, e)))?;
|
||||
}
|
||||
|
||||
let mut results: Vec<MountResult> = Vec::new();
|
||||
let mut results_map: HashMap<String, MountResult> = HashMap::new();
|
||||
let mut existing_mounts = current_mounts();
|
||||
|
||||
// Root mounts
|
||||
for pm in &plan.root_mounts {
|
||||
let source = format!("UUID={}", pm.uuid);
|
||||
if let Some(existing) = existing_mounts.get(pm.target.as_str()) {
|
||||
if source_matches_uuid(&existing.source, &pm.uuid) {
|
||||
info!(
|
||||
"mount::apply_mounts: target {} already mounted; skipping",
|
||||
pm.target
|
||||
);
|
||||
let existing_fstype = existing.fstype.clone();
|
||||
let existing_options = existing.options.clone();
|
||||
results_map
|
||||
.entry(pm.target.clone())
|
||||
.or_insert_with(|| MountResult {
|
||||
source: source.clone(),
|
||||
target: pm.target.clone(),
|
||||
fstype: existing_fstype,
|
||||
options: existing_options,
|
||||
});
|
||||
continue;
|
||||
} else {
|
||||
return Err(Error::Mount(format!(
|
||||
"target {} already mounted by {} (expected UUID={})",
|
||||
pm.target, existing.source, pm.uuid
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
let args = [
|
||||
mount_tool.as_str(),
|
||||
"-t",
|
||||
@@ -204,12 +341,23 @@ pub fn apply_mounts(plan: &MountPlan) -> Result<Vec<MountResult>> {
|
||||
pm.target.as_str(),
|
||||
];
|
||||
run_cmd(&args)?;
|
||||
results.push(MountResult {
|
||||
source,
|
||||
target: pm.target.clone(),
|
||||
fstype: pm.fstype.clone(),
|
||||
options: pm.options.clone(),
|
||||
});
|
||||
existing_mounts.insert(
|
||||
pm.target.clone(),
|
||||
ExistingMount {
|
||||
source: source.clone(),
|
||||
fstype: pm.fstype.clone(),
|
||||
options: pm.options.clone(),
|
||||
},
|
||||
);
|
||||
results_map.insert(
|
||||
pm.target.clone(),
|
||||
MountResult {
|
||||
source,
|
||||
target: pm.target.clone(),
|
||||
fstype: pm.fstype.clone(),
|
||||
options: pm.options.clone(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
// Subvolume creation (create-if-missing) and mounts for the primary
|
||||
@@ -245,14 +393,18 @@ pub fn apply_mounts(plan: &MountPlan) -> Result<Vec<MountResult>> {
|
||||
if !exists {
|
||||
// Create subvolume
|
||||
let subvol_path = format!("{}/{}", root, sm.name);
|
||||
let args = [btrfs_tool.as_str(), "subvolume", "create", subvol_path.as_str()];
|
||||
let args = [
|
||||
btrfs_tool.as_str(),
|
||||
"subvolume",
|
||||
"create",
|
||||
subvol_path.as_str(),
|
||||
];
|
||||
run_cmd(&args)?;
|
||||
}
|
||||
}
|
||||
} else if primary_kind == "bcachefs" {
|
||||
let bcachefs_tool = which_tool("bcachefs")?.ok_or_else(|| {
|
||||
Error::Mount("required tool 'bcachefs' not found in PATH".into())
|
||||
})?;
|
||||
let bcachefs_tool = which_tool("bcachefs")?
|
||||
.ok_or_else(|| Error::Mount("required tool 'bcachefs' not found in PATH".into()))?;
|
||||
for sm in &plan.subvol_mounts {
|
||||
if &sm.uuid != primary_uuid {
|
||||
continue;
|
||||
@@ -279,6 +431,31 @@ pub fn apply_mounts(plan: &MountPlan) -> Result<Vec<MountResult>> {
|
||||
// Subvol mounts
|
||||
for sm in &plan.subvol_mounts {
|
||||
let source = format!("UUID={}", sm.uuid);
|
||||
if let Some(existing) = existing_mounts.get(sm.target.as_str()) {
|
||||
if source_matches_uuid(&existing.source, &sm.uuid) {
|
||||
info!(
|
||||
"mount::apply_mounts: target {} already mounted; skipping",
|
||||
sm.target
|
||||
);
|
||||
let existing_fstype = existing.fstype.clone();
|
||||
let existing_options = existing.options.clone();
|
||||
results_map
|
||||
.entry(sm.target.clone())
|
||||
.or_insert_with(|| MountResult {
|
||||
source: source.clone(),
|
||||
target: sm.target.clone(),
|
||||
fstype: existing_fstype,
|
||||
options: existing_options,
|
||||
});
|
||||
continue;
|
||||
} else {
|
||||
return Err(Error::Mount(format!(
|
||||
"target {} already mounted by {} (expected UUID={})",
|
||||
sm.target, existing.source, sm.uuid
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
let args = [
|
||||
mount_tool.as_str(),
|
||||
"-t",
|
||||
@@ -289,14 +466,28 @@ pub fn apply_mounts(plan: &MountPlan) -> Result<Vec<MountResult>> {
|
||||
sm.target.as_str(),
|
||||
];
|
||||
run_cmd(&args)?;
|
||||
results.push(MountResult {
|
||||
source,
|
||||
target: sm.target.clone(),
|
||||
fstype: sm.fstype.clone(),
|
||||
options: sm.options.clone(),
|
||||
});
|
||||
existing_mounts.insert(
|
||||
sm.target.clone(),
|
||||
ExistingMount {
|
||||
source: source.clone(),
|
||||
fstype: sm.fstype.clone(),
|
||||
options: sm.options.clone(),
|
||||
},
|
||||
);
|
||||
results_map.insert(
|
||||
sm.target.clone(),
|
||||
MountResult {
|
||||
source,
|
||||
target: sm.target.clone(),
|
||||
fstype: sm.fstype.clone(),
|
||||
options: sm.options.clone(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let mut results: Vec<MountResult> = results_map.into_values().collect();
|
||||
results.sort_by(|a, b| a.target.cmp(&b.target));
|
||||
|
||||
Ok(results)
|
||||
}
|
||||
|
||||
@@ -306,24 +497,26 @@ pub fn maybe_write_fstab(mounts: &[MountResult], cfg: &Config) -> Result<()> {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Filter only the four subvol targets
|
||||
// Partition mount results into runtime root mounts and final subvolume targets.
|
||||
let mut root_entries: Vec<&MountResult> = mounts
|
||||
.iter()
|
||||
.filter(|m| m.target.starts_with(ROOT_BASE) || m.target == BOOT_TARGET)
|
||||
.collect();
|
||||
let wanted = [TARGET_ETC, TARGET_MODULES, TARGET_SYSTEM, TARGET_VM_META];
|
||||
let mut entries: Vec<&MountResult> = mounts
|
||||
let mut subvol_entries: Vec<&MountResult> = mounts
|
||||
.iter()
|
||||
.filter(|m| wanted.contains(&m.target.as_str()))
|
||||
.collect();
|
||||
|
||||
// Sort by target path ascending to be deterministic
|
||||
entries.sort_by(|a, b| a.target.cmp(&b.target));
|
||||
// Sort by target path ascending to be deterministic (roots before subvols).
|
||||
root_entries.sort_by(|a, b| a.target.cmp(&b.target));
|
||||
subvol_entries.sort_by(|a, b| a.target.cmp(&b.target));
|
||||
|
||||
// Compose lines
|
||||
// Compose lines: include all root mounts first, followed by the four subvol targets.
|
||||
let mut lines: Vec<String> = Vec::new();
|
||||
for m in entries {
|
||||
for m in root_entries.into_iter().chain(subvol_entries.into_iter()) {
|
||||
// m.source already "UUID=..."
|
||||
let line = format!(
|
||||
"{} {} {} {} 0 0",
|
||||
m.source, m.target, m.fstype, m.options
|
||||
);
|
||||
let line = format!("{} {} {} {} 0 0", m.source, m.target, m.fstype, m.options);
|
||||
lines.push(line);
|
||||
}
|
||||
|
||||
@@ -352,4 +545,4 @@ pub fn maybe_write_fstab(mounts: &[MountResult], cfg: &Config) -> Result<()> {
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,4 +3,4 @@
|
||||
//! Re-exports the concrete implementation from run.rs to avoid duplicating types/functions.
|
||||
|
||||
pub mod run;
|
||||
pub use run::*;
|
||||
pub use run::*;
|
||||
|
||||
@@ -43,13 +43,13 @@
|
||||
//! - Report generation and write
|
||||
|
||||
use crate::{
|
||||
types::Config,
|
||||
logging::LogOptions,
|
||||
device::{discover, DeviceFilter, Disk},
|
||||
idempotency,
|
||||
partition,
|
||||
fs as zfs,
|
||||
Error, Result,
|
||||
device::{DeviceFilter, Disk, discover},
|
||||
fs as zfs, idempotency,
|
||||
logging::LogOptions,
|
||||
partition,
|
||||
report::StateReport,
|
||||
types::{Config, Topology},
|
||||
};
|
||||
use humantime::format_rfc3339;
|
||||
use regex::Regex;
|
||||
@@ -75,6 +75,10 @@ pub struct Context {
|
||||
pub report_current: bool,
|
||||
/// Optional report path override (when provided by CLI --report).
|
||||
pub report_path_override: Option<String>,
|
||||
/// True when topology was provided via CLI (--topology), giving it precedence.
|
||||
pub topo_from_cli: bool,
|
||||
/// True when topology was provided via kernel cmdline, giving it precedence if CLI omitted it.
|
||||
pub topo_from_cmdline: bool,
|
||||
}
|
||||
|
||||
impl Context {
|
||||
@@ -88,6 +92,8 @@ impl Context {
|
||||
mount_existing: false,
|
||||
report_current: false,
|
||||
report_path_override: None,
|
||||
topo_from_cli: false,
|
||||
topo_from_cmdline: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,6 +142,44 @@ impl Context {
|
||||
self.report_current = report_current;
|
||||
self
|
||||
}
|
||||
|
||||
/// Mark that topology was provided via CLI (--topology).
|
||||
pub fn with_topology_from_cli(mut self, v: bool) -> Self {
|
||||
self.topo_from_cli = v;
|
||||
self
|
||||
}
|
||||
|
||||
/// Mark that topology was provided via kernel cmdline (zosstorage.topology=).
|
||||
pub fn with_topology_from_cmdline(mut self, v: bool) -> Self {
|
||||
self.topo_from_cmdline = v;
|
||||
self
|
||||
}
|
||||
}
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
enum ProvisioningMode {
|
||||
Apply,
|
||||
Preview,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
enum AutoDecision {
|
||||
Apply,
|
||||
MountExisting,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct AutoSelection {
|
||||
decision: AutoDecision,
|
||||
fs_results: Option<Vec<zfs::FsResult>>,
|
||||
state: Option<StateReport>,
|
||||
}
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
enum ExecutionMode {
|
||||
ReportCurrent,
|
||||
MountExisting,
|
||||
Apply,
|
||||
Preview,
|
||||
Auto,
|
||||
}
|
||||
|
||||
/// Run the one-shot provisioning flow.
|
||||
@@ -143,119 +187,237 @@ impl Context {
|
||||
/// Returns Ok(()) on success and also on success-noop when already provisioned.
|
||||
/// Any validation or execution failure aborts with an error.
|
||||
pub fn run(ctx: &Context) -> Result<()> {
|
||||
info!("orchestrator: starting run() with topology {:?}", ctx.cfg.topology);
|
||||
info!("orchestrator: starting run()");
|
||||
|
||||
// Enforce mutually exclusive execution modes among: --mount-existing, --report-current, --apply
|
||||
let selected_modes =
|
||||
(ctx.mount_existing as u8) +
|
||||
(ctx.report_current as u8) +
|
||||
(ctx.apply as u8);
|
||||
(ctx.mount_existing as u8) + (ctx.report_current as u8) + (ctx.apply as u8);
|
||||
if selected_modes > 1 {
|
||||
return Err(Error::Validation(
|
||||
"choose only one mode: --mount-existing | --report-current | --apply".into(),
|
||||
));
|
||||
}
|
||||
|
||||
// Mode 1: Mount existing filesystems (non-destructive), based on on-disk headers.
|
||||
if ctx.mount_existing {
|
||||
info!("orchestrator: mount-existing mode");
|
||||
let fs_results = zfs::probe_existing_filesystems()?;
|
||||
if fs_results.is_empty() {
|
||||
return Err(Error::Mount(
|
||||
"no existing filesystems with reserved labels (ZOSBOOT/ZOSDATA) were found".into(),
|
||||
));
|
||||
}
|
||||
let mplan = crate::mount::plan_mounts(&fs_results, &ctx.cfg)?;
|
||||
let mres = crate::mount::apply_mounts(&mplan)?;
|
||||
crate::mount::maybe_write_fstab(&mres, &ctx.cfg)?;
|
||||
let preview_requested = ctx.show || ctx.report_path_override.is_some();
|
||||
|
||||
// Optional JSON summary for mount-existing
|
||||
if ctx.show || ctx.report_path_override.is_some() || ctx.report_current {
|
||||
let now = format_rfc3339(SystemTime::now()).to_string();
|
||||
let fs_json: Vec<serde_json::Value> = fs_results
|
||||
.iter()
|
||||
.map(|r| {
|
||||
let kind_str = match r.kind {
|
||||
zfs::FsKind::Vfat => "vfat",
|
||||
zfs::FsKind::Btrfs => "btrfs",
|
||||
zfs::FsKind::Bcachefs => "bcachefs",
|
||||
};
|
||||
json!({
|
||||
"kind": kind_str,
|
||||
"uuid": r.uuid,
|
||||
"label": r.label,
|
||||
"devices": r.devices,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
let initial_mode = if ctx.report_current {
|
||||
ExecutionMode::ReportCurrent
|
||||
} else if ctx.mount_existing {
|
||||
ExecutionMode::MountExisting
|
||||
} else if ctx.apply {
|
||||
ExecutionMode::Apply
|
||||
} else if preview_requested {
|
||||
ExecutionMode::Preview
|
||||
} else {
|
||||
ExecutionMode::Auto
|
||||
};
|
||||
|
||||
let mounts_json: Vec<serde_json::Value> = mres
|
||||
.iter()
|
||||
.map(|m| {
|
||||
json!({
|
||||
"source": m.source,
|
||||
"target": m.target,
|
||||
"fstype": m.fstype,
|
||||
"options": m.options,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let summary = json!({
|
||||
"version": "v1",
|
||||
"timestamp": now,
|
||||
"status": "mounted_existing",
|
||||
"filesystems": fs_json,
|
||||
"mounts": mounts_json,
|
||||
});
|
||||
|
||||
if ctx.show || ctx.report_current {
|
||||
println!("{}", summary);
|
||||
}
|
||||
if let Some(path) = &ctx.report_path_override {
|
||||
fs::write(path, summary.to_string()).map_err(|e| {
|
||||
Error::Report(format!("failed to write report to {}: {}", path, e))
|
||||
})?;
|
||||
info!("orchestrator: wrote mount-existing report to {}", path);
|
||||
match initial_mode {
|
||||
ExecutionMode::ReportCurrent => run_report_current(ctx),
|
||||
ExecutionMode::MountExisting => run_mount_existing(ctx, None, None),
|
||||
ExecutionMode::Apply => run_provisioning(ctx, ProvisioningMode::Apply, None),
|
||||
ExecutionMode::Preview => run_provisioning(ctx, ProvisioningMode::Preview, None),
|
||||
ExecutionMode::Auto => {
|
||||
let selection = auto_select_mode(ctx)?;
|
||||
match selection.decision {
|
||||
AutoDecision::MountExisting => {
|
||||
run_mount_existing(ctx, selection.fs_results, selection.state)
|
||||
}
|
||||
AutoDecision::Apply => {
|
||||
run_provisioning(ctx, ProvisioningMode::Apply, selection.state)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(());
|
||||
fn auto_select_mode(ctx: &Context) -> Result<AutoSelection> {
|
||||
info!("orchestrator: auto-selecting execution mode");
|
||||
let state = idempotency::detect_existing_state()?;
|
||||
let fs_results = zfs::probe_existing_filesystems()?;
|
||||
|
||||
if let Some(state) = state {
|
||||
info!("orchestrator: provisioned state detected; attempting mount-existing flow");
|
||||
return Ok(AutoSelection {
|
||||
decision: AutoDecision::MountExisting,
|
||||
fs_results: if fs_results.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(fs_results)
|
||||
},
|
||||
state: Some(state),
|
||||
});
|
||||
}
|
||||
|
||||
// Mode 3: Report current initialized filesystems and mounts (non-destructive).
|
||||
if ctx.report_current {
|
||||
info!("orchestrator: report-current mode");
|
||||
let fs_results = zfs::probe_existing_filesystems()?;
|
||||
if !fs_results.is_empty() {
|
||||
info!(
|
||||
"orchestrator: detected {} filesystem(s) with reserved labels; selecting mount-existing",
|
||||
fs_results.len()
|
||||
);
|
||||
return Ok(AutoSelection {
|
||||
decision: AutoDecision::MountExisting,
|
||||
fs_results: Some(fs_results),
|
||||
state: None,
|
||||
});
|
||||
}
|
||||
|
||||
// Parse /proc/mounts and include only our relevant targets.
|
||||
let mounts_content = fs::read_to_string("/proc/mounts").unwrap_or_default();
|
||||
let mounts_json: Vec<serde_json::Value> = mounts_content
|
||||
.lines()
|
||||
.filter_map(|line| {
|
||||
let mut it = line.split_whitespace();
|
||||
let source = it.next()?;
|
||||
let target = it.next()?;
|
||||
let fstype = it.next()?;
|
||||
let options = it.next().unwrap_or("");
|
||||
if target.starts_with("/var/mounts/")
|
||||
|| target == "/var/cache/system"
|
||||
|| target == "/var/cache/etc"
|
||||
|| target == "/var/cache/modules"
|
||||
|| target == "/var/cache/vm-meta"
|
||||
{
|
||||
Some(json!({
|
||||
"source": source,
|
||||
"target": target,
|
||||
"fstype": fstype,
|
||||
"options": options
|
||||
}))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
info!(
|
||||
"orchestrator: no provisioned state or labeled filesystems detected; selecting apply mode (topology={:?})",
|
||||
ctx.cfg.topology
|
||||
);
|
||||
|
||||
Ok(AutoSelection {
|
||||
decision: AutoDecision::Apply,
|
||||
fs_results: None,
|
||||
state: None,
|
||||
})
|
||||
}
|
||||
|
||||
fn run_report_current(ctx: &Context) -> Result<()> {
|
||||
info!("orchestrator: report-current mode");
|
||||
let fs_results = zfs::probe_existing_filesystems()?;
|
||||
|
||||
// Read all mounts, filtering common system/uninteresting ones
|
||||
let mounts_content = fs::read_to_string("/proc/mounts").unwrap_or_default();
|
||||
let mounts_json: Vec<serde_json::Value> = mounts_content
|
||||
.lines()
|
||||
.filter_map(|line| {
|
||||
let mut it = line.split_whitespace();
|
||||
let source = it.next()?;
|
||||
let target = it.next()?;
|
||||
let fstype = it.next()?;
|
||||
let options = it.next().unwrap_or("");
|
||||
|
||||
// Skip common pseudo/virtual filesystems and system mounts
|
||||
if source.starts_with("devtmpfs")
|
||||
|| source.starts_with("tmpfs")
|
||||
|| source.starts_with("proc")
|
||||
|| source.starts_with("sysfs")
|
||||
|| source.starts_with("cgroup")
|
||||
|| source.starts_with("bpf")
|
||||
|| source.starts_with("debugfs")
|
||||
|| source.starts_with("securityfs")
|
||||
|| source.starts_with("mqueue")
|
||||
|| source.starts_with("pstore")
|
||||
|| source.starts_with("tracefs")
|
||||
|| source.starts_with("hugetlbfs")
|
||||
|| source.starts_with("efivarfs")
|
||||
|| source.starts_with("systemd-1")
|
||||
|| target.starts_with("/proc")
|
||||
|| target.starts_with("/sys")
|
||||
|| target.starts_with("/dev")
|
||||
|| target.starts_with("/run")
|
||||
|| target.starts_with("/boot")
|
||||
|| target.starts_with("/efi")
|
||||
|| target.starts_with("/boot/efi")
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
||||
// Include zosstorage target mounts and general data mounts
|
||||
Some(json!({
|
||||
"source": source,
|
||||
"target": target,
|
||||
"fstype": fstype,
|
||||
"options": options
|
||||
}))
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Read partition information from /proc/partitions
|
||||
let partitions_content = fs::read_to_string("/proc/partitions").unwrap_or_default();
|
||||
let partitions_json: Vec<serde_json::Value> = partitions_content
|
||||
.lines()
|
||||
.filter_map(|line| {
|
||||
let line = line.trim();
|
||||
if line.is_empty() || line.starts_with("major") {
|
||||
return None;
|
||||
}
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if parts.len() < 4 {
|
||||
return None;
|
||||
}
|
||||
let name = parts[3];
|
||||
// Skip pseudo devices
|
||||
if name.starts_with("loop")
|
||||
|| name.starts_with("ram")
|
||||
|| name.starts_with("zram")
|
||||
|| name.starts_with("fd")
|
||||
|| name.starts_with("dm-")
|
||||
|| name.starts_with("md")
|
||||
{
|
||||
return None;
|
||||
}
|
||||
let major: u32 = parts[0].parse().ok()?;
|
||||
let minor: u32 = parts[1].parse().ok()?;
|
||||
let size_kb: u64 = parts[2].parse().ok()?;
|
||||
Some(json!({
|
||||
"name": name,
|
||||
"major": major,
|
||||
"minor": minor,
|
||||
"size_kb": size_kb,
|
||||
"size_gib": size_kb / (1024 * 1024)
|
||||
}))
|
||||
})
|
||||
.collect();
|
||||
|
||||
let fs_json: Vec<serde_json::Value> = fs_results
|
||||
.iter()
|
||||
.map(|r| {
|
||||
let kind_str = match r.kind {
|
||||
zfs::FsKind::Vfat => "vfat",
|
||||
zfs::FsKind::Btrfs => "btrfs",
|
||||
zfs::FsKind::Bcachefs => "bcachefs",
|
||||
};
|
||||
json!({
|
||||
"kind": kind_str,
|
||||
"uuid": r.uuid,
|
||||
"label": r.label,
|
||||
"devices": r.devices
|
||||
})
|
||||
.collect();
|
||||
})
|
||||
.collect();
|
||||
|
||||
let now = format_rfc3339(SystemTime::now()).to_string();
|
||||
let summary = json!({
|
||||
"version": "v1",
|
||||
"timestamp": now,
|
||||
"status": "observed",
|
||||
"partitions": partitions_json,
|
||||
"filesystems": fs_json,
|
||||
"mounts": mounts_json
|
||||
});
|
||||
|
||||
println!("{}", summary);
|
||||
if let Some(path) = &ctx.report_path_override {
|
||||
fs::write(path, summary.to_string())
|
||||
.map_err(|e| Error::Report(format!("failed to write report to {}: {}", path, e)))?;
|
||||
info!("orchestrator: wrote report-current to {}", path);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_mount_existing(
|
||||
ctx: &Context,
|
||||
fs_results_override: Option<Vec<zfs::FsResult>>,
|
||||
state_hint: Option<StateReport>,
|
||||
) -> Result<()> {
|
||||
info!("orchestrator: mount-existing mode");
|
||||
let fs_results = match fs_results_override {
|
||||
Some(results) => results,
|
||||
None => zfs::probe_existing_filesystems()?,
|
||||
};
|
||||
if fs_results.is_empty() {
|
||||
return Err(Error::Mount(
|
||||
"no existing filesystems with reserved labels (ZOSBOOT/ZOSDATA) were found".into(),
|
||||
));
|
||||
}
|
||||
|
||||
let mplan = crate::mount::plan_mounts(&fs_results, &ctx.cfg)?;
|
||||
let mres = crate::mount::apply_mounts(&mplan)?;
|
||||
crate::mount::maybe_write_fstab(&mres, &ctx.cfg)?;
|
||||
|
||||
if ctx.show || ctx.report_path_override.is_some() || ctx.report_current {
|
||||
let now = format_rfc3339(SystemTime::now()).to_string();
|
||||
let fs_json: Vec<serde_json::Value> = fs_results
|
||||
.iter()
|
||||
.map(|r| {
|
||||
@@ -268,82 +430,109 @@ pub fn run(ctx: &Context) -> Result<()> {
|
||||
"kind": kind_str,
|
||||
"uuid": r.uuid,
|
||||
"label": r.label,
|
||||
"devices": r.devices
|
||||
"devices": r.devices,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let now = format_rfc3339(SystemTime::now()).to_string();
|
||||
let summary = json!({
|
||||
let mounts_json: Vec<serde_json::Value> = mres
|
||||
.iter()
|
||||
.map(|m| {
|
||||
json!({
|
||||
"source": m.source,
|
||||
"target": m.target,
|
||||
"fstype": m.fstype,
|
||||
"options": m.options,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut summary = json!({
|
||||
"version": "v1",
|
||||
"timestamp": now,
|
||||
"status": "observed",
|
||||
"status": "mounted_existing",
|
||||
"filesystems": fs_json,
|
||||
"mounts": mounts_json
|
||||
"mounts": mounts_json,
|
||||
});
|
||||
|
||||
// In report-current mode, default to stdout; also honor --report path when provided.
|
||||
println!("{}", summary);
|
||||
if let Some(path) = &ctx.report_path_override {
|
||||
fs::write(path, summary.to_string()).map_err(|e| {
|
||||
Error::Report(format!("failed to write report to {}: {}", path, e))
|
||||
})?;
|
||||
info!("orchestrator: wrote report-current to {}", path);
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Default path: plan (and optionally apply) for empty-disk initialization workflow.
|
||||
|
||||
// 1) Idempotency pre-flight: if already provisioned, optionally emit summary then exit success.
|
||||
match idempotency::detect_existing_state()? {
|
||||
Some(state) => {
|
||||
info!("orchestrator: already provisioned");
|
||||
if ctx.show || ctx.report_path_override.is_some() {
|
||||
let now = format_rfc3339(SystemTime::now()).to_string();
|
||||
let state_json = to_value(&state)
|
||||
.map_err(|e| Error::Report(format!("failed to serialize StateReport: {}", e)))?;
|
||||
let summary = json!({
|
||||
"version": "v1",
|
||||
"timestamp": now,
|
||||
"status": "already_provisioned",
|
||||
"state": state_json
|
||||
});
|
||||
if ctx.show {
|
||||
println!("{}", summary);
|
||||
}
|
||||
if let Some(path) = &ctx.report_path_override {
|
||||
fs::write(path, summary.to_string()).map_err(|e| {
|
||||
Error::Report(format!("failed to write report to {}: {}", path, e))
|
||||
})?;
|
||||
info!("orchestrator: wrote idempotency report to {}", path);
|
||||
if let Some(state) = state_hint {
|
||||
if let Ok(state_json) = to_value(&state) {
|
||||
if let Some(obj) = summary.as_object_mut() {
|
||||
obj.insert("state".to_string(), state_json);
|
||||
}
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
None => {
|
||||
debug!("orchestrator: not provisioned; continuing");
|
||||
|
||||
if ctx.show || ctx.report_current {
|
||||
println!("{}", summary);
|
||||
}
|
||||
if let Some(path) = &ctx.report_path_override {
|
||||
fs::write(path, summary.to_string())
|
||||
.map_err(|e| Error::Report(format!("failed to write report to {}: {}", path, e)))?;
|
||||
info!("orchestrator: wrote mount-existing report to {}", path);
|
||||
}
|
||||
}
|
||||
|
||||
// 2) Device discovery using compiled filter from config.
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_provisioning(
|
||||
ctx: &Context,
|
||||
mode: ProvisioningMode,
|
||||
state_hint: Option<StateReport>,
|
||||
) -> Result<()> {
|
||||
let preview_outputs = ctx.show || ctx.report_path_override.is_some();
|
||||
|
||||
let mut state_opt = state_hint;
|
||||
if state_opt.is_none() {
|
||||
state_opt = idempotency::detect_existing_state()?;
|
||||
}
|
||||
|
||||
if let Some(state) = state_opt {
|
||||
info!("orchestrator: already provisioned; ensuring mounts are active");
|
||||
return run_mount_existing(ctx, None, Some(state));
|
||||
}
|
||||
|
||||
let filter = build_device_filter(&ctx.cfg)?;
|
||||
let disks = discover(&filter)?;
|
||||
info!("orchestrator: discovered {} eligible disk(s)", disks.len());
|
||||
|
||||
// 3) Emptiness enforcement: skip in preview mode (--show/--report) to allow planning output.
|
||||
let preview = ctx.show || ctx.report_path_override.is_some();
|
||||
if ctx.cfg.partitioning.require_empty_disks && !preview {
|
||||
enforce_empty_disks(&disks)?;
|
||||
info!("orchestrator: all target disks verified empty");
|
||||
} else if ctx.cfg.partitioning.require_empty_disks && preview {
|
||||
warn!("orchestrator: preview mode detected (--show/--report); skipping empty-disk enforcement");
|
||||
} else {
|
||||
if ctx.cfg.partitioning.require_empty_disks {
|
||||
if matches!(mode, ProvisioningMode::Apply) {
|
||||
enforce_empty_disks(&disks)?;
|
||||
info!("orchestrator: all target disks verified empty");
|
||||
} else {
|
||||
warn!(
|
||||
"orchestrator: preview mode detected (--show/--report); skipping empty-disk enforcement"
|
||||
);
|
||||
}
|
||||
} else if matches!(mode, ProvisioningMode::Apply) {
|
||||
warn!("orchestrator: require_empty_disks=false; proceeding without emptiness enforcement");
|
||||
}
|
||||
|
||||
// 4) Partition planning (declarative).
|
||||
let plan = partition::plan_partitions(&disks, &ctx.cfg)?;
|
||||
let effective_cfg = {
|
||||
let mut c = ctx.cfg.clone();
|
||||
if !(ctx.topo_from_cli || ctx.topo_from_cmdline) {
|
||||
let auto_topo = if disks.len() == 1 {
|
||||
Topology::BtrfsSingle
|
||||
} else if disks.len() == 2 {
|
||||
Topology::DualIndependent
|
||||
} else {
|
||||
Topology::BtrfsRaid1
|
||||
};
|
||||
if c.topology != auto_topo {
|
||||
info!("orchestrator: topology auto-selected {:?}", auto_topo);
|
||||
c.topology = auto_topo;
|
||||
} else {
|
||||
info!("orchestrator: using configured topology {:?}", c.topology);
|
||||
}
|
||||
} else {
|
||||
info!("orchestrator: using overridden topology {:?}", c.topology);
|
||||
}
|
||||
c
|
||||
};
|
||||
|
||||
let plan = partition::plan_partitions(&disks, &effective_cfg)?;
|
||||
debug!(
|
||||
"orchestrator: partition plan ready (alignment={} MiB, disks={})",
|
||||
plan.alignment_mib,
|
||||
@@ -353,8 +542,7 @@ pub fn run(ctx: &Context) -> Result<()> {
|
||||
debug!("plan for {}: {} part(s)", dp.disk.path, dp.parts.len());
|
||||
}
|
||||
|
||||
// Apply mode: perform destructive partition application now.
|
||||
if ctx.apply {
|
||||
if matches!(mode, ProvisioningMode::Apply) {
|
||||
info!("orchestrator: apply mode enabled; applying partition plan");
|
||||
let part_results = partition::apply_partitions(&plan)?;
|
||||
info!(
|
||||
@@ -363,37 +551,32 @@ pub fn run(ctx: &Context) -> Result<()> {
|
||||
part_results.len()
|
||||
);
|
||||
|
||||
// Filesystem planning and creation
|
||||
let fs_plan = zfs::plan_filesystems(&part_results, &ctx.cfg)?;
|
||||
let fs_plan = zfs::plan_filesystems(&part_results, &effective_cfg)?;
|
||||
info!(
|
||||
"orchestrator: filesystem plan contains {} spec(s)",
|
||||
fs_plan.specs.len()
|
||||
);
|
||||
let fs_results = zfs::make_filesystems(&fs_plan, &ctx.cfg)?;
|
||||
let fs_results = zfs::make_filesystems(&fs_plan, &effective_cfg)?;
|
||||
info!("orchestrator: created {} filesystem(s)", fs_results.len());
|
||||
|
||||
// Mount planning and application
|
||||
let mplan = crate::mount::plan_mounts(&fs_results, &ctx.cfg)?;
|
||||
let mplan = crate::mount::plan_mounts(&fs_results, &effective_cfg)?;
|
||||
let mres = crate::mount::apply_mounts(&mplan)?;
|
||||
crate::mount::maybe_write_fstab(&mres, &ctx.cfg)?;
|
||||
crate::mount::maybe_write_fstab(&mres, &effective_cfg)?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Preview-only path
|
||||
info!("orchestrator: pre-flight complete (idempotency checked, devices discovered, plan computed)");
|
||||
info!(
|
||||
"orchestrator: pre-flight complete (idempotency checked, devices discovered, plan computed)"
|
||||
);
|
||||
|
||||
// Optional: emit JSON summary via --show or write via --report
|
||||
if ctx.show || ctx.report_path_override.is_some() {
|
||||
let summary = build_summary_json(&disks, &plan, &ctx.cfg)?;
|
||||
if preview_outputs {
|
||||
let summary = build_summary_json(&disks, &plan, &effective_cfg)?;
|
||||
if ctx.show {
|
||||
// Print compact JSON to stdout
|
||||
println!("{}", summary);
|
||||
}
|
||||
if let Some(path) = &ctx.report_path_override {
|
||||
// Best-effort write (non-atomic for now, pending report::write_report implementation)
|
||||
fs::write(path, summary.to_string()).map_err(|e| {
|
||||
Error::Report(format!("failed to write report to {}: {}", path, e))
|
||||
})?;
|
||||
fs::write(path, summary.to_string())
|
||||
.map_err(|e| Error::Report(format!("failed to write report to {}: {}", path, e)))?;
|
||||
info!("orchestrator: wrote summary report to {}", path);
|
||||
}
|
||||
}
|
||||
@@ -413,15 +596,13 @@ fn build_device_filter(cfg: &Config) -> Result<DeviceFilter> {
|
||||
let mut exclude = Vec::new();
|
||||
|
||||
for pat in &cfg.device_selection.include_patterns {
|
||||
let re = Regex::new(pat).map_err(|e| {
|
||||
Error::Validation(format!("invalid include regex '{}': {}", pat, e))
|
||||
})?;
|
||||
let re = Regex::new(pat)
|
||||
.map_err(|e| Error::Validation(format!("invalid include regex '{}': {}", pat, e)))?;
|
||||
include.push(re);
|
||||
}
|
||||
for pat in &cfg.device_selection.exclude_patterns {
|
||||
let re = Regex::new(pat).map_err(|e| {
|
||||
Error::Validation(format!("invalid exclude regex '{}': {}", pat, e))
|
||||
})?;
|
||||
let re = Regex::new(pat)
|
||||
.map_err(|e| Error::Validation(format!("invalid exclude regex '{}': {}", pat, e)))?;
|
||||
exclude.push(re);
|
||||
}
|
||||
|
||||
@@ -475,7 +656,11 @@ fn role_str(role: partition::PartRole) -> &'static str {
|
||||
/// - mount: scheme summary and target template (e.g., "/var/cache/{UUID}")
|
||||
///
|
||||
/// This function is non-destructive and performs no probing beyond the provided inputs.
|
||||
fn build_summary_json(disks: &[Disk], plan: &partition::PartitionPlan, cfg: &Config) -> Result<serde_json::Value> {
|
||||
fn build_summary_json(
|
||||
disks: &[Disk],
|
||||
plan: &partition::PartitionPlan,
|
||||
cfg: &Config,
|
||||
) -> Result<serde_json::Value> {
|
||||
// Disks summary
|
||||
let disks_json: Vec<serde_json::Value> = disks
|
||||
.iter()
|
||||
@@ -511,14 +696,7 @@ fn build_summary_json(disks: &[Disk], plan: &partition::PartitionPlan, cfg: &Con
|
||||
}
|
||||
|
||||
// Decide filesystem kinds and planned mountpoints (template) from plan + cfg.topology
|
||||
let topo_str = match cfg.topology {
|
||||
crate::types::Topology::BtrfsSingle => "btrfs_single",
|
||||
crate::types::Topology::BcachefsSingle => "bcachefs_single",
|
||||
crate::types::Topology::DualIndependent => "dual_independent",
|
||||
crate::types::Topology::SsdHddBcachefs => "ssd_hdd_bcachefs",
|
||||
crate::types::Topology::Bcachefs2Copy => "bcachefs2_copy",
|
||||
crate::types::Topology::BtrfsRaid1 => "btrfs_raid1",
|
||||
};
|
||||
let topo_str = cfg.topology.to_string();
|
||||
|
||||
// Count roles across plan to infer filesystems
|
||||
let mut esp_count = 0usize;
|
||||
@@ -614,4 +792,4 @@ fn build_summary_json(disks: &[Disk], plan: &partition::PartitionPlan, cfg: &Con
|
||||
});
|
||||
|
||||
Ok(summary)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,4 +9,4 @@
|
||||
|
||||
pub mod plan;
|
||||
|
||||
pub use plan::*;
|
||||
pub use plan::*;
|
||||
|
||||
@@ -19,12 +19,12 @@
|
||||
// ext: device-specific alignment or reserved areas configurable via cfg in the future.
|
||||
// REGION: EXTENSION_POINTS-END
|
||||
//
|
||||
// REGION: SAFETY
|
||||
// safety: must verify require_empty_disks before any modification.
|
||||
// safety: when UEFI-booted, suppress creating BIOS boot partition to avoid unnecessary ef02 on UEFI systems.
|
||||
// safety: must ensure unique partition GUIDs; identical labels are allowed when expected (e.g., ESP ZOSBOOT).
|
||||
// safety: must call udev settle after partition table writes.
|
||||
// REGION: SAFETY-END
|
||||
// REGION: SAFETY
|
||||
// safety: must verify require_empty_disks before any modification.
|
||||
// safety: when UEFI-booted, suppress creating BIOS boot partition to avoid unnecessary ef02 on UEFI systems.
|
||||
// safety: must ensure unique partition GUIDs; identical labels are allowed when expected (e.g., ESP ZOSBOOT).
|
||||
// safety: must call udev settle after partition table writes.
|
||||
// REGION: SAFETY-END
|
||||
//
|
||||
// REGION: ERROR_MAPPING
|
||||
// errmap: external tool failure -> crate::Error::Tool { tool, status, stderr }.
|
||||
@@ -44,11 +44,11 @@
|
||||
//! [fn apply_partitions](plan.rs:1).
|
||||
|
||||
use crate::{
|
||||
types::{Config, Topology},
|
||||
device::Disk,
|
||||
util::{run_cmd, run_cmd_capture, which_tool, udev_settle, is_efi_boot},
|
||||
idempotency,
|
||||
Error, Result,
|
||||
device::Disk,
|
||||
idempotency,
|
||||
types::{Config, Topology},
|
||||
util::{is_efi_boot, run_cmd, run_cmd_capture, udev_settle, which_tool},
|
||||
};
|
||||
use tracing::{debug, warn};
|
||||
|
||||
@@ -117,20 +117,20 @@ pub struct PartitionResult {
|
||||
pub device_path: String,
|
||||
}
|
||||
|
||||
/// Compute GPT-only plan per topology and constraints.
|
||||
///
|
||||
/// Layout defaults:
|
||||
/// - BIOS boot: cfg.partitioning.bios_boot if enabled (size_mib)
|
||||
/// - ESP: cfg.partitioning.esp.size_mib, GPT name cfg.partitioning.esp.gpt_name (typically "zosboot")
|
||||
/// - Data: remainder, GPT name cfg.partitioning.data.gpt_name ("zosdata")
|
||||
/// - Cache (only for SSD/HDD topology): remainder on SSD after boot/ESP, GPT name cfg.partitioning.cache.gpt_name ("zoscache")
|
||||
///
|
||||
/// Topology mapping:
|
||||
/// - Single: use first eligible disk; create BIOS (opt) + ESP + Data
|
||||
/// - DualIndependent: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
|
||||
/// - BtrfsRaid1: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
|
||||
/// - SsdHddBcachefs: need >=1 SSD (rotational=false) and >=1 HDD (rotational=true);
|
||||
/// SSD: BIOS (opt) + ESP + Cache; HDD: Data
|
||||
/// Compute GPT-only plan per topology and constraints.
|
||||
///
|
||||
/// Layout defaults:
|
||||
/// - BIOS boot: cfg.partitioning.bios_boot if enabled (size_mib)
|
||||
/// - ESP: cfg.partitioning.esp.size_mib, GPT name cfg.partitioning.esp.gpt_name (typically "zosboot")
|
||||
/// - Data: remainder, GPT name cfg.partitioning.data.gpt_name ("zosdata")
|
||||
/// - Cache (only for SSD/HDD topology): remainder on SSD after boot/ESP, GPT name cfg.partitioning.cache.gpt_name ("zoscache")
|
||||
///
|
||||
/// Topology mapping:
|
||||
/// - Single: use first eligible disk; create BIOS (opt) + ESP + Data
|
||||
/// - DualIndependent: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
|
||||
/// - BtrfsRaid1: need at least 2 disks; disk0: BIOS (opt) + ESP + Data, disk1: Data
|
||||
/// - SsdHddBcachefs: need >=1 SSD (rotational=false) and >=1 HDD (rotational=true);
|
||||
/// SSD: BIOS (opt) + ESP + Cache; HDD: Data
|
||||
pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
let align = cfg.partitioning.alignment_mib;
|
||||
let require_empty = cfg.partitioning.require_empty_disks;
|
||||
@@ -138,7 +138,9 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
let add_bios = cfg.partitioning.bios_boot.enabled && !is_efi_boot();
|
||||
|
||||
if disks.is_empty() {
|
||||
return Err(Error::Partition("no disks provided to partition planner".into()));
|
||||
return Err(Error::Partition(
|
||||
"no disks provided to partition planner".into(),
|
||||
));
|
||||
}
|
||||
|
||||
let mut plans: Vec<DiskPlan> = Vec::new();
|
||||
@@ -164,7 +166,10 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.data.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: d0.clone(), parts });
|
||||
plans.push(DiskPlan {
|
||||
disk: d0.clone(),
|
||||
parts,
|
||||
});
|
||||
}
|
||||
Topology::BcachefsSingle => {
|
||||
let d0 = &disks[0];
|
||||
@@ -186,11 +191,16 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.data.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: d0.clone(), parts });
|
||||
plans.push(DiskPlan {
|
||||
disk: d0.clone(),
|
||||
parts,
|
||||
});
|
||||
}
|
||||
Topology::DualIndependent => {
|
||||
if disks.len() < 2 {
|
||||
return Err(Error::Partition("DualIndependent topology requires at least 2 disks".into()));
|
||||
return Err(Error::Partition(
|
||||
"DualIndependent topology requires at least 2 disks".into(),
|
||||
));
|
||||
}
|
||||
let d0 = &disks[0];
|
||||
let d1 = &disks[1];
|
||||
@@ -214,7 +224,10 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.data.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: d0.clone(), parts: parts0 });
|
||||
plans.push(DiskPlan {
|
||||
disk: d0.clone(),
|
||||
parts: parts0,
|
||||
});
|
||||
|
||||
// Disk 1: Data only
|
||||
let mut parts1 = Vec::new();
|
||||
@@ -223,11 +236,16 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.data.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: d1.clone(), parts: parts1 });
|
||||
plans.push(DiskPlan {
|
||||
disk: d1.clone(),
|
||||
parts: parts1,
|
||||
});
|
||||
}
|
||||
Topology::BtrfsRaid1 => {
|
||||
if disks.len() < 2 {
|
||||
return Err(Error::Partition("BtrfsRaid1 topology requires at least 2 disks".into()));
|
||||
return Err(Error::Partition(
|
||||
"BtrfsRaid1 topology requires at least 2 disks".into(),
|
||||
));
|
||||
}
|
||||
let d0 = &disks[0];
|
||||
let d1 = &disks[1];
|
||||
@@ -251,7 +269,10 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.data.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: d0.clone(), parts: parts0 });
|
||||
plans.push(DiskPlan {
|
||||
disk: d0.clone(),
|
||||
parts: parts0,
|
||||
});
|
||||
|
||||
// Disk 1: Data only (for RAID1)
|
||||
let mut parts1 = Vec::new();
|
||||
@@ -260,11 +281,16 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.data.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: d1.clone(), parts: parts1 });
|
||||
plans.push(DiskPlan {
|
||||
disk: d1.clone(),
|
||||
parts: parts1,
|
||||
});
|
||||
}
|
||||
Topology::Bcachefs2Copy => {
|
||||
if disks.len() < 2 {
|
||||
return Err(Error::Partition("Bcachefs2Copy topology requires at least 2 disks".into()));
|
||||
return Err(Error::Partition(
|
||||
"Bcachefs2Copy topology requires at least 2 disks".into(),
|
||||
));
|
||||
}
|
||||
let d0 = &disks[0];
|
||||
let d1 = &disks[1];
|
||||
@@ -288,7 +314,10 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.data.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: d0.clone(), parts: parts0 });
|
||||
plans.push(DiskPlan {
|
||||
disk: d0.clone(),
|
||||
parts: parts0,
|
||||
});
|
||||
|
||||
// Disk 1: Data only
|
||||
let mut parts1 = Vec::new();
|
||||
@@ -297,14 +326,19 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.data.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: d1.clone(), parts: parts1 });
|
||||
plans.push(DiskPlan {
|
||||
disk: d1.clone(),
|
||||
parts: parts1,
|
||||
});
|
||||
}
|
||||
Topology::SsdHddBcachefs => {
|
||||
// Choose SSD (rotational=false) and HDD (rotational=true)
|
||||
let ssd = disks.iter().find(|d| !d.rotational)
|
||||
.ok_or_else(|| Error::Partition("SsdHddBcachefs requires an SSD (non-rotational) disk".into()))?;
|
||||
let hdd = disks.iter().find(|d| d.rotational)
|
||||
.ok_or_else(|| Error::Partition("SsdHddBcachefs requires an HDD (rotational) disk".into()))?;
|
||||
let ssd = disks.iter().find(|d| !d.rotational).ok_or_else(|| {
|
||||
Error::Partition("SsdHddBcachefs requires an SSD (non-rotational) disk".into())
|
||||
})?;
|
||||
let hdd = disks.iter().find(|d| d.rotational).ok_or_else(|| {
|
||||
Error::Partition("SsdHddBcachefs requires an HDD (rotational) disk".into())
|
||||
})?;
|
||||
|
||||
// SSD: BIOS (opt) + ESP + Cache remainder
|
||||
let mut parts_ssd = Vec::new();
|
||||
@@ -325,7 +359,10 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.cache.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: ssd.clone(), parts: parts_ssd });
|
||||
plans.push(DiskPlan {
|
||||
disk: ssd.clone(),
|
||||
parts: parts_ssd,
|
||||
});
|
||||
|
||||
// HDD: Data remainder
|
||||
let mut parts_hdd = Vec::new();
|
||||
@@ -334,7 +371,10 @@ pub fn plan_partitions(disks: &[Disk], cfg: &Config) -> Result<PartitionPlan> {
|
||||
size_mib: None,
|
||||
gpt_name: cfg.partitioning.data.gpt_name.clone(),
|
||||
});
|
||||
plans.push(DiskPlan { disk: hdd.clone(), parts: parts_hdd });
|
||||
plans.push(DiskPlan {
|
||||
disk: hdd.clone(),
|
||||
parts: parts_hdd,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -389,10 +429,17 @@ pub fn apply_partitions(plan: &PartitionPlan) -> Result<Vec<PartitionResult>> {
|
||||
if let Some(blockdev) = which_tool("blockdev")? {
|
||||
let out = run_cmd_capture(&[blockdev.as_str(), "--getss", disk_path])?;
|
||||
let s = out.stdout.trim();
|
||||
return s.parse::<u64>()
|
||||
.map_err(|e| Error::Partition(format!("failed to parse sector size from blockdev for {}: {}", disk_path, e)));
|
||||
return s.parse::<u64>().map_err(|e| {
|
||||
Error::Partition(format!(
|
||||
"failed to parse sector size from blockdev for {}: {}",
|
||||
disk_path, e
|
||||
))
|
||||
});
|
||||
}
|
||||
warn!("blockdev not found; assuming 512-byte sectors for {}", disk_path);
|
||||
warn!(
|
||||
"blockdev not found; assuming 512-byte sectors for {}",
|
||||
disk_path
|
||||
);
|
||||
Ok(512)
|
||||
}
|
||||
|
||||
@@ -410,20 +457,29 @@ pub fn apply_partitions(plan: &PartitionPlan) -> Result<Vec<PartitionResult>> {
|
||||
// Format: "First sector: 2048 (at 1024.0 KiB)"
|
||||
let val = rest.trim().split_whitespace().next().unwrap_or("");
|
||||
if !val.is_empty() {
|
||||
first = Some(val.parse::<u64>().map_err(|e| Error::Partition(format!("parse first sector: {}", e)))?);
|
||||
first = Some(
|
||||
val.parse::<u64>()
|
||||
.map_err(|e| Error::Partition(format!("parse first sector: {}", e)))?,
|
||||
);
|
||||
}
|
||||
} else if let Some(rest) = line.strip_prefix("Last sector:") {
|
||||
let val = rest.trim().split_whitespace().next().unwrap_or("");
|
||||
if !val.is_empty() {
|
||||
last = Some(val.parse::<u64>().map_err(|e| Error::Partition(format!("parse last sector: {}", e)))?);
|
||||
last = Some(
|
||||
val.parse::<u64>()
|
||||
.map_err(|e| Error::Partition(format!("parse last sector: {}", e)))?,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let first = first.ok_or_else(|| Error::Partition("sgdisk -i missing First sector".into()))?;
|
||||
let first =
|
||||
first.ok_or_else(|| Error::Partition("sgdisk -i missing First sector".into()))?;
|
||||
let last = last.ok_or_else(|| Error::Partition("sgdisk -i missing Last sector".into()))?;
|
||||
if guid.is_empty() {
|
||||
return Err(Error::Partition("sgdisk -i missing Partition unique GUID".into()));
|
||||
return Err(Error::Partition(
|
||||
"sgdisk -i missing Partition unique GUID".into(),
|
||||
));
|
||||
}
|
||||
Ok((guid, first, last))
|
||||
}
|
||||
@@ -467,9 +523,12 @@ pub fn apply_partitions(plan: &PartitionPlan) -> Result<Vec<PartitionResult>> {
|
||||
|
||||
run_cmd(&[
|
||||
sgdisk.as_str(),
|
||||
"-n", n_arg.as_str(),
|
||||
"-t", t_arg.as_str(),
|
||||
"-c", c_arg.as_str(),
|
||||
"-n",
|
||||
n_arg.as_str(),
|
||||
"-t",
|
||||
t_arg.as_str(),
|
||||
"-c",
|
||||
c_arg.as_str(),
|
||||
disk_path,
|
||||
])?;
|
||||
}
|
||||
@@ -486,11 +545,7 @@ pub fn apply_partitions(plan: &PartitionPlan) -> Result<Vec<PartitionResult>> {
|
||||
|
||||
// Query sgdisk for partition info
|
||||
let i_arg = format!("{}", part_num);
|
||||
let info_out = run_cmd_capture(&[
|
||||
sgdisk.as_str(),
|
||||
"-i", i_arg.as_str(),
|
||||
disk_path,
|
||||
])?;
|
||||
let info_out = run_cmd_capture(&[sgdisk.as_str(), "-i", i_arg.as_str(), disk_path])?;
|
||||
|
||||
let (unique_guid, first_sector, last_sector) = parse_sgdisk_info(&info_out.stdout)?;
|
||||
let sectors = if last_sector >= first_sector {
|
||||
@@ -516,6 +571,9 @@ pub fn apply_partitions(plan: &PartitionPlan) -> Result<Vec<PartitionResult>> {
|
||||
}
|
||||
}
|
||||
|
||||
debug!("apply_partitions: created {} partition entries", results.len());
|
||||
debug!(
|
||||
"apply_partitions: created {} partition entries",
|
||||
results.len()
|
||||
);
|
||||
Ok(results)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,4 +9,4 @@
|
||||
|
||||
pub mod state;
|
||||
|
||||
pub use state::*;
|
||||
pub use state::*;
|
||||
|
||||
@@ -77,4 +77,4 @@ pub fn build_report(
|
||||
/// Write the state report JSON to disk (default path in config: /run/zosstorage/state.json).
|
||||
pub fn write_report(_report: &StateReport, _path: &str) -> Result<()> {
|
||||
todo!("serialize to JSON and persist atomically via tempfile and rename")
|
||||
}
|
||||
}
|
||||
|
||||
27
src/types.rs
27
src/types.rs
@@ -15,15 +15,25 @@
|
||||
// - Keep field names and enums stable; update docs/SCHEMA.md when public surface changes.
|
||||
// REGION: RESPONSIBILITIES-END
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use clap::ValueEnum;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Reserved filesystem labels.
|
||||
pub const LABEL_ZOSBOOT: &str = "ZOSBOOT";
|
||||
pub const LABEL_ZOSDATA: &str = "ZOSDATA";
|
||||
pub const LABEL_ZOSCACHE: &str = "ZOSCACHE";
|
||||
|
||||
/// Reserved GPT partition names.
|
||||
pub const GPT_NAME_ZOSBOOT: &str = "zosboot";
|
||||
pub const GPT_NAME_ZOSDATA: &str = "zosdata";
|
||||
pub const GPT_NAME_ZOSCACHE: &str = "zoscache";
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LoggingConfig {
|
||||
/// Log level: "error" | "warn" | "info" | "debug"
|
||||
pub level: String, // default "info"
|
||||
pub level: String, // default "info"
|
||||
/// When true, also log to /run/zosstorage/zosstorage.log
|
||||
pub to_file: bool, // default false
|
||||
pub to_file: bool, // default false
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
@@ -38,7 +48,7 @@ pub struct DeviceSelection {
|
||||
pub min_size_gib: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize, ValueEnum)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, ValueEnum)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
#[value(rename_all = "snake_case")]
|
||||
pub enum Topology {
|
||||
@@ -55,7 +65,9 @@ pub enum Topology {
|
||||
#[value(alias = "ssd-hdd-bcachefs")]
|
||||
SsdHddBcachefs,
|
||||
/// Multi-device bcachefs with two replicas (data+metadata).
|
||||
#[value(alias = "bcachefs2-copy", alias = "bcachefs-2copy", alias = "bcachefs-2-copy")]
|
||||
/// Canonical token: bcachefs-2copy
|
||||
#[serde(rename = "bcachefs-2copy")]
|
||||
#[value(alias = "bcachefs-2copy")]
|
||||
Bcachefs2Copy,
|
||||
/// Optional mirrored btrfs across two disks when explicitly requested.
|
||||
#[value(alias = "btrfs-raid1")]
|
||||
@@ -69,7 +81,8 @@ impl std::fmt::Display for Topology {
|
||||
Topology::BcachefsSingle => "bcachefs_single",
|
||||
Topology::DualIndependent => "dual_independent",
|
||||
Topology::SsdHddBcachefs => "ssd_hdd_bcachefs",
|
||||
Topology::Bcachefs2Copy => "bcachefs2_copy",
|
||||
// Canonical single notation for two-copy bcachefs topology
|
||||
Topology::Bcachefs2Copy => "bcachefs-2copy",
|
||||
Topology::BtrfsRaid1 => "btrfs_raid1",
|
||||
};
|
||||
f.write_str(s)
|
||||
@@ -205,4 +218,4 @@ pub struct Config {
|
||||
pub mount: MountScheme,
|
||||
/// Report output configuration.
|
||||
pub report: ReportOptions,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,8 +40,8 @@
|
||||
//! and consistent error handling.
|
||||
|
||||
use crate::{Error, Result};
|
||||
use std::process::Command;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
use tracing::{debug, warn};
|
||||
|
||||
/// Captured output from an external tool invocation.
|
||||
@@ -77,9 +77,10 @@ pub fn run_cmd(args: &[&str]) -> Result<()> {
|
||||
)));
|
||||
}
|
||||
debug!(target: "util.run_cmd", "exec: {:?}", args);
|
||||
let output = Command::new(args[0]).args(&args[1..]).output().map_err(|e| {
|
||||
Error::Other(anyhow::anyhow!("failed to spawn {:?}: {}", args, e))
|
||||
})?;
|
||||
let output = Command::new(args[0])
|
||||
.args(&args[1..])
|
||||
.output()
|
||||
.map_err(|e| Error::Other(anyhow::anyhow!("failed to spawn {:?}: {}", args, e)))?;
|
||||
|
||||
let status_code = output.status.code().unwrap_or(-1);
|
||||
if !output.status.success() {
|
||||
@@ -103,9 +104,10 @@ pub fn run_cmd_capture(args: &[&str]) -> Result<CmdOutput> {
|
||||
)));
|
||||
}
|
||||
debug!(target: "util.run_cmd_capture", "exec: {:?}", args);
|
||||
let output = Command::new(args[0]).args(&args[1..]).output().map_err(|e| {
|
||||
Error::Other(anyhow::anyhow!("failed to spawn {:?}: {}", args, e))
|
||||
})?;
|
||||
let output = Command::new(args[0])
|
||||
.args(&args[1..])
|
||||
.output()
|
||||
.map_err(|e| Error::Other(anyhow::anyhow!("failed to spawn {:?}: {}", args, e)))?;
|
||||
let status_code = output.status.code().unwrap_or(-1);
|
||||
let stdout = String::from_utf8_lossy(&output.stdout).to_string();
|
||||
let stderr = String::from_utf8_lossy(&output.stderr).to_string();
|
||||
@@ -205,4 +207,4 @@ mod tests {
|
||||
// Should never fail even if udevadm is missing.
|
||||
udev_settle(1000).expect("udev_settle should be non-fatal");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user