use serde::{Deserialize, Serialize};
use std::error::Error;
use std::fmt;
use std::fs;
use std::path::{Path, PathBuf};
use std::thread;
use std::time::Duration;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

use sal_os;
use sal_process;
use crate::qcow2;

pub mod builder;

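// On-disk layout used by this module (derived from the path helpers below):
//
//   $HOME/hero/virt/vms/<id>/
//     vm.json            persisted VmRecord (spec + runtime)
//     logs/console.log   captured cloud-hypervisor stdout/stderr
//     pid                PID written by the start script
//     disk.raw           raw copy created when the source disk is qcow2
//
// The cloud-hypervisor-static and ch-remote-static binaries must be on PATH
// (see ensure_deps), and the default VM networking additionally needs ip, nft,
// dnsmasq and systemctl plus root privileges.
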
/// Error type for Cloud Hypervisor operations
#[derive(Debug)]
pub enum CloudHvError {
    CommandFailed(String),
    IoError(String),
    JsonError(String),
    DependencyMissing(String),
    InvalidSpec(String),
    NotFound(String),
}

impl fmt::Display for CloudHvError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            CloudHvError::CommandFailed(e) => write!(f, "{}", e),
            CloudHvError::IoError(e) => write!(f, "IO error: {}", e),
            CloudHvError::JsonError(e) => write!(f, "JSON error: {}", e),
            CloudHvError::DependencyMissing(e) => write!(f, "Dependency missing: {}", e),
            CloudHvError::InvalidSpec(e) => write!(f, "Invalid spec: {}", e),
            CloudHvError::NotFound(e) => write!(f, "{}", e),
        }
    }
}

impl Error for CloudHvError {}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmSpec {
    pub id: String,
    /// Optional for firmware boot; required for direct kernel boot
    pub kernel_path: Option<String>,
    /// Optional initramfs when using direct kernel boot
    pub initramfs_path: Option<String>,
    /// Optional for direct kernel boot; required for firmware boot
    pub firmware_path: Option<String>,
    /// Disk image path (qcow2 or raw)
    pub disk_path: String,
    /// API socket path for ch-remote and management
    pub api_socket: String,
    /// vCPUs to boot with
    pub vcpus: u32,
    /// Memory in MB
    pub memory_mb: u32,
    /// Kernel cmdline (only used for direct kernel boot)
    pub cmdline: Option<String>,
    /// Extra args (raw) if you need to extend; keep minimal for Phase 2
    pub extra_args: Option<Vec<String>>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmRuntime {
    /// PID of cloud-hypervisor process if running
    pub pid: Option<i64>,
    /// Last known status: "stopped" | "running"
    pub status: String,
    /// Console log file path
    pub log_file: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VmRecord {
    pub spec: VmSpec,
    pub runtime: VmRuntime,
}

fn ensure_deps() -> Result<(), CloudHvError> {
    if sal_process::which("cloud-hypervisor-static").is_none() {
        return Err(CloudHvError::DependencyMissing(
            "cloud-hypervisor-static not found on PATH. Install Cloud Hypervisor static binary.".into(),
        ));
    }
    if sal_process::which("ch-remote-static").is_none() {
        return Err(CloudHvError::DependencyMissing(
            "ch-remote-static not found on PATH. Install Cloud Hypervisor tools (static).".into(),
        ));
    }
    Ok(())
}

fn hero_vm_root() -> PathBuf {
    let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".into());
    Path::new(&home).join("hero/virt/vms")
}

fn vm_dir(id: &str) -> PathBuf {
    hero_vm_root().join(id)
}

fn vm_json_path(id: &str) -> PathBuf {
    vm_dir(id).join("vm.json")
}

fn vm_log_path(id: &str) -> PathBuf {
    vm_dir(id).join("logs/console.log")
}

fn vm_pid_path(id: &str) -> PathBuf {
    vm_dir(id).join("pid")
}

fn write_json(path: &Path, value: &serde_json::Value) -> Result<(), CloudHvError> {
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    }
    let s = serde_json::to_string_pretty(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    fs::write(path, s).map_err(|e| CloudHvError::IoError(e.to_string()))
}

fn read_json(path: &Path) -> Result<serde_json::Value, CloudHvError> {
    let content = fs::read_to_string(path).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    serde_json::from_str(&content).map_err(|e| CloudHvError::JsonError(e.to_string()))
}

fn proc_exists(pid: i64) -> bool {
    #[cfg(target_os = "linux")]
    {
        Path::new(&format!("/proc/{}", pid)).exists()
    }
    #[cfg(not(target_os = "linux"))]
    {
        // Minimal check for non-Linux; try a kill -0 style command
        let res = sal_process::run(&format!("kill -0 {}", pid)).die(false).silent(true).execute();
        res.map(|r| r.success).unwrap_or(false)
    }
}

/// Create and persist a VM spec
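///
/// This only validates the spec and writes `vm.json` under the VM directory;
/// nothing is booted until [`vm_start`] is called.
///
/// # Example (illustrative)
///
/// A minimal firmware-boot spec; the paths below are placeholders and must
/// point to existing files on your host:
///
/// ```ignore
/// let spec = VmSpec {
///     id: "demo-vm".into(),
///     kernel_path: None,
///     initramfs_path: None,
///     firmware_path: Some("/path/to/hypervisor-fw".into()),
///     disk_path: "/path/to/disk.qcow2".into(),
///     api_socket: String::new(), // empty: vm_start picks <vm dir>/api.sock
///     vcpus: 2,
///     memory_mb: 2048,
///     cmdline: None,
///     extra_args: None,
/// };
/// let id = vm_create(&spec)?;
/// ```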
pub fn vm_create(spec: &VmSpec) -> Result<String, CloudHvError> {
    // Validate inputs minimally
    if spec.id.trim().is_empty() {
        return Err(CloudHvError::InvalidSpec("spec.id must not be empty".into()));
    }
    // Validate boot method: either firmware_path exists or kernel_path exists
    let has_fw = spec
        .firmware_path
        .as_ref()
        .map(|p| Path::new(p).exists())
        .unwrap_or(false);
    let has_kernel = spec
        .kernel_path
        .as_ref()
        .map(|p| Path::new(p).exists())
        .unwrap_or(false);

    if !(has_fw || has_kernel) {
        return Err(CloudHvError::InvalidSpec(
            "either firmware_path or kernel_path must be set to an existing file".into(),
        ));
    }

    if !Path::new(&spec.disk_path).exists() {
        return Err(CloudHvError::InvalidSpec(format!(
            "disk_path not found: {}",
            &spec.disk_path
        )));
    }
    if spec.vcpus == 0 {
        return Err(CloudHvError::InvalidSpec("vcpus must be >= 1".into()));
    }
    if spec.memory_mb == 0 {
        return Err(CloudHvError::InvalidSpec("memory_mb must be greater than 0".into()));
    }

    // Prepare directory layout
    let dir = vm_dir(&spec.id);
    sal_os::mkdir(
        dir.to_str()
            .unwrap_or_else(|| "/tmp/hero/virt/vms/__invalid__"),
    )
    .map_err(|e| CloudHvError::IoError(e.to_string()))?;
    let log_dir = dir.join("logs");
    sal_os::mkdir(log_dir.to_str().unwrap()).map_err(|e| CloudHvError::IoError(e.to_string()))?;

    // Persist initial record
    let rec = VmRecord {
        spec: spec.clone(),
        runtime: VmRuntime {
            pid: None,
            status: "stopped".into(),
            log_file: vm_log_path(&spec.id).to_string_lossy().into_owned(),
        },
    };
    let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    write_json(&vm_json_path(&spec.id), &value)?;

    Ok(spec.id.clone())
}

/// Start a VM using cloud-hypervisor
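///
/// Reads the persisted record, converts a qcow2 disk to raw if needed
/// (Cloud Hypervisor rejects compressed qcow2 blocks), sets up default
/// bridge/NAT/DHCP networking unless the spec's `extra_args` carry `--net` or
/// `--no-default-net`, then launches `cloud-hypervisor-static` in the
/// background and records its PID.
///
/// # Example (illustrative)
///
/// ```ignore
/// // Assumes "demo-vm" was created with `vm_create`, the cloud-hypervisor /
/// // ch-remote static binaries are on PATH, and the process has the
/// // privileges needed for the default bridge/NAT/DHCP setup.
/// vm_start("demo-vm")?;
/// let rec = vm_info("demo-vm")?;
/// assert_eq!(rec.runtime.status, "running");
/// ```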
pub fn vm_start(id: &str) -> Result<(), CloudHvError> {
    ensure_deps()?;

    // Load record
    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let value = read_json(&p)?;
    let mut rec: VmRecord =
        serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;

    // Prepare invocation
    let api_socket = if rec.spec.api_socket.trim().is_empty() {
        vm_dir(id).join("api.sock").to_string_lossy().into_owned()
    } else {
        rec.spec.api_socket.clone()
    };
    let log_file = vm_log_path(id).to_string_lossy().into_owned();

    // Ensure API socket directory exists and remove any stale socket file
    let api_path = Path::new(&api_socket);
    if let Some(parent) = api_path.parent() {
        fs::create_dir_all(parent).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    }
    // Best-effort removal of stale socket
    let _ = fs::remove_file(&api_path);

    // Preflight disk: if source is qcow2, convert to raw to avoid CH "Compressed blocks not supported"
    // Robust conversion:
    // - Remove any stale destination
    // - Try direct convert to destination file
    // - On failure (e.g., byte-range lock issues), fallback to piping stdout into dd
    let mut disk_to_use = rec.spec.disk_path.clone();
    if let Ok(info) = qcow2::info(&disk_to_use) {
        if info.get("format").and_then(|v| v.as_str()) == Some("qcow2") {
            let dest = vm_dir(id).join("disk.raw").to_string_lossy().into_owned();
            // Best-effort remove stale target file to avoid locking errors
            let _ = fs::remove_file(&dest);

            // Attempt 1: normal qemu-img convert to dest file
            let cmd1 = format!(
                "qemu-img convert -O raw {} {}",
                shell_escape(&disk_to_use),
                shell_escape(&dest)
            );
            let attempt1 = sal_process::run(&cmd1).silent(true).die(false).execute();

            let mut converted_ok = false;
            let mut err1: Option<String> = None;

            match attempt1 {
                Ok(res) if res.success => converted_ok = true,
                Ok(res) => err1 = Some(format!("{}{}", res.stdout, res.stderr)),
                Err(e) => err1 = Some(e.to_string()),
            }

            if !converted_ok {
                // Attempt 2: pipe via stdout into dd (avoids qemu-img destination locking semantics on some FS)
                let cmd2 = format!(
                    "#!/bin/bash -euo pipefail\nqemu-img convert -O raw {} - | dd of={} bs=4M status=none",
                    shell_escape(&disk_to_use),
                    shell_escape(&dest)
                );
                match sal_process::run(&cmd2).silent(true).die(false).execute() {
                    Ok(res) if res.success => {
                        converted_ok = true;
                    }
                    Ok(res) => {
                        let mut msg = String::from("Failed converting qcow2 to raw.");
                        if let Some(e1) = err1 {
                            msg.push_str(&format!("\nFirst attempt error:\n{}", e1));
                        }
                        msg.push_str(&format!("\nSecond attempt error:\n{}{}", res.stdout, res.stderr));
                        return Err(CloudHvError::CommandFailed(msg));
                    }
                    Err(e) => {
                        let mut msg = String::from("Failed converting qcow2 to raw.");
                        if let Some(e1) = err1 {
                            msg.push_str(&format!("\nFirst attempt error:\n{}", e1));
                        }
                        msg.push_str(&format!("\nSecond attempt error:\n{}", e));
                        return Err(CloudHvError::CommandFailed(msg));
                    }
                }
            }

            if converted_ok {
                disk_to_use = dest;
            }
        }
    }

    // Consolidate extra --disk occurrences from spec.extra_args into a single --disk flag
    // (the CH CLI expects one '--disk' followed by one or more values).
    // Collect disk value tokens provided by the user and strip them from extra args so we can
    // render one '--disk' followed by multiple values.
    let mut extra_disk_vals: Vec<String> = Vec::new();
    let mut extra_args_sans_disks: Vec<String> = Vec::new();
    if let Some(extra) = rec.spec.extra_args.clone() {
        let mut i = 0usize;
        while i < extra.len() {
            let tok = extra[i].clone();
            if tok == "--disk" {
                if i + 1 < extra.len() {
                    extra_disk_vals.push(extra[i + 1].clone());
                    i += 2;
                    continue;
                } else {
                    // dangling --disk without a value; drop it
                    i += 1;
                    continue;
                }
            } else if tok == "--no-default-net" {
                // sentinel: suppress default networking; do not pass to the CH CLI
                i += 1;
                continue;
            } else if let Some(rest) = tok.strip_prefix("--disk=") {
                if !rest.is_empty() {
                    extra_disk_vals.push(rest.to_string());
                }
                i += 1;
                continue;
            }
            // keep token
            extra_args_sans_disks.push(tok);
            i += 1;
        }
    }

    // CH CLI flags (very common subset)
    // --disk path=... uses virtio-blk by default
    let mut parts: Vec<String> = vec![
        "cloud-hypervisor-static".into(),
        "--api-socket".into(),
        api_socket.clone(),
    ];

    if let Some(fw) = rec.spec.firmware_path.clone() {
        // Firmware boot path
        parts.push("--firmware".into());
        parts.push(fw);
    } else if let Some(kpath) = rec.spec.kernel_path.clone() {
        // Direct kernel boot path
        let cmdline = rec
            .spec
            .cmdline
            .clone()
            .unwrap_or_else(|| "console=ttyS0 reboot=k panic=1".to_string());
        parts.push("--kernel".into());
        parts.push(kpath);
        if let Some(initrd) = rec.spec.initramfs_path.clone() {
            if Path::new(&initrd).exists() {
                parts.push("--initramfs".into());
                parts.push(initrd);
            }
        }
        parts.push("--cmdline".into());
        parts.push(cmdline);
    } else {
        return Err(CloudHvError::InvalidSpec(
            "neither firmware_path nor kernel_path set at start time".into(),
        ));
    }

    parts.push("--disk".into());
    parts.push(format!("path={}", disk_to_use));
    // Append any additional disk value tokens (from the sanitized extra args) so CH sees a single
    // '--disk' with multiple values
    for dv in &extra_disk_vals {
        parts.push(dv.clone());
    }
    parts.push("--cpus".into());
    parts.push(format!("boot={}", rec.spec.vcpus));
    parts.push("--memory".into());
    parts.push(format!("size={}M", rec.spec.memory_mb));
    parts.push("--serial".into());
    parts.push("tty".into());
    parts.push("--console".into());
    parts.push("off".into());

    // Determine if the user provided explicit network arguments (e.g. "--net", "tap=...,mac=...").
    // If so, do NOT provision the default host networking or add a default NIC.
    let has_user_net = rec
        .spec
        .extra_args
        .as_ref()
        .map(|v| v.iter().any(|tok| tok == "--net" || tok == "--no-default-net"))
        .unwrap_or(false);

    if !has_user_net {
        // Networking prerequisites (bridge + NAT via nftables + dnsmasq DHCP)
        // Defaults can be overridden via env:
        // HERO_VIRT_BRIDGE_NAME, HERO_VIRT_BRIDGE_ADDR_CIDR, HERO_VIRT_SUBNET_CIDR, HERO_VIRT_DHCP_START, HERO_VIRT_DHCP_END
        let bridge_name = std::env::var("HERO_VIRT_BRIDGE_NAME").unwrap_or_else(|_| "br-hero".into());
        let bridge_addr_cidr =
            std::env::var("HERO_VIRT_BRIDGE_ADDR_CIDR").unwrap_or_else(|_| "172.30.0.1/24".into());
        let subnet_cidr =
            std::env::var("HERO_VIRT_SUBNET_CIDR").unwrap_or_else(|_| "172.30.0.0/24".into());
        let dhcp_start =
            std::env::var("HERO_VIRT_DHCP_START").unwrap_or_else(|_| "172.30.0.50".into());
        let dhcp_end =
            std::env::var("HERO_VIRT_DHCP_END").unwrap_or_else(|_| "172.30.0.250".into());

        // Ensure host-side networking (requires root privileges / CAP_NET_ADMIN)
        ensure_host_net_prereq_dnsmasq_nftables(
            &bridge_name,
            &bridge_addr_cidr,
            &subnet_cidr,
            &dhcp_start,
            &dhcp_end,
        )?;

        // Ensure a TAP device for this VM and attach it to the bridge
        let tap_name = ensure_tap_for_vm(&bridge_name, id)?;
        // Stable locally-administered MAC derived from the VM id
        let mac = stable_mac_from_id(id);

        parts.push("--net".into());
        parts.push(format!("tap={},mac={}", tap_name, mac));
    }

    // Append any user-provided extra args, sans any '--disk' we already consolidated
    for e in extra_args_sans_disks {
        parts.push(e);
    }

    let args_str = shell_join(&parts);
    let script = format!(
        "#!/bin/bash -e
nohup {} > '{}' 2>&1 &
echo $! > '{}'
",
        args_str,
        log_file,
        vm_pid_path(id).to_string_lossy()
    );

    // Execute script; this will background cloud-hypervisor and return
    let result = sal_process::run(&script).execute();
    match result {
        Ok(res) => {
            if !res.success {
                return Err(CloudHvError::CommandFailed(format!(
                    "Failed to start VM '{}': {}",
                    id, res.stderr
                )));
            }
        }
        Err(e) => {
            return Err(CloudHvError::CommandFailed(format!(
                "Failed to start VM '{}': {}",
                id, e
            )))
        }
    }

    // Read PID back
    let pid = match fs::read_to_string(vm_pid_path(id)) {
        Ok(s) => s.trim().parse::<i64>().ok(),
        Err(_) => None,
    };

    // Quick health check: ensure process did not exit immediately due to CLI errors (e.g., duplicate flags)
    if let Some(pid_num) = pid {
        thread::sleep(Duration::from_millis(300));
        if !proc_exists(pid_num) {
            // Tail log to surface the error cause
            let tail_cmd = format!("tail -n 200 {}", shell_escape(&log_file));
            let tail = sal_process::run(&tail_cmd).die(false).silent(true).execute();
            let mut log_snip = String::new();
            if let Ok(res) = tail {
                if res.success {
                    log_snip = res.stdout;
                } else {
                    log_snip = format!("{}{}", res.stdout, res.stderr);
                }
            }
            return Err(CloudHvError::CommandFailed(format!(
                "cloud-hypervisor exited immediately after start. Log tail:\n{}",
                log_snip
            )));
        }
    } else {
        return Err(CloudHvError::CommandFailed(
            "failed to obtain cloud-hypervisor PID (start script did not write pid)".into(),
        ));
    }

    // Update state
    rec.runtime.pid = pid;
    rec.runtime.status = if pid.is_some() { "running".into() } else { "stopped".into() };
    rec.runtime.log_file = log_file;
    rec.spec.api_socket = api_socket.clone();

    let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    write_json(&vm_json_path(id), &value)?;

    Ok(())
}

/// Return VM record info (spec + runtime) by id
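///
/// # Example (illustrative)
///
/// ```ignore
/// let rec = vm_info("demo-vm")?;
/// println!("{} is {}", rec.spec.id, rec.runtime.status);
/// ```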
pub fn vm_info(id: &str) -> Result<VmRecord, CloudHvError> {
    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let value = read_json(&p)?;
    let rec: VmRecord = serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    Ok(rec)
}

/// Stop a VM via ch-remote (graceful), optionally force kill
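///
/// Sends `ch-remote-static ... shutdown` over the API socket and waits up to
/// ~10s; with `force = true`, a `SIGKILL` follows if the process is still alive.
///
/// # Example (illustrative)
///
/// ```ignore
/// vm_stop("demo-vm", false)?; // graceful shutdown only
/// vm_stop("demo-vm", true)?;  // escalate to SIGKILL if still running
/// ```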
pub fn vm_stop(id: &str, force: bool) -> Result<(), CloudHvError> {
    ensure_deps().ok(); // best-effort; we might still force-kill

    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let value = read_json(&p)?;
    let mut rec: VmRecord =
        serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;

    // Attempt graceful shutdown if api socket known
    if !rec.spec.api_socket.trim().is_empty() {
        let cmd = format!("ch-remote-static --api-socket {} shutdown", rec.spec.api_socket);
        let _ = sal_process::run(&cmd).die(false).silent(true).execute();
    }

    // Wait for process to exit (up to ~10s)
    if let Some(pid) = rec.runtime.pid {
        for _ in 0..50 {
            if !proc_exists(pid) {
                break;
            }
            thread::sleep(Duration::from_millis(200));
        }
        // If still alive and force, kill -9 and wait again (up to ~10s)
        if proc_exists(pid) && force {
            // Send SIGKILL without extra shell layers; suppress errors/noise
            let _ = sal_process::run(&format!("kill -9 {}", pid))
                .die(false)
                .silent(true)
                .execute();
            for _ in 0..50 {
                if !proc_exists(pid) {
                    break;
                }
                thread::sleep(Duration::from_millis(200));
            }
        }
    }

    // Update state
    rec.runtime.status = "stopped".into();
    rec.runtime.pid = None;
    let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
    write_json(&vm_json_path(id), &value)?;

    // Remove pid file
    let _ = fs::remove_file(vm_pid_path(id));

    Ok(())
}

/// Delete a VM definition; optionally delete disks.
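///
/// # Example (illustrative)
///
/// ```ignore
/// // Remove the VM definition and its directory, keeping the source disk image.
/// vm_delete("demo-vm", false)?;
/// ```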
pub fn vm_delete(id: &str, delete_disks: bool) -> Result<(), CloudHvError> {
    let p = vm_json_path(id);
    if !p.exists() {
        return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
    }
    let rec: VmRecord = serde_json::from_value(read_json(&p)?)
        .map_err(|e| CloudHvError::JsonError(e.to_string()))?;

    // If it appears to be running, attempt a force stop first (best-effort)
    if let Some(pid) = rec.runtime.pid {
        if proc_exists(pid) {
            let _ = vm_stop(id, true);
            // Re-check the original PID for liveness (up to ~5s)
            for _ in 0..25 {
                if !proc_exists(pid) {
                    break;
                }
                thread::sleep(Duration::from_millis(200));
            }
            if proc_exists(pid) {
                return Err(CloudHvError::CommandFailed(
                    "VM appears to be running; stop it first".into(),
                ));
            }
        }
    }

    if delete_disks {
        let _ = fs::remove_file(&rec.spec.disk_path);
    }

    let d = vm_dir(id);
    fs::remove_dir_all(&d).map_err(|e| CloudHvError::IoError(e.to_string()))?;
    Ok(())
}

/// List all VMs
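///
/// # Example (illustrative)
///
/// ```ignore
/// for rec in vm_list()? {
///     println!("{}\t{}", rec.spec.id, rec.runtime.status);
/// }
/// ```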
pub fn vm_list() -> Result<Vec<VmRecord>, CloudHvError> {
    let root = hero_vm_root();
    if !root.exists() {
        return Ok(vec![]);
    }
    let mut out = vec![];
    for entry in fs::read_dir(&root).map_err(|e| CloudHvError::IoError(e.to_string()))? {
        let entry = entry.map_err(|e| CloudHvError::IoError(e.to_string()))?;
        let p = entry.path();
        if !p.is_dir() {
            continue;
        }
        let vm_json = p.join("vm.json");
        if !vm_json.exists() {
            continue;
        }
        let rec: VmRecord = serde_json::from_value(read_json(&vm_json)?)
            .map_err(|e| CloudHvError::JsonError(e.to_string()))?;

        out.push(rec);
    }
    Ok(out)
}

fn tap_name_for_id(id: &str) -> String {
    // Linux IFNAMSIZ is typically 15; keep "tap-" + 10 hex = 14 chars
    let mut h = DefaultHasher::new();
    id.hash(&mut h);
    let v = h.finish();
    let hex = format!("{:016x}", v);
    format!("tap-{}", &hex[..10])
}

fn ensure_tap_for_vm(bridge_name: &str, id: &str) -> Result<String, CloudHvError> {
    let tap = tap_name_for_id(id);

    let script = format!(
        "#!/bin/bash -e
BR={br}
TAP={tap}
UIDX=$(id -u)
GIDX=$(id -g)

# Create TAP if missing and assign to current user/group
ip link show \"$TAP\" >/dev/null 2>&1 || ip tuntap add dev \"$TAP\" mode tap user \"$UIDX\" group \"$GIDX\"

# Enslave to bridge and bring up (idempotent)
ip link set \"$TAP\" master \"$BR\" 2>/dev/null || true
ip link set \"$TAP\" up
",
        br = shell_escape(bridge_name),
        tap = shell_escape(&tap),
    );

    match sal_process::run(&script).silent(true).execute() {
        Ok(res) if res.success => Ok(tap),
        Ok(res) => Err(CloudHvError::CommandFailed(format!(
            "Failed to ensure TAP '{}': {}",
            tap, res.stderr
        ))),
        Err(e) => Err(CloudHvError::CommandFailed(format!(
            "Failed to ensure TAP '{}': {}",
            tap, e
        ))),
    }
}

fn stable_mac_from_id(id: &str) -> String {
    let mut h = DefaultHasher::new();
    id.hash(&mut h);
    let v = h.finish();
    let b0 = (((v >> 40) & 0xff) as u8 & 0xfe) | 0x02; // locally administered, unicast
    let b1 = ((v >> 32) & 0xff) as u8;
    let b2 = ((v >> 24) & 0xff) as u8;
    let b3 = ((v >> 16) & 0xff) as u8;
    let b4 = ((v >> 8) & 0xff) as u8;
    let b5 = (v & 0xff) as u8;
    format!("{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", b0, b1, b2, b3, b4, b5)
}

fn ensure_host_net_prereq_dnsmasq_nftables(
    bridge_name: &str,
    bridge_addr_cidr: &str,
    subnet_cidr: &str,
    dhcp_start: &str,
    dhcp_end: &str,
) -> Result<(), CloudHvError> {
    // Dependencies
    for bin in ["ip", "nft", "dnsmasq", "systemctl"] {
        if sal_process::which(bin).is_none() {
            return Err(CloudHvError::DependencyMissing(format!(
                "{} not found on PATH; required for VM networking",
                bin
            )));
        }
    }

    // Build idempotent setup script
    let script = format!(
        "#!/bin/bash -e
set -e

BR={br}
BR_ADDR={br_addr}
SUBNET={subnet}
DHCP_START={dstart}
DHCP_END={dend}

# Determine default WAN interface
WAN_IF=$(ip -o route show default | awk '{{print $5}}' | head -n1)

# Bridge creation (idempotent)
ip link show \"$BR\" >/dev/null 2>&1 || ip link add name \"$BR\" type bridge
ip addr replace \"$BR_ADDR\" dev \"$BR\"
ip link set \"$BR\" up

# IPv6 placeholder address + forward (temporary)
ip -6 addr add 400::1/64 dev \"$BR\" 2>/dev/null || true
sysctl -w net.ipv6.conf.all.forwarding=1 >/dev/null || true

# IPv4 forwarding
sysctl -w net.ipv4.ip_forward=1 >/dev/null

# nftables NAT (idempotent)
nft list table ip hero >/dev/null 2>&1 || nft add table ip hero
nft list chain ip hero postrouting >/dev/null 2>&1 || nft add chain ip hero postrouting {{ type nat hook postrouting priority 100 \\; }}
nft list chain ip hero postrouting | grep -q \"ip saddr $SUBNET oifname \\\"$WAN_IF\\\" masquerade\" \
|| nft add rule ip hero postrouting ip saddr $SUBNET oifname \"$WAN_IF\" masquerade

# dnsmasq DHCP config (idempotent)
mkdir -p /etc/dnsmasq.d
CFG=/etc/dnsmasq.d/hero-$BR.conf
TMP=/etc/dnsmasq.d/.hero-$BR.conf.new
cat >\"$TMP\" <<EOF
interface=$BR
bind-interfaces
dhcp-range=$DHCP_START,$DHCP_END,12h
dhcp-option=option:dns-server,1.1.1.1,8.8.8.8
EOF

if [ ! -f \"$CFG\" ] || ! cmp -s \"$CFG\" \"$TMP\"; then
  mv \"$TMP\" \"$CFG\"
  if systemctl is-active --quiet dnsmasq; then
    systemctl reload dnsmasq || systemctl restart dnsmasq || true
  else
    systemctl enable --now dnsmasq || true
  fi
else
  rm -f \"$TMP\"
  systemctl enable --now dnsmasq || true
fi
",
        br = shell_escape(bridge_name),
        br_addr = shell_escape(bridge_addr_cidr),
        subnet = shell_escape(subnet_cidr),
        dstart = shell_escape(dhcp_start),
        dend = shell_escape(dhcp_end),
    );

    match sal_process::run(&script).silent(true).execute() {
        Ok(res) if res.success => Ok(()),
        Ok(res) => Err(CloudHvError::CommandFailed(format!(
            "Host networking setup failed: {}",
            res.stderr
        ))),
        Err(e) => Err(CloudHvError::CommandFailed(format!(
            "Host networking setup failed: {}",
            e
        ))),
    }
}

/// Render a shell-safe command string from vector of tokens
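///
/// # Example (illustrative)
///
/// ```ignore
/// let parts = vec!["echo".to_string(), "hello world".to_string()];
/// // Tokens with characters outside [A-Za-z0-9-_./=:] get single-quoted by shell_escape:
/// assert_eq!(shell_join(&parts), "echo 'hello world'");
/// ```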
fn shell_join(parts: &Vec<String>) -> String {
    let mut s = String::new();
    for (i, p) in parts.iter().enumerate() {
        if i > 0 {
            s.push(' ');
        }
        s.push_str(&shell_escape(p));
    }
    s
}

fn shell_escape(s: &str) -> String {
    if s.is_empty() {
        return "''".into();
    }
    if s
        .chars()
        .all(|c| c.is_ascii_alphanumeric() || "-_./=:".contains(c))
    {
        return s.into();
    }
    // single-quote wrap, escape existing quotes
    let mut out = String::from("'");
    for ch in s.chars() {
        if ch == '\'' {
            out.push_str("'\"'\"'");
        } else {
            out.push(ch);
        }
    }
    out.push('\'');
    out
}