7 Commits

Author SHA1 Message Date
Maxime Van Hees
da3da0ae30 working ipv6 ip assignment + ssh with login/passwd 2025-08-28 15:19:37 +02:00
Maxime Van Hees
784f87db97 WIP2 2025-08-27 16:03:32 +02:00
Maxime Van Hees
773db2238d working version 1 2025-08-26 17:46:42 +02:00
Maxime Van Hees
e8a369e3a2 WIP2 2025-08-26 17:43:20 +02:00
Maxime Van Hees
4b4f3371b0 WIP: automating VM deployment 2025-08-26 16:50:59 +02:00
Maxime Van Hees
1bb731711b (unstable) pushing WIP 2025-08-25 15:25:00 +02:00
Maxime Van Hees
af89ef0149 networking VMs (WIP) 2025-08-21 18:57:20 +02:00
15 changed files with 3012 additions and 45 deletions

View File

@@ -0,0 +1,170 @@
use crate::cloudhv::{vm_create, vm_start, CloudHvError, VmSpec};
use crate::image_prep::{image_prepare, Flavor as ImgFlavor, ImagePrepOptions, NetPlanOpts};
use sal_process;
/// Cloud Hypervisor VM Builder focused on Rhai ergonomics.
///
/// Defaults enforced:
/// - kernel: /images/hypervisor-fw (firmware file in images directory)
/// - seccomp: false (pushed via extra args)
/// - serial: tty, console: off (already added by vm_start)
/// - cmdline: "console=ttyS0 root=/dev/vda1 rw"
/// - vcpus: 2
/// - memory_mb: 2048
///
/// Disk can be provided directly or prepared from a flavor (/images source).
#[derive(Debug, Clone)]
pub struct CloudHvBuilder {
    /// VM identifier; used for per-VM directory layout and tap/mac derivation downstream.
    id: String,
    /// Explicit disk image path; mutually exclusive with `flavor` (setting one clears the other).
    disk_path: Option<String>,
    /// Image flavor to prepare a disk from; mutually exclusive with `disk_path`.
    flavor: Option<ImgFlavor>,
    /// Guest memory in MiB (default 2048).
    memory_mb: u32,
    /// Number of vCPUs (default 2).
    vcpus: u32,
    /// Kernel command line passed to cloud-hypervisor (default ttyS0 + /dev/vda1 root).
    cmdline: Option<String>,
    /// Raw extra CLI args; pre-seeded with `--seccomp false` and may carry the
    /// `--no-default-net` sentinel consumed by vm_start.
    extra_args: Vec<String>,
    /// Mirrors the `--no-default-net` sentinel; suppresses default host networking.
    no_default_net: bool,
}
impl CloudHvBuilder {
    /// Create a builder for VM `id` with the documented defaults:
    /// 2 vCPUs, 2048 MiB RAM, ttyS0/vda1 cmdline, and `--seccomp false`.
    pub fn new(id: &str) -> Self {
        Self {
            id: id.to_string(),
            disk_path: None,
            flavor: None,
            memory_mb: 2048,
            vcpus: 2,
            cmdline: Some("console=ttyS0 root=/dev/vda1 rw".to_string()),
            // `--seccomp false` is enforced by default through extra args.
            extra_args: vec!["--seccomp".into(), "false".into()],
            no_default_net: false,
        }
    }

    /// Use an explicit disk image path; clears any previously selected flavor.
    pub fn disk(&mut self, path: &str) -> &mut Self {
        self.flavor = None;
        self.disk_path = Some(path.to_string());
        self
    }

    /// Select a disk prepared from a named flavor. Recognized spellings of
    /// "alpine" select Alpine; everything else (including unknown values)
    /// selects Ubuntu. Clears any previously set explicit disk path.
    pub fn disk_from_flavor(&mut self, flavor: &str) -> &mut Self {
        let chosen = if matches!(flavor, "alpine" | "Alpine" | "ALPINE") {
            ImgFlavor::Alpine
        } else {
            // "ubuntu"/"Ubuntu"/"UBUNTU" and any unrecognized value map here.
            ImgFlavor::Ubuntu
        };
        self.disk_path = None;
        self.flavor = Some(chosen);
        self
    }

    /// Set guest memory in MiB; zero is ignored and keeps the current value.
    pub fn memory_mb(&mut self, mb: u32) -> &mut Self {
        if mb != 0 {
            self.memory_mb = mb;
        }
        self
    }

    /// Set vCPU count; zero is ignored and keeps the current value.
    pub fn vcpus(&mut self, v: u32) -> &mut Self {
        if v != 0 {
            self.vcpus = v;
        }
        self
    }

    /// Override the kernel command line.
    pub fn cmdline(&mut self, c: &str) -> &mut Self {
        self.cmdline = Some(c.to_string());
        self
    }

    /// Append a raw extra CLI argument; whitespace-only strings are dropped.
    pub fn extra_arg(&mut self, a: &str) -> &mut Self {
        let is_blank = a.trim().is_empty();
        if !is_blank {
            self.extra_args.push(a.to_string());
        }
        self
    }

    /// Suppress the default host networking provisioning and NIC injection.
    /// Internally, we set a sentinel consumed by vm_start.
    pub fn no_default_net(&mut self) -> &mut Self {
        self.no_default_net = true;
        // Push the sentinel (consumed by vm_start) at most once.
        let sentinel = "--no-default-net";
        let already_present = self.extra_args.iter().any(|e| e.as_str() == sentinel);
        if !already_present {
            self.extra_args.push(sentinel.to_string());
        }
        self
    }

    /// Resolve absolute path to hypervisor-fw from /images.
    fn resolve_hypervisor_fw() -> Result<String, CloudHvError> {
        let fw_path = "/images/hypervisor-fw";
        if !std::path::Path::new(fw_path).exists() {
            return Err(CloudHvError::DependencyMissing(format!(
                "firmware not found: {} (expected hypervisor-fw in /images)",
                fw_path
            )));
        }
        Ok(fw_path.to_string())
    }

    /// Prepare disk if needed and return the final disk path.
    ///
    /// An explicit `.disk(path)` always wins. Otherwise a configured flavor
    /// is materialized via `image_prepare` (per-VM qcow2 copy, UUID retag,
    /// fstab/grub/netplan adjustment, raw conversion).
    fn ensure_disk(&self) -> Result<String, CloudHvError> {
        if let Some(explicit) = &self.disk_path {
            return Ok(explicit.clone());
        }
        match &self.flavor {
            Some(f) => {
                // Use defaults: DHCPv4, placeholder static IPv6.
                let opts = ImagePrepOptions {
                    flavor: f.clone(),
                    id: self.id.clone(),
                    source: None,
                    target_dir: None,
                    net: NetPlanOpts::default(),
                    disable_cloud_init_net: true,
                };
                let prepared = image_prepare(&opts)
                    .map_err(|e| CloudHvError::CommandFailed(e.to_string()))?;
                Ok(prepared.raw_disk)
            }
            None => Err(CloudHvError::InvalidSpec(
                "no disk configured; set .disk(path) or .disk_from_flavor(flavor)".into(),
            )),
        }
    }

    /// Build the final VmSpec, create the VM, start it, and return its id.
    pub fn launch(&mut self) -> Result<String, CloudHvError> {
        // Firmware and disk are resolved before touching any VM state.
        let kernel_path = Self::resolve_hypervisor_fw()?;
        let disk_path = self.ensure_disk()?;
        let extra_args = if self.extra_args.is_empty() {
            None
        } else {
            Some(self.extra_args.clone())
        };
        let spec = VmSpec {
            id: self.id.clone(),
            // Direct kernel boot with hypervisor-fw per requirements.
            kernel_path: Some(kernel_path),
            initramfs_path: None,
            firmware_path: None,
            disk_path,
            api_socket: "".into(),
            vcpus: self.vcpus,
            memory_mb: self.memory_mb,
            cmdline: self.cmdline.clone(),
            extra_args,
        };
        let vm_id = vm_create(&spec)?;
        vm_start(&vm_id)?;
        Ok(vm_id)
    }
}

View File

@@ -5,11 +5,15 @@ use std::fs;
use std::path::{Path, PathBuf};
use std::thread;
use std::time::Duration;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use sal_os;
use sal_process;
use crate::qcow2;
pub mod builder;
/// Error type for Cloud Hypervisor operations
#[derive(Debug)]
pub enum CloudHvError {
@@ -41,6 +45,8 @@ pub struct VmSpec {
pub id: String,
/// Optional for firmware boot; required for direct kernel boot
pub kernel_path: Option<String>,
/// Optional initramfs when using direct kernel boot
pub initramfs_path: Option<String>,
/// Optional for direct kernel boot; required for firmware boot
pub firmware_path: Option<String>,
/// Disk image path (qcow2 or raw)
@@ -226,38 +232,108 @@ pub fn vm_start(id: &str) -> Result<(), CloudHvError> {
let _ = fs::remove_file(&api_path);
// Preflight disk: if source is qcow2, convert to raw to avoid CH "Compressed blocks not supported"
// This is best-effort: if qemu-img is unavailable or info fails, we skip conversion.
// Robust conversion:
// - Remove any stale destination
// - Try direct convert to destination file
// - On failure (e.g., byte-range lock issues), fallback to piping stdout into dd
let mut disk_to_use = rec.spec.disk_path.clone();
if let Ok(info) = qcow2::info(&disk_to_use) {
if info.get("format").and_then(|v| v.as_str()) == Some("qcow2") {
let dest = vm_dir(id).join("disk.raw").to_string_lossy().into_owned();
let cmd = format!(
// Best-effort remove stale target file to avoid locking errors
let _ = fs::remove_file(&dest);
// Attempt 1: normal qemu-img convert to dest file
let cmd1 = format!(
"qemu-img convert -O raw {} {}",
shell_escape(&disk_to_use),
shell_escape(&dest)
);
match sal_process::run(&cmd).silent(true).execute() {
let attempt1 = sal_process::run(&cmd1).silent(true).die(false).execute();
let mut converted_ok = false;
let mut err1: Option<String> = None;
if let Ok(res) = attempt1 {
if res.success {
converted_ok = true;
} else {
err1 = Some(format!("{}{}", res.stdout, res.stderr));
}
} else if let Err(e) = attempt1 {
err1 = Some(e.to_string());
}
if !converted_ok {
// Attempt 2: pipe via stdout into dd (avoids qemu-img destination locking semantics on some FS)
let heredoc2 = format!(
"bash -e -s <<'EOF'\nset -euo pipefail\nqemu-img convert -O raw {} - | dd of={} bs=4M status=none\nEOF\n",
shell_escape(&disk_to_use),
shell_escape(&dest)
);
match sal_process::run(&heredoc2).silent(true).die(false).execute() {
Ok(res) if res.success => {
disk_to_use = dest;
converted_ok = true;
}
Ok(res) => {
return Err(CloudHvError::CommandFailed(format!(
"Failed converting qcow2 to raw: {}",
res.stderr
)));
let mut msg = String::from("Failed converting qcow2 to raw.");
if let Some(e1) = err1 {
msg.push_str(&format!("\nFirst attempt error:\n{}", e1));
}
msg.push_str(&format!("\nSecond attempt error:\n{}{}", res.stdout, res.stderr));
return Err(CloudHvError::CommandFailed(msg));
}
Err(e) => {
return Err(CloudHvError::CommandFailed(format!(
"Failed converting qcow2 to raw: {}",
e
)));
let mut msg = String::from("Failed converting qcow2 to raw.");
if let Some(e1) = err1 {
msg.push_str(&format!("\nFirst attempt error:\n{}", e1));
}
msg.push_str(&format!("\nSecond attempt error:\n{}", e));
return Err(CloudHvError::CommandFailed(msg));
}
}
}
// Build command (minimal args for Phase 2)
// We redirect all output to log_file via shell and keep process in background with nohup
if converted_ok {
disk_to_use = dest;
}
}
}
// Consolidate extra --disk occurrences from spec.extra_args into a single --disk (CH version requires variadic form)
// Collect disk value tokens provided by the user and strip them from extra args so we can render one '--disk' followed by multiple values.
let mut extra_disk_vals: Vec<String> = Vec::new();
let mut extra_args_sans_disks: Vec<String> = Vec::new();
if let Some(extra) = rec.spec.extra_args.clone() {
let mut i = 0usize;
while i < extra.len() {
let tok = extra[i].clone();
if tok == "--disk" {
if i + 1 < extra.len() {
extra_disk_vals.push(extra[i + 1].clone());
i += 2;
continue;
} else {
// dangling --disk without value; drop it
i += 1;
continue;
}
} else if tok == "--no-default-net" {
// sentinel: suppress default networking; do not pass to CH CLI
i += 1;
continue;
} else if let Some(rest) = tok.strip_prefix("--disk=") {
if !rest.is_empty() {
extra_disk_vals.push(rest.to_string());
}
i += 1;
continue;
}
// keep token
extra_args_sans_disks.push(tok);
i += 1;
}
}
// CH CLI flags (very common subset)
// --disk path=... uses virtio-blk by default
@@ -280,6 +356,12 @@ pub fn vm_start(id: &str) -> Result<(), CloudHvError> {
.unwrap_or_else(|| "console=ttyS0 reboot=k panic=1".to_string());
parts.push("--kernel".into());
parts.push(kpath);
if let Some(initrd) = rec.spec.initramfs_path.clone() {
if Path::new(&initrd).exists() {
parts.push("--initramfs".into());
parts.push(initrd);
}
}
parts.push("--cmdline".into());
parts.push(cmdline);
} else {
@@ -290,6 +372,10 @@ pub fn vm_start(id: &str) -> Result<(), CloudHvError> {
parts.push("--disk".into());
parts.push(format!("path={}", disk_to_use));
// Append any additional disk value tokens (from sanitized extra args) so CH sees a single '--disk' with multiple values
for dv in &extra_disk_vals {
parts.push(dv.clone());
}
parts.push("--cpus".into());
parts.push(format!("boot={}", rec.spec.vcpus));
parts.push("--memory".into());
@@ -299,25 +385,99 @@ pub fn vm_start(id: &str) -> Result<(), CloudHvError> {
parts.push("--console".into());
parts.push("off".into());
if let Some(extra) = rec.spec.extra_args.clone() {
for e in extra {
parts.push(e);
// Determine if the user provided explicit network arguments (e.g. "--net", "tap=...,mac=...")
// If so, do NOT provision the default host networking or add a default NIC.
let has_user_net = rec
.spec
.extra_args
.as_ref()
.map(|v| v.iter().any(|tok| tok == "--net" || tok == "--no-default-net"))
.unwrap_or(false);
if !has_user_net {
// Networking prerequisites (bridge + NAT via nftables + dnsmasq DHCP)
// Defaults can be overridden via env:
// HERO_VIRT_BRIDGE_NAME, HERO_VIRT_BRIDGE_ADDR_CIDR, HERO_VIRT_SUBNET_CIDR, HERO_VIRT_DHCP_START, HERO_VIRT_DHCP_END
let bridge_name = std::env::var("HERO_VIRT_BRIDGE_NAME").unwrap_or_else(|_| "br-hero".into());
let bridge_addr_cidr =
std::env::var("HERO_VIRT_BRIDGE_ADDR_CIDR").unwrap_or_else(|_| "172.30.0.1/24".into());
let subnet_cidr =
std::env::var("HERO_VIRT_SUBNET_CIDR").unwrap_or_else(|_| "172.30.0.0/24".into());
let dhcp_start =
std::env::var("HERO_VIRT_DHCP_START").unwrap_or_else(|_| "172.30.0.50".into());
let dhcp_end =
std::env::var("HERO_VIRT_DHCP_END").unwrap_or_else(|_| "172.30.0.250".into());
// IPv6 over Mycelium: enabled by default.
// If explicitly disabled via HERO_VIRT_IPV6_ENABLE=false|0, we skip.
// If enabled but Mycelium is not detected, return an error.
let ipv6_env = std::env::var("HERO_VIRT_IPV6_ENABLE").unwrap_or_else(|_| "".into());
let ipv6_requested = match ipv6_env.to_lowercase().as_str() {
"" | "1" | "true" | "yes" => true,
"0" | "false" | "no" => false,
_ => true,
};
let mycelium_if_cfg = std::env::var("HERO_VIRT_MYCELIUM_IF").unwrap_or_else(|_| "mycelium".into());
let mut ipv6_bridge_cidr: Option<String> = None;
let mut mycelium_if_opt: Option<String> = None;
if ipv6_requested {
if let Ok(cidr) = std::env::var("HERO_VIRT_IPV6_BRIDGE_CIDR") {
// Explicit override for bridge IPv6 (e.g., "400:...::2/64") but still require mycelium iface presence.
// Validate mycelium interface and that it has IPv6 configured.
let _ = get_mycelium_ipv6_addr(&mycelium_if_cfg)?; // returns DependencyMissing on failure
ipv6_bridge_cidr = Some(cidr);
mycelium_if_opt = Some(mycelium_if_cfg.clone());
} else {
// Auto-derive from mycelium node address; error out if not detected.
println!("auto-deriving mycelium address...");
let (ifname, myc_addr) = get_mycelium_ipv6_addr(&mycelium_if_cfg)?;
println!("on if {ifname}, got myc addr: {myc_addr}");
let (_pfx, router_cidr) = derive_ipv6_prefix_from_mycelium(&myc_addr)?;
println!("derived pfx: {_pfx} and router cidr: {router_cidr}");
ipv6_bridge_cidr = Some(router_cidr);
mycelium_if_opt = Some(ifname);
}
}
// Ensure host-side networking (requires root privileges / CAP_NET_ADMIN)
ensure_host_net_prereq_dnsmasq_nftables(
&bridge_name,
&bridge_addr_cidr,
&subnet_cidr,
&dhcp_start,
&dhcp_end,
ipv6_bridge_cidr.as_deref(),
mycelium_if_opt.as_deref(),
)?;
// Ensure a TAP device for this VM and attach to the bridge
let tap_name = ensure_tap_for_vm(&bridge_name, id)?;
println!("TAP device for vm called: {tap_name}");
// Stable locally-administered MAC derived from VM id
let mac = stable_mac_from_id(id);
println!("MAC for vm: {mac}");
parts.push("--net".into());
parts.push(format!("tap={},mac={}", tap_name, mac));
}
// Append any user-provided extra args, sans any '--disk' we already consolidated
for e in extra_args_sans_disks {
parts.push(e);
}
let args_str = shell_join(&parts);
let script = format!(
"#!/bin/bash -e
nohup {} > '{}' 2>&1 &
echo $! > '{}'
",
// Execute via a bash heredoc to avoid any quoting pitfalls
let heredoc = format!(
"bash -e -s <<'EOF'\nnohup {} > '{}' 2>&1 &\necho $! > '{}'\nEOF\n",
args_str,
log_file,
vm_pid_path(id).to_string_lossy()
);
// Execute script; this will background cloud-hypervisor and return
let result = sal_process::run(&script).execute();
println!("executing command:\n{heredoc}");
// Execute command; this will background cloud-hypervisor and return
let result = sal_process::run(&heredoc).execute();
match result {
Ok(res) => {
if !res.success {
@@ -340,6 +500,34 @@ echo $! > '{}'
Ok(s) => s.trim().parse::<i64>().ok(),
Err(_) => None,
};
println!("reading PID back: {} - (if 0 == not found)", pid.unwrap_or(0));
// Quick health check: ensure process did not exit immediately due to CLI errors (e.g., duplicate flags)
if let Some(pid_num) = pid {
thread::sleep(Duration::from_millis(300));
if !proc_exists(pid_num) {
// Tail log to surface the error cause
let tail_cmd = format!("tail -n 200 {}", shell_escape(&log_file));
println!("executing tail_cmd command:\n{tail_cmd}");
let tail = sal_process::run(&tail_cmd).die(false).silent(true).execute();
let mut log_snip = String::new();
if let Ok(res) = tail {
if res.success {
log_snip = res.stdout;
} else {
log_snip = format!("{}{}", res.stdout, res.stderr);
}
}
return Err(CloudHvError::CommandFailed(format!(
"cloud-hypervisor exited immediately after start. Log tail:\n{}",
log_snip
)));
}
} else {
return Err(CloudHvError::CommandFailed(
"failed to obtain cloud-hypervisor PID (start script did not write pid)".into(),
));
}
// Update state
rec.runtime.pid = pid;
@@ -349,6 +537,76 @@ echo $! > '{}'
let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
write_json(&vm_json_path(id), &value)?;
println!("wrote JSON for VM");
// Best-effort: discover and print guest IPv4/IPv6 addresses (default-net path)
// Give DHCP/ND a moment
println!("waiting 5 secs for DHCP/ND");
thread::sleep(Duration::from_millis(5000));
let bridge_name = std::env::var("HERO_VIRT_BRIDGE_NAME").unwrap_or_else(|_| "br-hero".into());
let mac_lower = stable_mac_from_id(id).to_lowercase();
// IPv4 from dnsmasq leases (pinned per-bridge leasefile)
// Path set in ensure_host_net_prereq_dnsmasq_nftables: /var/lib/misc/dnsmasq-hero-$BR.leases
let lease_path = std::env::var("HERO_VIRT_DHCP_LEASE_FILE")
.unwrap_or_else(|_| format!("/var/lib/misc/dnsmasq-hero-{}.leases", bridge_name));
// Parse dnsmasq leases directly to avoid shell quoting/pipelines
let ipv4 = (|| {
let deadline = std::time::Instant::now() + Duration::from_secs(12);
loop {
if let Ok(content) = fs::read_to_string(&lease_path) {
let mut last_ip: Option<String> = None;
for line in content.lines() {
let cols: Vec<&str> = line.split_whitespace().collect();
if cols.len() >= 3 && cols[1].eq_ignore_ascii_case(&mac_lower) {
last_ip = Some(cols[2].to_string());
}
}
if last_ip.is_some() {
return last_ip;
}
}
if std::time::Instant::now() >= deadline {
return None;
}
thread::sleep(Duration::from_millis(800));
}
})();
println!(
"Got IPv4 from dnsmasq lease ({}): {}",
lease_path,
ipv4.clone().unwrap_or("not found".to_string())
);
// IPv6 from neighbor table on the bridge (exclude link-local), parsed in Rust
let ipv6 = (|| {
let cmd = format!("ip -6 neigh show dev {}", bridge_name);
if let Ok(res) = sal_process::run(&cmd).silent(true).die(false).execute() {
if res.success {
let mac_pat = format!("lladdr {}", mac_lower);
for line in res.stdout.lines() {
let lt = line.trim();
if lt.to_lowercase().contains(&mac_pat) {
if let Some(addr) = lt.split_whitespace().next() {
if !addr.starts_with("fe80") && !addr.is_empty() {
return Some(addr.to_string());
}
}
}
}
}
}
None
})();
println!("Got IPv6 from neighbor table on bridge: {}", ipv6.clone().unwrap_or("not found".to_string()));
println!(
"[cloudhv] VM '{}' guest addresses: IPv4={}, IPv6={}",
id,
ipv4.as_deref().unwrap_or(""),
ipv6.as_deref().unwrap_or("")
);
Ok(())
}
@@ -480,6 +738,256 @@ pub fn vm_list() -> Result<Vec<VmRecord>, CloudHvError> {
Ok(out)
}
/// Derive a deterministic TAP device name for a VM id.
/// Linux IFNAMSIZ is typically 15; "tap-" + 10 hex digits = 14 chars, safely within it.
fn tap_name_for_id(id: &str) -> String {
    let mut hasher = DefaultHasher::new();
    id.hash(&mut hasher);
    let full_hex = format!("{:016x}", hasher.finish());
    let mut name = String::from("tap-");
    name.push_str(&full_hex[..10]);
    name
}
/// Create (if absent) a TAP device for this VM, enslave it to `bridge_name`,
/// and bring it up. Idempotent; returns the TAP device name.
fn ensure_tap_for_vm(bridge_name: &str, id: &str) -> Result<String, CloudHvError> {
    let tap = tap_name_for_id(id);
    let script_body = format!(
        "BR={br}
TAP={tap}
UIDX=$(id -u)
GIDX=$(id -g)
# Create TAP if missing and assign to current user/group
ip link show \"$TAP\" >/dev/null 2>&1 || ip tuntap add dev \"$TAP\" mode tap user \"$UIDX\" group \"$GIDX\"
# Enslave to bridge and bring up (idempotent)
ip link set \"$TAP\" master \"$BR\" 2>/dev/null || true
ip link set \"$TAP\" up
",
        br = shell_escape(bridge_name),
        tap = shell_escape(&tap),
    );
    let wrapped = format!("bash -e -s <<'EOF'\n{}\nEOF\n", script_body);
    // Collapse both failure shapes (non-zero exit, spawn error) into one message.
    let failure = match sal_process::run(&wrapped).silent(true).execute() {
        Ok(res) if res.success => return Ok(tap),
        Ok(res) => res.stderr,
        Err(e) => e.to_string(),
    };
    Err(CloudHvError::CommandFailed(format!(
        "Failed to ensure TAP '{}': {}",
        tap, failure
    )))
}
/// Derive a stable MAC address from a VM id: the top 48 bits of the id's
/// hash, with the first octet forced to locally-administered unicast.
fn stable_mac_from_id(id: &str) -> String {
    let mut h = DefaultHasher::new();
    id.hash(&mut h);
    let v = h.finish();
    let mut bytes = [
        (v >> 40) as u8,
        (v >> 32) as u8,
        (v >> 24) as u8,
        (v >> 16) as u8,
        (v >> 8) as u8,
        v as u8,
    ];
    // Locally administered (0x02 set), unicast (0x01 clear).
    bytes[0] = (bytes[0] & 0xfe) | 0x02;
    format!(
        "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}",
        bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5]
    )
}
/// Discover the mycelium IPv6 address by inspecting the interface itself (no CLI dependency).
/// Returns (interface_name, first global IPv6 address found on the interface).
fn get_mycelium_ipv6_addr(iface_hint: &str) -> Result<(String, String), CloudHvError> {
    // An env override takes precedence over the caller-provided hint.
    let iface = std::env::var("HERO_VIRT_MYCELIUM_IF").unwrap_or_else(|_| iface_hint.to_string());
    // Query IPv6 addresses configured on the interface.
    let show_cmd = format!("ip -6 addr show dev {}", shell_escape(&iface));
    let stdout = match sal_process::run(&show_cmd).silent(true).die(false).execute() {
        Ok(r) if r.success => r.stdout,
        _ => {
            return Err(CloudHvError::DependencyMissing(format!(
                "mycelium interface '{}' not found or no IPv6 configured",
                iface
            )))
        }
    };
    // Return the first parseable global IPv6 address on the interface.
    // Example line: "inet6 578:9fcf:.../7 scope global"
    for raw in stdout.lines() {
        let line = raw.trim();
        if !(line.starts_with("inet6 ") && line.contains("scope global")) {
            continue;
        }
        let tokens: Vec<&str> = line.split_whitespace().collect();
        if let Some(addr_cidr) = tokens.get(1) {
            let candidate = addr_cidr.split('/').next().unwrap_or("").trim();
            println!("got addr from host: {candidate}");
            if !candidate.is_empty() && candidate.parse::<std::net::Ipv6Addr>().is_ok() {
                return Ok((iface, candidate.to_string()));
            }
        }
    }
    Err(CloudHvError::DependencyMissing(format!(
        "no global IPv6 found on interface '{}'",
        iface
    )))
}
/// Derive a /64 prefix P from the mycelium IPv6 and return (P/64, P::2/64).
fn derive_ipv6_prefix_from_mycelium(m: &str) -> Result<(String, String), CloudHvError> {
    let addr: std::net::Ipv6Addr = m.parse().map_err(|e| {
        CloudHvError::InvalidSpec(format!("invalid mycelium IPv6 address '{}': {}", m, e))
    })?;
    // Keep the top four hextets (the /64) and zero the host half.
    let [s0, s1, s2, s3, ..] = addr.segments();
    let prefix = std::net::Ipv6Addr::new(s0, s1, s2, s3, 0, 0, 0, 0);
    // The bridge/router address inside that prefix is P::2.
    let router = std::net::Ipv6Addr::new(s0, s1, s2, s3, 0, 0, 0, 2);
    Ok((format!("{}/64", prefix), format!("{}/64", router)))
}
/// Idempotently provision host-side networking used by the default VM path:
/// a Linux bridge, IPv4 NAT via an nftables `ip hero` table, and dnsmasq
/// DHCPv4 (plus IPv6 RA/DHCPv6 when `ipv6_bridge_cidr` is `Some`).
///
/// Every step in the embedded script uses replace/ensure semantics, so the
/// function is safe to re-run on every VM start. Requires root privileges /
/// CAP_NET_ADMIN on the host.
///
/// Errors:
/// - `DependencyMissing` when ip/nft/dnsmasq/systemctl is absent from PATH
/// - `CommandFailed` when the generated shell script exits non-zero
///
/// NOTE(review): `mycelium_if` is accepted but never referenced in the script
/// body below — presumably reserved for future IPv6 uplink wiring; confirm.
fn ensure_host_net_prereq_dnsmasq_nftables(
    bridge_name: &str,
    bridge_addr_cidr: &str,
    subnet_cidr: &str,
    dhcp_start: &str,
    dhcp_end: &str,
    ipv6_bridge_cidr: Option<&str>,
    mycelium_if: Option<&str>,
) -> Result<(), CloudHvError> {
    // Dependencies
    for bin in ["ip", "nft", "dnsmasq", "systemctl"] {
        if sal_process::which(bin).is_none() {
            return Err(CloudHvError::DependencyMissing(format!(
                "{} not found on PATH; required for VM networking",
                bin
            )));
        }
    }
    // Prepare optional IPv6 value (empty string when disabled)
    let ipv6_cidr = ipv6_bridge_cidr.unwrap_or("");
    // Build idempotent setup script
    let body = format!(
        "set -e
BR={br}
BR_ADDR={br_addr}
SUBNET={subnet}
DHCP_START={dstart}
DHCP_END={dend}
IPV6_CIDR={v6cidr}
LEASE_FILE=/var/lib/misc/dnsmasq-hero-$BR.leases
# Determine default WAN interface
WAN_IF=$(ip -o route show default | awk '{{print $5}}' | head -n1)
if [ -z \"$WAN_IF\" ]; then
echo \"No default WAN interface detected (required for IPv4 NAT)\" >&2
exit 2
fi
# Bridge creation (idempotent)
ip link show \"$BR\" >/dev/null 2>&1 || ip link add name \"$BR\" type bridge
ip addr replace \"$BR_ADDR\" dev \"$BR\"
ip link set \"$BR\" up
# IPv6 bridge address and forwarding (optional)
if [ -n \"$IPV6_CIDR\" ]; then
ip -6 addr replace \"$IPV6_CIDR\" dev \"$BR\"
sysctl -w net.ipv6.conf.all.forwarding=1 >/dev/null || true
fi
# IPv4 forwarding
sysctl -w net.ipv4.ip_forward=1 >/dev/null
# nftables NAT (idempotent) for IPv4
nft list table ip hero >/dev/null 2>&1 || nft add table ip hero
nft list chain ip hero postrouting >/dev/null 2>&1 || nft add chain ip hero postrouting {{ type nat hook postrouting priority 100 \\; }}
nft list chain ip hero postrouting | grep -q \"ip saddr $SUBNET oifname \\\"$WAN_IF\\\" masquerade\" \
|| nft add rule ip hero postrouting ip saddr $SUBNET oifname \"$WAN_IF\" masquerade
# dnsmasq DHCPv4 + RA/DHCPv6 config (idempotent)
mkdir -p /etc/dnsmasq.d
mkdir -p /var/lib/misc
CFG=/etc/dnsmasq.d/hero-$BR.conf
TMP=/etc/dnsmasq.d/.hero-$BR.conf.new
RELOAD=0
CONF=/etc/dnsmasq.conf
# Ensure conf-dir includes /etc/dnsmasq.d (simple fixed-string check to avoid regex escapes in Rust)
if ! grep -qF \"conf-dir=/etc/dnsmasq.d\" \"$CONF\"; then
printf '%s\n' 'conf-dir=/etc/dnsmasq.d,*.conf' >> \"$CONF\"
RELOAD=1
fi
# Ensure lease file exists and is writable by dnsmasq user
touch \"$LEASE_FILE\" || true
chown dnsmasq:dnsmasq \"$LEASE_FILE\" 2>/dev/null || true
# Always include IPv4 section
printf '%s\n' \
\"interface=$BR\" \
\"bind-interfaces\" \
\"dhcp-authoritative\" \
\"dhcp-range=$DHCP_START,$DHCP_END,12h\" \
\"dhcp-option=option:dns-server,1.1.1.1,8.8.8.8\" \
\"dhcp-leasefile=$LEASE_FILE\" >\"$TMP\"
# Optionally append IPv6 RA/DHCPv6
if [ -n \"$IPV6_CIDR\" ]; then
printf '%s\n' \
\"enable-ra\" \
\"dhcp-range=::,constructor:BR_PLACEHOLDER,ra-names,64,12h\" \
\"dhcp-option=option6:dns-server,[2001:4860:4860::8888],[2606:4700:4700::1111]\" >>\"$TMP\"
sed -i \"s/BR_PLACEHOLDER/$BR/g\" \"$TMP\"
fi
if [ ! -f \"$CFG\" ] || ! cmp -s \"$CFG\" \"$TMP\"; then
mv \"$TMP\" \"$CFG\"
if systemctl is-active --quiet dnsmasq; then
systemctl reload dnsmasq || systemctl restart dnsmasq || true
else
systemctl enable --now dnsmasq || true
fi
else
rm -f \"$TMP\"
systemctl enable --now dnsmasq || true
fi
# Reload if main conf was updated to include conf-dir
if [ \"$RELOAD\" = \"1\" ]; then
systemctl reload dnsmasq || systemctl restart dnsmasq || true
fi
",
        br = shell_escape(bridge_name),
        br_addr = shell_escape(bridge_addr_cidr),
        subnet = shell_escape(subnet_cidr),
        dstart = shell_escape(dhcp_start),
        dend = shell_escape(dhcp_end),
        v6cidr = shell_escape(ipv6_cidr),
    );
    // Use a unique heredoc delimiter to avoid clashing with inner <<EOF blocks
    let heredoc_net = format!("bash -e -s <<'HERONET'\n{}\nHERONET\n", body);
    println!("executing command:\n{heredoc_net}");
    match sal_process::run(&heredoc_net).silent(true).execute() {
        Ok(res) if res.success => Ok(()),
        Ok(res) => Err(CloudHvError::CommandFailed(format!(
            "Host networking setup failed: {}",
            res.stderr
        ))),
        Err(e) => Err(CloudHvError::CommandFailed(format!(
            "Host networking setup failed: {}",
            e
        ))),
    }
}
/// Render a shell-safe command string from vector of tokens
fn shell_join(parts: &Vec<String>) -> String {
let mut s = String::new();

View File

@@ -0,0 +1,196 @@
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;
use sal_os;
use sal_process;
/// Host dependency check error
#[derive(Debug)]
pub enum HostCheckError {
    /// Wraps an I/O failure description.
    Io(String),
}

impl std::fmt::Display for HostCheckError {
    /// Renders as "IO error: <detail>".
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Single-variant enum: an irrefutable binding replaces the match.
        let HostCheckError::Io(detail) = self;
        write!(f, "IO error: {}", detail)
    }
}

impl std::error::Error for HostCheckError {}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HostCheckReport {
    /// True when no critical problems were found.
    pub ok: bool,
    /// Blocking problems (missing binaries, images, privileges, paths).
    pub critical: Vec<String>,
    /// Non-blocking problems; currently never populated by host_check_deps.
    pub optional: Vec<String>,
    /// Informational observations and hints.
    pub notes: Vec<String>,
}
/// Root directory for per-VM state: $HOME/hero/virt/vms (or /tmp/... when HOME is unset).
fn hero_vm_root() -> String {
    let home_dir = match std::env::var("HOME") {
        Ok(h) => h,
        Err(_) => "/tmp".to_string(),
    };
    format!("{}/hero/virt/vms", home_dir.trim_end_matches('/'))
}
/// True when `name` cannot be resolved on PATH.
fn bin_missing(name: &str) -> bool {
    match sal_process::which(name) {
        Some(_) => false,
        None => true,
    }
}
/// Perform host dependency checks required for image preparation and Cloud
/// Hypervisor runs: root privileges, CH/ch-remote binaries, qemu and
/// filesystem tooling, networking helpers, base images and firmware under
/// /images, VM-root writability, and (when IPv6 is enabled) Mycelium.
///
/// Returns a structured report that Rhai can consume easily; `ok` is true
/// iff `critical` is empty. The function itself never fails in the current
/// implementation — problems are reported inside the `HostCheckReport`.
pub fn host_check_deps() -> Result<HostCheckReport, HostCheckError> {
    let mut critical: Vec<String> = Vec::new();
    let mut optional: Vec<String> = Vec::new();
    let mut notes: Vec<String> = Vec::new();

    // Must run as root (nbd/mount/bridge provisioning require it).
    let uid_res = sal_process::run("id -u").silent(true).die(false).execute();
    match uid_res {
        Ok(r) if r.success => {
            let uid_s = r.stdout.trim();
            if uid_s != "0" {
                critical.push("not running as root (required for nbd/mount/network)".into());
            }
        }
        _ => {
            notes.push("failed to determine uid via `id -u`".into());
        }
    }

    // Core binaries required for CH and image manipulation.
    let core_bins = [
        "cloud-hypervisor",        // CH binary (dynamic)
        "cloud-hypervisor-static", // CH static (if present)
        "ch-remote",
        "ch-remote-static",
        // hypervisor-fw is expected at /images/hypervisor-fw (not on PATH)
        "qemu-img",
        "qemu-nbd",
        "blkid",
        "tune2fs",
        "partprobe",
        "mount",
        "umount",
        "sed",
        "awk",
        "modprobe",
    ];
    // Networking helpers (for the default bridge + NAT path).
    let net_bins = ["ip", "nft", "dnsmasq", "systemctl"];

    // At least one CH variant and one ch-remote variant must exist.
    if bin_missing("cloud-hypervisor") && bin_missing("cloud-hypervisor-static") {
        critical.push("cloud-hypervisor or cloud-hypervisor-static not found on PATH".into());
    }
    if bin_missing("ch-remote") && bin_missing("ch-remote-static") {
        critical.push("ch-remote or ch-remote-static not found on PATH".into());
    }
    // Remaining core tools (index 4 onward skips the CH/ch-remote variants
    // handled above) plus networking tools are all critical, since the
    // default VM path provisions bridge/DHCP on the host.
    for b in core_bins[4..].iter().chain(net_bins.iter()) {
        if bin_missing(b) {
            critical.push(format!("missing binary: {}", b));
        }
    }

    // Filesystem/path checks: /images must exist and contain the expected
    // base images (ubuntu, alpine) plus the hypervisor firmware.
    let images_root = "/images";
    if !Path::new(images_root).exists() {
        critical.push(format!("{} not found (expected base images directory)", images_root));
    } else {
        let ubuntu_path = format!("{}/noble-server-cloudimg-amd64.img", images_root);
        let alpine_path = format!("{}/alpine-virt-cloudimg-amd64.qcow2", images_root);
        let fw_path = format!("{}/hypervisor-fw", images_root);
        if !Path::new(&ubuntu_path).exists() {
            critical.push(format!("missing base image: {}", ubuntu_path));
        }
        if !Path::new(&alpine_path).exists() {
            critical.push(format!("missing base image: {}", alpine_path));
        }
        if !Path::new(&fw_path).exists() {
            critical.push(format!("missing firmware: {}", fw_path));
        }
    }

    // Ensure the VM root directory is creatable AND writable (probe file).
    let vm_root = hero_vm_root();
    if let Err(e) = sal_os::mkdir(&vm_root) {
        critical.push(format!(
            "cannot create/access VM root directory {}: {}",
            vm_root, e
        ));
    } else {
        let probe_path = format!("{}/.__hero_probe", vm_root);
        if let Err(e) = fs::write(&probe_path, b"ok") {
            critical.push(format!(
                "VM root not writable {}: {}",
                vm_root, e
            ));
        } else {
            let _ = fs::remove_file(&probe_path);
        }
    }

    // IPv6-over-Mycelium checks.
    // FIX: vm_start treats HERO_VIRT_IPV6_ENABLE as enabled by default
    // ("", "1", "true", "yes" => on; "0", "false", "no" => off). This check
    // previously only enabled itself for "1"/"true", so a default-configured
    // vm_start could fail on a host this check had passed. Mirror vm_start's
    // parsing so the report reflects what vm_start will actually require.
    let ipv6_env = std::env::var("HERO_VIRT_IPV6_ENABLE").unwrap_or_else(|_| "".into());
    let ipv6_enabled = !matches!(ipv6_env.to_lowercase().as_str(), "0" | "false" | "no");
    if ipv6_enabled {
        // Require mycelium CLI (used below for best-effort inspection).
        if bin_missing("mycelium") {
            critical.push("mycelium CLI not found on PATH (required when HERO_VIRT_IPV6_ENABLE=true)".into());
        }
        // Validate interface presence and a global IPv6 on it.
        let ifname = std::env::var("HERO_VIRT_MYCELIUM_IF").unwrap_or_else(|_| "mycelium".into());
        let check_if = sal_process::run(&format!("ip -6 addr show dev {}", ifname))
            .silent(true)
            .die(false)
            .execute();
        match check_if {
            Ok(r) if r.success => {
                let out = r.stdout;
                if !(out.contains("inet6") && out.contains("scope global")) {
                    notes.push(format!(
                        "iface '{}' present but no global IPv6 detected; Mycelium may not be up yet",
                        ifname
                    ));
                }
            }
            _ => {
                critical.push(format!(
                    "iface '{}' not found or no IPv6; ensure Mycelium is running",
                    ifname
                ));
            }
        }
        // Best-effort: parse `mycelium inspect` for Address
        let insp = sal_process::run("mycelium inspect").silent(true).die(false).execute();
        match insp {
            Ok(res) if res.success && res.stdout.contains("Address:") => {
                // good enough
            }
            _ => {
                notes.push("`mycelium inspect` did not return an Address; IPv6 overlay may be unavailable".into());
            }
        }
    }

    // Summarize: ok iff nothing critical was recorded.
    let ok = critical.is_empty();
    Ok(HostCheckReport {
        ok,
        critical,
        optional,
        notes,
    })
}

View File

@@ -0,0 +1,569 @@
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;
use sal_os;
use sal_process;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::net::Ipv6Addr;
#[derive(Debug)]
pub enum ImagePrepError {
    Io(String),
    InvalidInput(String),
    CommandFailed(String),
    NotImplemented(String),
}

impl std::fmt::Display for ImagePrepError {
    /// Renders as "<category>: <detail>".
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let (category, detail) = match self {
            ImagePrepError::Io(e) => ("IO error", e),
            ImagePrepError::InvalidInput(e) => ("Invalid input", e),
            ImagePrepError::CommandFailed(e) => ("Command failed", e),
            ImagePrepError::NotImplemented(e) => ("Not implemented", e),
        };
        write!(f, "{}: {}", category, detail)
    }
}

impl std::error::Error for ImagePrepError {}
/// Supported guest image flavors (serialized lowercase: "ubuntu" / "alpine").
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Flavor {
    Ubuntu,
    Alpine,
}
/// Netplan generation options written into the guest image.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetPlanOpts {
    /// Enable DHCPv4 in the guest.
    #[serde(default = "default_dhcp4")]
    pub dhcp4: bool,
    /// Enable DHCPv6 in the guest.
    /// NOTE(review): bare #[serde(default)] yields `false` here, while
    /// `Default for NetPlanOpts` sets `true` — confirm which is intended.
    #[serde(default)]
    pub dhcp6: bool,
    /// Static IPv6 address to assign in guest (temporary behavior)
    /// NOTE(review): not consumed by image_prepare(), which derives the guest
    /// IPv6 from the host mycelium interface instead — verify before relying on it.
    pub ipv6_addr: Option<String>, // e.g., "400::10/64"
    /// IPv6 default gateway; same caveat as `ipv6_addr`.
    pub gw6: Option<String>, // e.g., "400::1"
}
/// Serde default for `NetPlanOpts::dhcp4` (DHCPv4 enabled).
fn default_dhcp4() -> bool {
    true
}
impl Default for NetPlanOpts {
    /// Defaults: DHCPv4 and DHCPv6 on, no static IPv6 address or gateway.
    /// NOTE(review): `dhcp6: true` here disagrees with the serde field default
    /// (false), so deserializing `{}` and calling `Default::default()` differ.
    fn default() -> Self {
        Self {
            dhcp4: true,
            dhcp6: true,
            ipv6_addr: None,
            gw6: None,
        }
    }
}
/// Input options for [`image_prepare`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImagePrepOptions {
    /// Guest image flavor to prepare (only Ubuntu is implemented).
    pub flavor: Flavor,
    /// VM id (used for working directory layout and tap/mac derivations)
    pub id: String,
    /// Optional source path override, defaults to /images/<flavor default filename>
    pub source: Option<String>,
    /// Optional VM target directory, defaults to $HOME/hero/virt/vms/<id>
    pub target_dir: Option<String>,
    /// Netplan options
    #[serde(default)]
    pub net: NetPlanOpts,
    /// Disable cloud-init networking
    #[serde(default = "default_disable_cloud_init_net")]
    pub disable_cloud_init_net: bool,
}
/// Serde default for `ImagePrepOptions::disable_cloud_init_net` (disabled by default).
fn default_disable_cloud_init_net() -> bool {
    true
}
/// Derive a deterministic, locally-administered unicast MAC address from a VM id.
///
/// The low six bytes of the id's `DefaultHasher` digest become the MAC octets;
/// the first octet has its multicast bit cleared and its locally-administered
/// bit set so the address never collides with vendor-assigned hardware MACs.
fn stable_mac_from_id(id: &str) -> String {
    let mut hasher = DefaultHasher::new();
    id.hash(&mut hasher);
    let digest = hasher.finish();
    // Extract bytes 5..0 of the digest, most significant first.
    let mut octets: Vec<u8> = (0..6)
        .map(|i| ((digest >> (40 - 8 * i)) & 0xff) as u8)
        .collect();
    // Clear multicast bit (0x01), set locally-administered bit (0x02).
    octets[0] = (octets[0] & 0xfe) | 0x02;
    octets
        .iter()
        .map(|b| format!("{:02x}", b))
        .collect::<Vec<_>>()
        .join(":")
}
/// Artifacts produced by [`image_prepare`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImagePrepResult {
    /// Path of the raw disk to hand to Cloud Hypervisor.
    pub raw_disk: String,
    /// New filesystem UUID of the root partition.
    pub root_uuid: String,
    /// New filesystem UUID of the boot partition.
    pub boot_uuid: String,
    /// Per-VM qcow2 working copy kept alongside the raw disk.
    pub work_qcow2: String,
}
/// Base directory for per-VM state: `$HOME/hero/virt/vms` ("/tmp" when HOME is unset).
fn hero_vm_root() -> String {
    let home = match std::env::var("HOME") {
        Ok(h) => h,
        Err(_) => String::from("/tmp"),
    };
    // Trim trailing slashes so the joined path never contains "//".
    format!("{}/hero/virt/vms", home.trim_end_matches('/'))
}
/// Default base-image path for a flavor; the bool mirrors the original
/// "must exist" flag (always true for both flavors).
fn default_source_for_flavor(flavor: &Flavor) -> (&'static str, bool) {
    let path = match flavor {
        Flavor::Ubuntu => "/images/noble-server-cloudimg-amd64.img",
        Flavor::Alpine => "/images/alpine-virt-cloudimg-amd64.qcow2",
    };
    (path, true)
}
/// Shorthand for building a `CommandFailed` error from a message.
fn fail(e: &str) -> ImagePrepError {
    ImagePrepError::CommandFailed(String::from(e))
}
/// Run a shell script silently without dying on failure.
///
/// Returns the command result on success; a non-zero exit or a launch error is
/// converted into `ImagePrepError::CommandFailed` (stdout+stderr concatenated).
fn run_script(script: &str) -> Result<sal_process::CommandResult, ImagePrepError> {
    let outcome = sal_process::run(script).silent(true).die(false).execute();
    match outcome {
        Err(e) => Err(ImagePrepError::CommandFailed(e.to_string())),
        Ok(res) if res.success => Ok(res),
        Ok(res) => Err(ImagePrepError::CommandFailed(format!(
            "{}{}",
            res.stdout, res.stderr
        ))),
    }
}
/// Prepare a base cloud image for booting under Cloud Hypervisor:
/// - make a per-VM working copy
/// - attach via nbd, mount root/boot
/// - retag UUIDs, update fstab, write minimal grub.cfg
/// - generate netplan (DHCPv4, static IPv6 placeholder), disable cloud-init net
/// - convert to raw disk in VM dir
pub fn image_prepare(opts: &ImagePrepOptions) -> Result<ImagePrepResult, ImagePrepError> {
    // Resolve source image (explicit override or the flavor's default path).
    let (def_src, _must_exist) = default_source_for_flavor(&opts.flavor);
    let src = opts.source.clone().unwrap_or_else(|| def_src.to_string());
    if !Path::new(&src).exists() {
        return Err(ImagePrepError::InvalidInput(format!(
            "source image not found: {}",
            src
        )));
    }
    // Resolve VM dir
    let vm_dir = opts
        .target_dir
        .clone()
        .unwrap_or_else(|| format!("{}/{}", hero_vm_root(), opts.id));
    sal_os::mkdir(&vm_dir).map_err(|e| ImagePrepError::Io(e.to_string()))?;
    // Work qcow2 copy path and mount points
    let work_qcow2 = format!("{}/work.qcow2", vm_dir);
    let raw_path = format!("{}/disk.raw", vm_dir);
    let mnt_root = format!("/mnt/hero-img/{}/root", opts.id);
    let mnt_boot = format!("/mnt/hero-img/{}/boot", opts.id);
    // Only Ubuntu implemented for now
    match opts.flavor {
        Flavor::Ubuntu => {
            // Build bash script that performs all steps and echos "RAW|ROOT_UUID|BOOT_UUID" at end
            let disable_ci_net = opts.disable_cloud_init_net;
            // IPv6 static guest assignment (derive from mycelium interface) - enabled by default
            // If HERO_VIRT_IPV6_STATIC_GUEST=false, keep dynamic behavior (SLAAC/DHCPv6).
            let static_v6 = std::env::var("HERO_VIRT_IPV6_STATIC_GUEST")
                .map(|v| matches!(v.to_lowercase().as_str(), "" | "1" | "true" | "yes"))
                .unwrap_or(true);
            let myc_if = std::env::var("HERO_VIRT_MYCELIUM_IF").unwrap_or_else(|_| "mycelium".into());
            // Discover host mycelium global IPv6 in 400::/7 from the interface
            let mut host_v6: Option<Ipv6Addr> = None;
            if static_v6 {
                let cmd = format!("ip -6 addr show dev {}", shell_escape(&myc_if));
                if let Ok(r) = sal_process::run(&cmd).silent(true).die(false).execute() {
                    if r.success {
                        for l in r.stdout.lines() {
                            let lt = l.trim();
                            if lt.starts_with("inet6 ") && lt.contains("scope global") {
                                if let Some(addr_cidr) = lt.split_whitespace().nth(1) {
                                    let addr_only = addr_cidr.split('/').next().unwrap_or("").trim();
                                    if let Ok(ip) = addr_only.parse::<Ipv6Addr>() {
                                        // 400::/7 membership: top 7 bits of segment 0 must be 0000_010.
                                        let seg0 = ip.segments()[0];
                                        if (seg0 & 0xFE00) == 0x0400 {
                                            host_v6 = Some(ip);
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
            // Derive per-host /64 from mycelium and deterministic per-VM guest address
            // NOTE(review): opts.net.ipv6_addr / gw6 are never consulted here; the
            // static guest address is always derived from the host interface — confirm intended.
            let mut np_v6_block = String::new();
            let mut dhcp6_effective = opts.net.dhcp6;
            if static_v6 {
                if let Some(h) = host_v6 {
                    let seg = h.segments();
                    // Router = P::2; Guest address = P::<stable suffix>
                    let mut hasher = DefaultHasher::new();
                    opts.id.hash(&mut hasher);
                    // Even suffix, never 0 or 2 (2 is the router address).
                    let mut suffix = (hasher.finish() as u16) & 0xfffe;
                    if suffix == 0 || suffix == 2 {
                        suffix = 0x100;
                    }
                    let guest_ip = Ipv6Addr::new(seg[0], seg[1], seg[2], seg[3], 0, 0, 0, suffix).to_string();
                    let gw_ip = Ipv6Addr::new(seg[0], seg[1], seg[2], seg[3], 0, 0, 0, 2).to_string();
                    // Inject a YAML block for static v6
                    np_v6_block = format!(
" addresses:\n - {}/64\n routes:\n - to: \"::/0\"\n via: {}\n",
                        guest_ip, gw_ip
                    );
                    // Disable dhcp6 when we provide a static address
                    dhcp6_effective = false;
                }
            }
            // Keep script small and robust; avoid brace-heavy awk to simplify escaping.
            // Compute stable MAC (must match what vm_start() uses) and use it to match NIC in netplan.
            let vm_mac = stable_mac_from_id(&opts.id);
            let script = format!(
"#!/bin/bash -e
set -euo pipefail
SRC={src}
VM_DIR={vm_dir}
WORK={work}
MNT_ROOT={mnt_root}
MNT_BOOT={mnt_boot}
RAW={raw}
mkdir -p \"$VM_DIR\"
mkdir -p \"$(dirname \"$MNT_ROOT\")\"
mkdir -p \"$MNT_ROOT\" \"$MNT_BOOT\"
# Make per-VM working copy (reflink if supported)
cp --reflink=auto -f \"$SRC\" \"$WORK\"
# Load NBD with sufficient partitions
modprobe nbd max_part=63
# Pick a free /dev/nbdX and connect the qcow2
NBD=\"\"
for i in $(seq 0 15); do
DEV=\"/dev/nbd$i\"
# Skip devices that have any mounted partitions (avoid reusing in-use NBDs)
if findmnt -rn -S \"$DEV\" >/dev/null 2>&1 || \
findmnt -rn -S \"${{DEV}}p1\" >/dev/null 2>&1 || \
findmnt -rn -S \"${{DEV}}p14\" >/dev/null 2>&1 || \
findmnt -rn -S \"${{DEV}}p15\" >/dev/null 2>&1 || \
findmnt -rn -S \"${{DEV}}p16\" >/dev/null 2>&1; then
continue
fi
# Ensure it's not connected (ignore errors if already disconnected)
qemu-nbd --disconnect \"$DEV\" >/dev/null 2>&1 || true
if qemu-nbd --format=qcow2 --connect=\"$DEV\" \"$WORK\"; then
NBD=\"$DEV\"
break
fi
done
if [ -z \"$NBD\" ]; then
echo \"No free /dev/nbdX device available\" >&2
exit 1
fi
echo \"Selected NBD: $NBD\" >&2
# Settle and probe partitions
udevadm settle >/dev/null 2>&1 || true
blockdev --rereadpt \"$NBD\" >/dev/null 2>&1 || true
partprobe \"$NBD\" >/dev/null 2>&1 || true
for t in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15; do
if [ -b \"${{NBD}}p1\" ]; then
sz=$(blockdev --getsize64 \"${{NBD}}p1\" 2>/dev/null || echo 0)
if [ \"$sz\" -gt 0 ]; then
break
fi
fi
sleep 0.4
udevadm settle >/dev/null 2>&1 || true
blockdev --rereadpt \"$NBD\" >/dev/null 2>&1 || true
partprobe \"$NBD\" >/dev/null 2>&1 || true
done
ROOT_DEV=\"${{NBD}}p1\"
# Prefer p16, else p15
if [ -b \"${{NBD}}p16\" ]; then
BOOT_DEV=\"${{NBD}}p16\"
elif [ -b \"${{NBD}}p15\" ]; then
BOOT_DEV=\"${{NBD}}p15\"
else
echo \"Boot partition not found on $NBD (tried p16 and p15)\" >&2
exit 33
fi
echo \"ROOT_DEV=$ROOT_DEV BOOT_DEV=$BOOT_DEV\" >&2
if [ ! -b \"$ROOT_DEV\" ]; then
echo \"Root partition not found: $ROOT_DEV\" >&2
exit 32
fi
cleanup() {{
set +e
umount \"$MNT_BOOT\" 2>/dev/null || true
umount \"$MNT_ROOT\" 2>/dev/null || true
[ -n \"$NBD\" ] && qemu-nbd --disconnect \"$NBD\" 2>/dev/null || true
rmmod nbd 2>/dev/null || true
}}
trap cleanup EXIT
# Ensure partitions are readable before mounting
for t in 1 2 3 4 5 6 7 8; do
szr=$(blockdev --getsize64 \"$ROOT_DEV\" 2>/dev/null || echo 0)
szb=$(blockdev --getsize64 \"$BOOT_DEV\" 2>/dev/null || echo 0)
if [ \"$szr\" -gt 0 ] && [ \"$szb\" -gt 0 ] && blkid \"$ROOT_DEV\" >/dev/null 2>&1; then
break
fi
sleep 0.4
udevadm settle >/dev/null 2>&1 || true
blockdev --rereadpt \"$NBD\" >/dev/null 2>&1 || true
partprobe \"$NBD\" >/dev/null 2>&1 || true
done
# Mount and mutate (with retries to avoid races)
mounted_root=0
for t in 1 2 3 4 5 6 7 8 9 10; do
if mount \"$ROOT_DEV\" \"$MNT_ROOT\"; then
mounted_root=1
break
fi
sleep 0.5
udevadm settle >/dev/null 2>&1 || true
partprobe \"$NBD\" >/dev/null 2>&1 || true
done
if [ \"$mounted_root\" -ne 1 ]; then
echo \"Failed to mount root $ROOT_DEV\" >&2
exit 32
fi
mounted_boot=0
for t in 1 2 3 4 5; do
if mount \"$BOOT_DEV\" \"$MNT_BOOT\"; then
mounted_boot=1
break
fi
sleep 0.5
udevadm settle >/dev/null 2>&1 || true
partprobe \"$NBD\" >/dev/null 2>&1 || true
done
if [ \"$mounted_boot\" -ne 1 ]; then
echo \"Failed to mount boot $BOOT_DEV\" >&2
exit 33
fi
# Change UUIDs (best-effort)
tune2fs -U random \"$ROOT_DEV\" || true
tune2fs -U random \"$BOOT_DEV\" || true
ROOT_UUID=$(blkid -o value -s UUID \"$ROOT_DEV\")
BOOT_UUID=$(blkid -o value -s UUID \"$BOOT_DEV\")
# Update fstab
sed -i \"s/UUID=[a-f0-9-]* \\/ /UUID=$ROOT_UUID \\/ /\" \"$MNT_ROOT/etc/fstab\"
sed -i \"s/UUID=[a-f0-9-]* \\/boot /UUID=$BOOT_UUID \\/boot /\" \"$MNT_ROOT/etc/fstab\"
# Minimal grub.cfg (note: braces escaped for Rust format!)
mkdir -p \"$MNT_BOOT/grub\"
KERNEL=$(ls -1 \"$MNT_BOOT\"/vmlinuz-* | sort -V | tail -n1 | xargs -n1 basename)
INITRD=$(ls -1 \"$MNT_BOOT\"/initrd.img-* | sort -V | tail -n1 | xargs -n1 basename)
cat > \"$MNT_BOOT/grub/grub.cfg\" << EOF
set default=0
set timeout=3
menuentry 'Ubuntu Cloud' {{
insmod part_gpt
insmod ext2
insmod gzio
search --no-floppy --fs-uuid --set=root $BOOT_UUID
linux /$KERNEL root=/dev/vda1 ro console=ttyS0
initrd /$INITRD
}}
EOF
# Netplan config
rm -f \"$MNT_ROOT\"/etc/netplan/*.yaml
mkdir -p \"$MNT_ROOT\"/etc/netplan
cat > \"$MNT_ROOT/etc/netplan/01-netconfig.yaml\" << EOF
network:
version: 2
ethernets:
eth0:
match:
macaddress: {vm_mac}
set-name: eth0
dhcp4: {dhcp4}
dhcp6: {dhcp6}
{np_v6_block} nameservers:
addresses: [8.8.8.8, 1.1.1.1, 2001:4860:4860::8888]
EOF
# Enable SSH password authentication and set a default password for 'ubuntu'
mkdir -p \"$MNT_ROOT/etc/cloud/cloud.cfg.d\"
printf '%s\n' 'ssh_pwauth: true' > \"$MNT_ROOT/etc/cloud/cloud.cfg.d/99-ssh-password-auth.cfg\"
mkdir -p \"$MNT_ROOT/etc/ssh/sshd_config.d\"
cat > \"$MNT_ROOT/etc/ssh/sshd_config.d/99-hero-password-auth.conf\" << EOF
PasswordAuthentication yes
KbdInteractiveAuthentication yes
UsePAM yes
EOF
# Set password for default user 'ubuntu'
if chroot \"$MNT_ROOT\" getent passwd ubuntu >/dev/null 2>&1; then
chroot \"$MNT_ROOT\" bash -c \"echo 'ubuntu:ubuntu' | chpasswd\" || true
fi
# Ensure openssh-server is present (some cloud images may omit it)
# Ensure SSH service enabled and keys generated on boot
chroot \"$MNT_ROOT\" systemctl unmask ssh 2>/dev/null || true
chroot \"$MNT_ROOT\" systemctl enable ssh 2>/dev/null || true
chroot \"$MNT_ROOT\" systemctl enable ssh-keygen.service 2>/dev/null || true
# Ensure sshd listens on both IPv4 and IPv6 explicitly
cat > \"$MNT_ROOT/etc/ssh/sshd_config.d/99-hero-address-family.conf\" << EOF
AddressFamily any
ListenAddress ::
ListenAddress 0.0.0.0
EOF
# If UFW is present, allow SSH and disable firewall (for tests)
if chroot \"$MNT_ROOT\" command -v ufw >/dev/null 2>&1; then
chroot \"$MNT_ROOT\" ufw allow OpenSSH || true
chroot \"$MNT_ROOT\" ufw disable || true
fi
if ! chroot \"$MNT_ROOT\" test -x /usr/sbin/sshd; then
cp -f /etc/resolv.conf \"$MNT_ROOT/etc/resolv.conf\" 2>/dev/null || true
chroot \"$MNT_ROOT\" bash -c \"apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends openssh-server\" || true
fi
# Ensure default user 'ubuntu' exists (fallback for minimal images)
if ! chroot \"$MNT_ROOT\" id -u ubuntu >/dev/null 2>&1; then
chroot \"$MNT_ROOT\" useradd -m -s /bin/bash ubuntu || true
echo \"ubuntu ALL=(ALL) NOPASSWD:ALL\" > \"$MNT_ROOT/etc/sudoers.d/90-ubuntu\" || true
chmod 0440 \"$MNT_ROOT/etc/sudoers.d/90-ubuntu\" || true
fi
# Re-assert password (covers both existing and newly created users)
if chroot \"$MNT_ROOT\" getent passwd ubuntu >/dev/null 2>&1; then
chroot \"$MNT_ROOT\" bash -c \"echo 'ubuntu:ubuntu' | chpasswd\" || true
fi
# Pre-generate host SSH keys so sshd can start immediately
chroot \"$MNT_ROOT\" ssh-keygen -A 2>/dev/null || true
mkdir -p \"$MNT_ROOT/var/run/sshd\"
# Also enable socket activation as a fallback
chroot \"$MNT_ROOT\" systemctl enable ssh.socket 2>/dev/null || true
# Disable cloud-init networking (optional but default)
if [ \"{disable_ci_net}\" = \"true\" ]; then
mkdir -p \"$MNT_ROOT/etc/cloud/cloud.cfg.d\"
echo \"network: {{config: disabled}}\" > \"$MNT_ROOT/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg\"
fi
# Convert prepared image to raw (ensure source not locked)
umount \"$MNT_BOOT\" 2>/dev/null || true
umount \"$MNT_ROOT\" 2>/dev/null || true
if [ -n \"$NBD\" ]; then
qemu-nbd --disconnect \"$NBD\" 2>/dev/null || true
rmmod nbd 2>/dev/null || true
fi
rm -f \"$RAW\"
qemu-img convert -U -f qcow2 -O raw \"$WORK\" \"$RAW\"
# Output result triple ONLY on stdout, then prevent any further trap output
echo \"RESULT:$RAW|$ROOT_UUID|$BOOT_UUID\"
trap - EXIT
exit 0
",
                src = shell_escape(&src),
                vm_dir = shell_escape(&vm_dir),
                work = shell_escape(&work_qcow2),
                mnt_root = shell_escape(&mnt_root),
                mnt_boot = shell_escape(&mnt_boot),
                raw = shell_escape(&raw_path),
                vm_mac = vm_mac,
                dhcp4 = if opts.net.dhcp4 { "true" } else { "false" },
                dhcp6 = if dhcp6_effective { "true" } else { "false" },
                np_v6_block = np_v6_block,
                disable_ci_net = if disable_ci_net { "true" } else { "false" },
            );
            // NOTE(review): debug aid — unconditionally dumps the full generated
            // prep script to stdout on every run; consider gating behind an env
            // flag or a logger before release.
            println!("{script}");
            let res = run_script(&script)?;
            // Prefer a RESULT:-prefixed line (robust against extra stdout noise)
            let mut marker: Option<String> = None;
            for l in res.stdout.lines().rev() {
                let lt = l.trim();
                if let Some(rest) = lt.strip_prefix("RESULT:") {
                    marker = Some(rest.trim().to_string());
                    break;
                }
            }
            // Fallback: last line that looks like A|B|C
            let line = if let Some(x) = marker {
                x
            } else {
                let mut cand: Option<String> = None;
                for l in res.stdout.lines().rev() {
                    let lt = l.trim();
                    if lt.split('|').count() == 3 {
                        cand = Some(lt.to_string());
                        break;
                    }
                }
                cand.ok_or_else(|| fail("no RAW|ROOT_UUID|BOOT_UUID line found in script output"))?
            };
            let parts: Vec<_> = line.split('|').map(|s| s.trim().to_string()).collect();
            if parts.len() != 3 {
                return Err(fail(&format!(
                    "unexpected output from image_prepare script, expected RAW|ROOT_UUID|BOOT_UUID, got: {}",
                    line
                )));
            }
            Ok(ImagePrepResult {
                raw_disk: parts[0].clone(),
                root_uuid: parts[1].clone(),
                boot_uuid: parts[2].clone(),
                work_qcow2,
            })
        }
        Flavor::Alpine => Err(ImagePrepError::NotImplemented(
            "Alpine image_prepare not implemented yet".into(),
        )),
    }
}
/// Quote a string for safe interpolation into a POSIX shell command line.
///
/// Empty input becomes `''`; strings made only of shell-safe characters are
/// returned untouched; everything else is wrapped in single quotes, with each
/// embedded single quote emitted as `'"'"'`.
fn shell_escape(s: &str) -> String {
    if s.is_empty() {
        return "''".to_string();
    }
    let is_safe = |c: char| {
        c.is_ascii_alphanumeric() || matches!(c, '-' | '_' | '.' | '/' | '=' | ':')
    };
    if s.chars().all(is_safe) {
        return s.to_string();
    }
    let mut quoted = String::with_capacity(s.len() + 2);
    quoted.push('\'');
    for ch in s.chars() {
        match ch {
            // Close the quote, emit a double-quoted ', reopen the quote.
            '\'' => quoted.push_str("'\"'\"'"),
            other => quoted.push(other),
        }
    }
    quoted.push('\'');
    quoted
}

View File

@@ -26,6 +26,8 @@ pub mod nerdctl;
pub mod rfs;
pub mod qcow2;
pub mod cloudhv;
pub mod hostcheck;
pub mod image_prep;
pub mod rhai;

View File

@@ -10,6 +10,9 @@ pub mod nerdctl;
pub mod rfs;
pub mod qcow2;
pub mod cloudhv;
pub mod hostcheck;
pub mod image_prep;
pub mod cloudhv_builder;
/// Register all Virt module functions with the Rhai engine
///
@@ -36,6 +39,15 @@ pub fn register_virt_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult
// Register Cloud Hypervisor module functions
cloudhv::register_cloudhv_module(engine)?;
// Register Host dependency checker
hostcheck::register_hostcheck_module(engine)?;
// Register Image preparation functions
image_prep::register_image_prep_module(engine)?;
// Register Cloud Hypervisor builder and easy wrapper
cloudhv_builder::register_cloudhv_builder_module(engine)?;
Ok(())
}

View File

@@ -17,6 +17,7 @@ fn hv_to_rhai<T>(r: Result<T, cloudhv::CloudHvError>) -> Result<T, Box<EvalAltRe
fn map_to_vmspec(spec: Map) -> Result<VmSpec, Box<EvalAltResult>> {
let id = must_get_string(&spec, "id")?;
let kernel_path = get_string(&spec, "kernel_path");
let initramfs_path = get_string(&spec, "initramfs_path");
let firmware_path = get_string(&spec, "firmware_path");
let disk_path = must_get_string(&spec, "disk_path")?;
let api_socket = get_string(&spec, "api_socket").unwrap_or_else(|| "".to_string());
@@ -28,6 +29,7 @@ fn map_to_vmspec(spec: Map) -> Result<VmSpec, Box<EvalAltResult>> {
Ok(VmSpec {
id,
kernel_path,
initramfs_path,
firmware_path,
disk_path,
api_socket,
@@ -46,6 +48,11 @@ fn vmspec_to_map(s: &VmSpec) -> Map {
} else {
m.insert("kernel_path".into(), Dynamic::UNIT);
}
if let Some(ir) = &s.initramfs_path {
m.insert("initramfs_path".into(), ir.clone().into());
} else {
m.insert("initramfs_path".into(), Dynamic::UNIT);
}
if let Some(fw) = &s.firmware_path {
m.insert("firmware_path".into(), fw.clone().into());
} else {

View File

@@ -0,0 +1,136 @@
use crate::cloudhv::builder::CloudHvBuilder;
use crate::hostcheck::host_check_deps;
use crate::image_prep::{image_prepare, Flavor as ImgFlavor, ImagePrepOptions, NetPlanOpts};
use rhai::{Engine, EvalAltResult, Map};
/// Rhai factory: create a `CloudHvBuilder` for the given VM id.
fn builder_new(id: &str) -> CloudHvBuilder {
    CloudHvBuilder::new(id)
}
// Functional, chainable-style helpers (consume and return the builder)
/// Set guest memory (MiB); non-positive values are ignored, keeping the default.
fn builder_memory_mb(mut b: CloudHvBuilder, mb: i64) -> CloudHvBuilder {
    if mb > 0 {
        b.memory_mb(mb as u32);
    }
    b
}
/// Set vCPU count; non-positive values are ignored, keeping the default.
fn builder_vcpus(mut b: CloudHvBuilder, v: i64) -> CloudHvBuilder {
    if v > 0 {
        b.vcpus(v as u32);
    }
    b
}
/// Use an existing disk image at `path` (clears any previously selected flavor).
fn builder_disk(mut b: CloudHvBuilder, path: &str) -> CloudHvBuilder {
    b.disk(path);
    b
}
/// Select the disk by flavor name (e.g. "ubuntu"); image prep happens at launch.
fn builder_disk_from_flavor(mut b: CloudHvBuilder, flavor: &str) -> CloudHvBuilder {
    b.disk_from_flavor(flavor);
    b
}
/// Override the kernel command line.
fn builder_cmdline(mut b: CloudHvBuilder, c: &str) -> CloudHvBuilder {
    b.cmdline(c);
    b
}
/// Append one raw extra argument for cloud-hypervisor.
fn builder_extra_arg(mut b: CloudHvBuilder, a: &str) -> CloudHvBuilder {
    b.extra_arg(a);
    b
}
/// Disable the builder's default network configuration.
fn builder_no_default_net(mut b: CloudHvBuilder) -> CloudHvBuilder {
    b.no_default_net();
    b
}
/// Launch the configured VM, mapping builder errors into Rhai runtime errors.
fn builder_launch(mut b: CloudHvBuilder) -> Result<String, Box<EvalAltResult>> {
    match b.launch() {
        Ok(out) => Ok(out),
        Err(e) => {
            let msg = format!("cloudhv builder launch failed: {}", e);
            Err(Box::new(EvalAltResult::ErrorRuntime(
                msg.into(),
                rhai::Position::NONE,
            )))
        }
    }
}
// Noob-friendly one-shot wrapper
/// One-call helper: host dependency check -> image_prepare -> build -> launch.
///
/// Returns the launch result string, or a Rhai runtime error naming the first
/// failing stage. `memory_mb`/`vcpus` <= 0 keep the builder defaults.
fn vm_easy_launch(flavor: &str, id: &str, memory_mb: i64, vcpus: i64) -> Result<String, Box<EvalAltResult>> {
    // Preflight
    let report = host_check_deps().map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("host_check failed: {}", e).into(),
            rhai::Position::NONE,
        ))
    })?;
    if !report.ok {
        return Err(Box::new(EvalAltResult::ErrorRuntime(
            format!("missing dependencies: {:?}", report.critical).into(),
            rhai::Position::NONE,
        )));
    }
    // Prepare image to raw using defaults (DHCPv4 + placeholder v6 + disable cloud-init net)
    // NOTE(review): unrecognized flavor strings silently fall back to Ubuntu —
    // confirm that is preferable to raising an error.
    let img_flavor = match flavor {
        "ubuntu" | "Ubuntu" | "UBUNTU" => ImgFlavor::Ubuntu,
        "alpine" | "Alpine" | "ALPINE" => ImgFlavor::Alpine,
        _ => ImgFlavor::Ubuntu,
    };
    let prep_opts = ImagePrepOptions {
        flavor: img_flavor,
        id: id.to_string(),
        source: None,
        target_dir: None,
        net: NetPlanOpts::default(),
        disable_cloud_init_net: true,
    };
    let prep = image_prepare(&prep_opts).map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("image_prepare failed: {}", e).into(),
            rhai::Position::NONE,
        ))
    })?;
    // Build and launch
    let mut b = CloudHvBuilder::new(id);
    b.disk(&prep.raw_disk);
    if memory_mb > 0 {
        b.memory_mb(memory_mb as u32);
    }
    if vcpus > 0 {
        b.vcpus(vcpus as u32);
    }
    b.launch().map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("vm_easy_launch failed at launch: {}", e).into(),
            rhai::Position::NONE,
        ))
    })
}
/// Register the `CloudHvBuilder` type, its chainable helper functions, and the
/// one-shot `vm_easy_launch` wrapper with the Rhai engine.
pub fn register_cloudhv_builder_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    // Register type
    engine.register_type_with_name::<CloudHvBuilder>("CloudHvBuilder");
    // Factory
    engine.register_fn("cloudhv_builder", builder_new);
    // Chainable methods (functional style: each takes and returns the builder)
    engine.register_fn("memory_mb", builder_memory_mb);
    engine.register_fn("vcpus", builder_vcpus);
    engine.register_fn("disk", builder_disk);
    engine.register_fn("disk_from_flavor", builder_disk_from_flavor);
    engine.register_fn("cmdline", builder_cmdline);
    engine.register_fn("extra_arg", builder_extra_arg);
    engine.register_fn("no_default_net", builder_no_default_net);
    // Action
    engine.register_fn("launch", builder_launch);
    // One-shot wrapper
    engine.register_fn("vm_easy_launch", vm_easy_launch);
    Ok(())
}

View File

@@ -0,0 +1,48 @@
use crate::hostcheck::{host_check_deps, HostCheckReport};
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map};
fn report_to_map(r: &HostCheckReport) -> Map {
let mut m = Map::new();
m.insert("ok".into(), (r.ok as bool).into());
let mut crit = Array::new();
for s in &r.critical {
crit.push(s.clone().into());
}
m.insert("critical".into(), crit.into());
let mut opt = Array::new();
for s in &r.optional {
opt.push(s.clone().into());
}
m.insert("optional".into(), opt.into());
let mut notes = Array::new();
for s in &r.notes {
notes.push(s.clone().into());
}
m.insert("notes".into(), notes.into());
m
}
/// Rhai-facing host dependency check.
///
/// On success returns the report converted to a map; if the underlying check
/// itself errors, returns a synthetic "not ok" map (with the error in
/// `critical`) instead of raising, so scripts can always inspect `ok`.
fn host_check() -> Result<Map, Box<EvalAltResult>> {
    match host_check_deps() {
        Ok(rep) => Ok(report_to_map(&rep)),
        Err(e) => {
            let mut m = Map::new();
            m.insert("ok".into(), Dynamic::FALSE);
            let mut crit = Array::new();
            crit.push(format!("host_check failed: {}", e).into());
            m.insert("critical".into(), crit.into());
            m.insert("optional".into(), Array::new().into());
            m.insert("notes".into(), Array::new().into());
            Ok(m)
        }
    }
}
/// Register the `host_check` function with the Rhai engine.
pub fn register_hostcheck_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    engine.register_fn("host_check", host_check);
    Ok(())
}

View File

@@ -0,0 +1,98 @@
use crate::image_prep::{image_prepare, Flavor, ImagePrepOptions, NetPlanOpts};
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map};
/// Parse a flavor string ("ubuntu"/"alpine", also Capitalized or UPPERCASE)
/// into a `Flavor`, or a Rhai runtime error for anything else.
fn parse_flavor(s: &str) -> Result<Flavor, Box<EvalAltResult>> {
    if matches!(s, "ubuntu" | "Ubuntu" | "UBUNTU") {
        return Ok(Flavor::Ubuntu);
    }
    if matches!(s, "alpine" | "Alpine" | "ALPINE") {
        return Ok(Flavor::Alpine);
    }
    Err(Box::new(EvalAltResult::ErrorRuntime(
        format!("image_prepare: invalid flavor '{}', allowed: ubuntu|alpine", s).into(),
        rhai::Position::NONE,
    )))
}
/// Fetch key `k` from the map as a String; None when absent or not a string.
fn map_get_string(m: &Map, k: &str) -> Option<String> {
    let v = m.get(k)?;
    if v.is_string() {
        Some(v.clone().cast::<String>())
    } else {
        None
    }
}
/// Fetch key `k` from the map as a bool; None when absent or not a bool.
fn map_get_bool(m: &Map, k: &str) -> Option<bool> {
    let v = m.get(k)?;
    match v.as_bool() {
        Ok(b) => Some(b),
        Err(_) => None,
    }
}
/// Build `NetPlanOpts` from an optional Rhai sub-map, starting from defaults.
/// Recognized keys: `dhcp4`, `dhcp6` (bools), `ipv6_addr`, `gw6` (non-empty strings).
fn net_from_map(m: Option<&Map>) -> NetPlanOpts {
    let mut opts = NetPlanOpts::default();
    let mm = match m {
        Some(mm) => mm,
        None => return opts,
    };
    if let Some(v) = map_get_bool(mm, "dhcp4") {
        opts.dhcp4 = v;
    }
    if let Some(v) = map_get_bool(mm, "dhcp6") {
        opts.dhcp6 = v;
    }
    // Blank/whitespace-only strings are treated as absent.
    opts.ipv6_addr = map_get_string(mm, "ipv6_addr").filter(|s| !s.trim().is_empty());
    opts.gw6 = map_get_string(mm, "gw6").filter(|s| !s.trim().is_empty());
    opts
}
/// Rhai wrapper around `image_prepare`.
///
/// Expected map keys: required `id` (non-empty string); optional `flavor`
/// ("ubuntu"|"alpine", default "ubuntu"), `source`, `target_dir`,
/// `net` (sub-map, see `net_from_map`), and `disable_cloud_init_net`
/// (default true). Returns a map with `raw_disk`, `root_uuid`, `boot_uuid`,
/// and `work_qcow2` on success.
fn image_prepare_rhai(opts: Map) -> Result<Map, Box<EvalAltResult>> {
    // Required fields
    let id = map_get_string(&opts, "id").ok_or_else(|| {
        Box::new(EvalAltResult::ErrorRuntime(
            "image_prepare: missing required field 'id'".into(),
            rhai::Position::NONE,
        ))
    })?;
    if id.trim().is_empty() {
        return Err(Box::new(EvalAltResult::ErrorRuntime(
            "image_prepare: 'id' must not be empty".into(),
            rhai::Position::NONE,
        )));
    }
    let flavor_s = map_get_string(&opts, "flavor").unwrap_or_else(|| "ubuntu".into());
    let flavor = parse_flavor(&flavor_s)?;
    // Optional fields
    let source = map_get_string(&opts, "source");
    let target_dir = map_get_string(&opts, "target_dir");
    let net = opts.get("net").and_then(|v| if v.is_map() { Some(v.clone().cast::<Map>()) } else { None });
    let net_opts = net_from_map(net.as_ref());
    let disable_cloud_init_net = map_get_bool(&opts, "disable_cloud_init_net").unwrap_or(true);
    let o = ImagePrepOptions {
        flavor,
        id,
        source,
        target_dir,
        net: net_opts,
        disable_cloud_init_net,
    };
    let res = image_prepare(&o).map_err(|e| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("image_prepare failed: {}", e).into(),
            rhai::Position::NONE,
        ))
    })?;
    // Convert the result struct into a plain Rhai map.
    let mut out = Map::new();
    out.insert("raw_disk".into(), res.raw_disk.into());
    out.insert("root_uuid".into(), res.root_uuid.into());
    out.insert("boot_uuid".into(), res.boot_uuid.into());
    out.insert("work_qcow2".into(), res.work_qcow2.into());
    Ok(out)
}
/// Register the `image_prepare` function with the Rhai engine.
pub fn register_image_prep_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    engine.register_fn("image_prepare", image_prepare_rhai);
    Ok(())
}

View File

@@ -140,25 +140,25 @@ if !missing {
print("⚠️ API socket not found (continuing)");
}
print("\n--- Test 5: Stop VM (graceful) ---");
try {
cloudhv_vm_stop(vm_id, false);
print("✓ VM stop invoked (graceful)");
} catch (err) {
print(`⚠️ VM stop failed: ${err}`);
}
// print("\n--- Test 5: Stop VM (graceful) ---");
// try {
// cloudhv_vm_stop(vm_id, false);
// print("✓ VM stop invoked (graceful)");
// } catch (err) {
// print(`⚠️ VM stop failed: ${err}`);
// }
} else {
print("\n⚠ Skipping start/stop because required inputs are missing.");
}
print("\n--- Test 6: Delete VM definition ---");
try {
cloudhv_vm_delete(vm_id, false);
print("✓ VM deleted");
} catch (err) {
print(`❌ VM delete failed: ${err}`);
print("=== CloudHV Tests Aborted ===");
exit();
}
// print("\n--- Test 6: Delete VM definition ---");
// try {
// cloudhv_vm_delete(vm_id, false);
// print("✓ VM deleted");
// } catch (err) {
// print(`❌ VM delete failed: ${err}`);
// print("=== CloudHV Tests Aborted ===");
// exit();
// }
print("\n=== Cloud Hypervisor Basic Tests Completed ===");

View File

@@ -0,0 +1,148 @@
// Cloud Hypervisor diagnostic script
// Creates a VM, starts CH, verifies PID, API socket, ch-remote info, and tails logs.
print("=== CloudHV Diagnostic ===");
// Dependency check: both the hypervisor and its control CLI must be on PATH.
let chs = which("cloud-hypervisor-static");
let chrs = which("ch-remote-static");
let ch_missing = (chs == () || chs == "");
let chr_missing = (chrs == () || chrs == "");
if ch_missing || chr_missing {
    print("cloud-hypervisor-static and/or ch-remote-static not available - aborting.");
    exit();
}
// Inputs: firmware and disk image must already be present on the host.
let firmware_path = "/tmp/virt_images/hypervisor-fw";
let disk_path = "/tmp/virt_images/noble-server-cloudimg-amd64.img";
if !exist(firmware_path) {
    print(`Firmware not found: ${firmware_path}`);
    exit();
}
if !exist(disk_path) {
    print(`Disk image not found: ${disk_path}`);
    exit();
}
// Unique VM id derived from a nanosecond timestamp (fixed fallback if unavailable).
let rid = run_silent("date +%s%N");
let suffix = if rid.success && rid.stdout != "" { rid.stdout.trim() } else { "100000" };
let vm_id = `diagvm_${suffix}`;
// Socket path will be obtained from VM info (SAL populates spec.api_socket after start)
// Build minimal spec; let SAL decide the api_socket under the VM dir
let spec = #{
    "id": vm_id,
    "disk_path": disk_path,
    "vcpus": 1,
    "memory_mb": 512
};
spec.firmware_path = firmware_path;
// True when `p` is a live PID: a non-unit value whose /proc entry exists.
// Probing /proc avoids the noisy "kill: No such process" output of `kill -0`.
fn pid_alive(p) {
    if p == () {
        return false;
    }
    let proc_path = `/proc/${p}`;
    return exist(proc_path);
}
// Print the last `n` lines of file `p`, or a notice when the file is absent.
fn tail_log(p, n) {
    if !exist(p) {
        print(`Log file not found: ${p}`);
        return;
    }
    let r = run_silent(`tail -n ${n} ${p}`);
    if r.success {
        print(r.stdout);
    } else {
        print(r.stderr);
    }
}
// Create the VM definition; abort the whole diagnostic if this fails.
try {
    print("--- Create VM spec ---");
    let created = cloudhv_vm_create(spec);
    print(`created: ${created}`);
} catch (err) {
    print(`create failed: ${err}`);
    exit();
}
// Read back info to get SAL-resolved log_file path
let info0 = cloudhv_vm_info(vm_id);
let log_file = info0.runtime.log_file;
// Rely on SAL to handle socket directory creation and stale-socket cleanup
print("--- Start VM ---");
try {
    cloudhv_vm_start(vm_id);
    print("start invoked");
} catch (err) {
    print(`start failed: ${err}`);
    tail_log(log_file, 200);
    exit();
}
// Fetch PID and discover API socket path from updated spec
let info1 = cloudhv_vm_info(vm_id);
let pid = info1.runtime.pid;
let api_sock = info1.spec.api_socket;
print(`pid=${pid}`);
print(`api_sock_from_sal=${api_sock}`);
// Wait up to ~50s for the API socket file to appear
let sock_ok = false;
for x in 0..50 {
    if exist(api_sock) { sock_ok = true; break; }
    sleep(1);
}
print(`api_sock_exists=${sock_ok} path=${api_sock}`);
// Probe ch-remote info (retry up to 20x; CH may need time to come up)
let info_ok = false;
let last_err = "";
if sock_ok {
    for x in 0..20 {
        let r = run_silent(`ch-remote-static --api-socket ${api_sock} info`);
        if r.success {
            info_ok = true;
            print("ch-remote info OK");
            break;
        } else {
            last_err = if r.stderr != "" { r.stderr } else { r.stdout };
            sleep(1);
        }
    }
}
if !info_ok {
    // Diagnose the failure: report liveness of the CH process and dump its log.
    print("ch-remote info FAILED");
    if last_err != "" { print(last_err); }
    let alive = pid_alive(pid);
    print(`pid_alive=${alive}`);
    print("--- Last 200 lines of CH log ---");
    tail_log(log_file, 200);
    print("--- End of log ---");
} else {
    print("--- Stop via SAL (force) ---");
    try {
        cloudhv_vm_stop(vm_id, true);
        print("SAL stop invoked (force)");
    } catch (err) {
        print(`stop failed: ${err}`);
    }
    // wait for exit (check original PID)
    for x in 0..30 {
        if !pid_alive(pid) { break; }
        sleep(1);
    }
    print(`pid_alive_after_stop=${pid_alive(pid)}`);
}
print("--- Cleanup ---");
try {
    cloudhv_vm_delete(vm_id, false);
    print("vm deleted");
} catch (err) {
    print(`delete failed: ${err}`);
}
print("=== Diagnostic done ===");

View File

@@ -0,0 +1,533 @@
// Cloud-init NoCloud + host DHCP (dnsmasq) provisioning for Cloud Hypervisor
// - Accepts a user-supplied SSH public key
// - Ensures Ubuntu cloud image via SAL qcow2 builder
// - Sets up host bridge br0 and tap0, and runs an ephemeral dnsmasq bound to br0
// - Builds NoCloud seed ISO (cloud-localds preferred; genisoimage fallback)
// - Creates/starts a VM and prints SSH connection instructions
//
// Requirements (run this script with privileges that allow sudo commands):
// - cloud-hypervisor-static, ch-remote-static
// - cloud-image-utils (for cloud-localds) or genisoimage/xorriso
// - dnsmasq, iproute2
// - qemu tools already used by qcow2 builder
//
// Note: This script uses sudo for network and dnsmasq operations.
print("=== CloudHV + cloud-init + host DHCP (dnsmasq) ===");
// ----------- User input -----------
let user_pubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFyZJCEsvRc0eitsOoq+ywC5Lmqejvk3hXMVbO0AxPrd maxime@maxime-arch";
// Optional: choose boot method. If firmware is present in common locations, it will be used.
// Otherwise, if kernel_path exists, direct kernel boot will be used.
// If neither is found, the script will abort before starting the VM.
let firmware_path_override = ""; // e.g., "/usr/share/cloud-hypervisor/hypervisor-fw"
let kernel_path_override = ""; // e.g., "/path/to/vmlinux"
let kernel_cmdline_override = "console=ttyS0 reboot=k panic=1";
// Network parameters (local-only setup)
let bridge = "br0";
let br_cidr = "192.168.127.1/24";
let br_ip = "192.168.127.1";
let tap = "tap0";
let mac = "02:00:00:00:00:10"; // locally administered MAC
// Deterministic IP for the VM (dnsmasq will pin this MAC to this IP)
let vm_static_ip = "192.168.127.100";
// Paths
let base_dir = "/tmp/virt_images";
let seed_iso = `${base_dir}/seed.iso`;
let user_data = `${base_dir}/user-data`;
let meta_data = `${base_dir}/meta-data`;
let dnsmasq_pid = `${base_dir}/dnsmasq.pid`;
let dnsmasq_lease= `${base_dir}/dnsmasq.leases`;
let dnsmasq_log = `${base_dir}/dnsmasq.log`;
// ----------- Dependency checks -----------
// which() yields unit () or "" when a binary is missing; both forms are treated as absent.
print("\n--- Checking dependencies ---");
let chs = which("cloud-hypervisor-static");
let chrs = which("ch-remote-static");
let clds = which("cloud-localds");
let geniso = which("genisoimage");
let dns = which("dnsmasq");
let ipt = which("ip");
let missing = false;
if chs == () || chs == "" {
    print("❌ cloud-hypervisor-static not found on PATH");
    missing = true;
}
if chrs == () || chrs == "" {
    print("❌ ch-remote-static not found on PATH");
    missing = true;
}
// Either seed-ISO builder is acceptable; only fail when both are absent.
if (clds == () || clds == "") && (geniso == () || geniso == "") {
    print("❌ Neither cloud-localds nor genisoimage is available. Install cloud-image-utils or genisoimage.");
    missing = true;
}
if dns == () || dns == "" {
    print("❌ dnsmasq not found on PATH");
    missing = true;
}
if ipt == () || ipt == "" {
    print("❌ ip (iproute2) not found on PATH");
    missing = true;
}
// Abort before any host mutation when prerequisites are absent.
if missing {
    print("=== Aborting due to missing dependencies ===");
    exit();
}
print("✓ Dependencies look OK");
// ----------- Ensure base image -----------
print("\n--- Ensuring Ubuntu 24.04 cloud image ---");
let base;
try {
    // Avoid resizing to prevent GPT backup-header mismatch that can break early boot on some kernels/firmware.
    // Use 0 to keep the original image size; cloud-init/cloud-image tooling can grow the FS later if needed.
    base = qcow2_build_ubuntu_24_04_base(base_dir, 0);
} catch (err) {
    print(`❌ Failed to build/ensure base image: ${err}`);
    exit();
}
let disk_path = base.base_image_path;
print(`✓ Using base image: ${disk_path}`);
// ----------- Host networking (bridge + tap) -----------
print("\n--- Configuring host networking (bridge + tap) ---");
// Idempotent: create br0 if missing; assign IP if not present; set up
// The TAP is deleted here (not created): Cloud Hypervisor creates its own TAP later.
let net_script = `
sudo ip link show ${bridge} >/dev/null 2>&1 || sudo ip link add ${bridge} type bridge
ip addr show dev ${bridge} | grep -q "${br_cidr}" || sudo ip addr add ${br_cidr} dev ${bridge}
sudo ip link set ${bridge} up
# Remove any stale TAP to avoid "Resource busy" when CH configures it
if ip link show ${tap} >/dev/null 2>&1; then
  sudo ip link set ${tap} down || true
  sudo ip link del ${tap} || true
fi
`;
run_silent(net_script);
print(`✓ Bridge ${bridge} and tap ${tap} configured`);
print("Note: NO-CARRIER on a bridge/tap without a peer is normal; DHCP will work once the guest brings its interface up.");
// ----------- Start/ensure dnsmasq on br0 -----------
print("\n--- Ensuring dnsmasq serving DHCP on the bridge ---");
// Ensure log/lease directory exists before starting dnsmasq
run_silent(`mkdir -p ${base_dir}`);
// If an instance with our pid-file is running, keep it; otherwise start a new one bound to br0.
// Use --port=0 to avoid DNS port conflicts; we only need DHCP here.
// Liveness is judged by any of: live pid-file, a matching process, or the bind line in our log.
let dns_state = run_silent(`
if [ -f ${dnsmasq_pid} ] && ps -p $(cat ${dnsmasq_pid}) >/dev/null 2>&1; then
  echo RUNNING
elif pgrep -f "dnsmasq .*--interface=${bridge}" >/dev/null 2>&1; then
  echo RUNNING
elif [ -f ${dnsmasq_log} ] && grep -q "sockets bound exclusively to interface ${bridge}" ${dnsmasq_log}; then
  echo RUNNING
else
  echo STOPPED
fi
`);
let need_start = true;
if dns_state.success && dns_state.stdout.trim() == "RUNNING" {
    print("✓ dnsmasq already running (pid file present and alive)");
    need_start = false;
} else {
    // Clean stale files
    run_silent(`rm -f ${dnsmasq_pid} ${dnsmasq_lease}`);
}
if need_start {
    // Start dnsmasq detached and force a clean, self-contained configuration.
    // - Use --conf-file=/dev/null to avoid system config conflicts
    // - Log directly via --log-facility to capture early failures
    // - Run under current privileges (herodo is invoked with sudo)
    // --dhcp-host pins our VM's MAC to vm_static_ip for deterministic discovery later.
    let r = run_silent(`
: > ${dnsmasq_log}
nohup dnsmasq \
  --conf-file=/dev/null \
  --log-facility=${dnsmasq_log} \
  --log-dhcp \
  --user=root \
  --group=root \
  --port=0 \
  --bind-interfaces \
  --except-interface=lo \
  --interface=${bridge} \
  --dhcp-range=192.168.127.100,192.168.127.200,12h \
  --dhcp-option=option:router,${br_ip} \
  --dhcp-option=option:dns-server,1.1.1.1 \
  --dhcp-host=${mac},${vm_static_ip} \
  --pid-file=${dnsmasq_pid} \
  --dhcp-leasefile=${dnsmasq_lease} &
`);
    if !r.success {
        print(`❌ Failed to start dnsmasq. Check log: ${dnsmasq_log}`);
        let t = run_silent(`
if [ -f ${dnsmasq_log} ]; then
  tail -n 200 ${dnsmasq_log}
fi
`);
        if t.success && t.stdout.trim() != "" { print(t.stdout); }
        exit();
    }
    // Robust readiness: wait up to 10s for pidfile OR process OR log pattern
    let ready = run_silent(`
for i in $(seq 1 10); do
  if [ -f ${dnsmasq_pid} ] && ps -p $(cat ${dnsmasq_pid}) >/dev/null 2>&1; then
    echo OK; exit 0
  fi
  if pgrep -f "dnsmasq .*--interface=${bridge}" >/dev/null 2>&1; then
    echo OK; exit 0
  fi
  if [ -f ${dnsmasq_log} ] && grep -q "sockets bound exclusively to interface ${bridge}" ${dnsmasq_log}; then
    echo OK; exit 0
  fi
  sleep 1
done
echo FAIL
`);
    if !(ready.success && ready.stdout.contains("OK")) {
        print(`❌ dnsmasq did not come up. See ${dnsmasq_log}`);
        let t = run_silent(`
if [ -f ${dnsmasq_log} ]; then
  tail -n 200 ${dnsmasq_log}
fi
`);
        if t.success && t.stdout.trim() != "" { print(t.stdout); }
        exit();
    }
    print("✓ dnsmasq started (DHCP on br0)");
}
// ----------- Build cloud-init NoCloud seed (user-data/meta-data) -----------
print("\n--- Building NoCloud seed (user-data, meta-data) ---");
run_silent(`mkdir -p ${base_dir}`);
run_silent(`chmod 1777 ${base_dir}`);
// Compose user-data and meta-data content
// Key-only login: password auth disabled; the 'ubuntu' user is reachable only via the SSH key.
let ud = `#cloud-config
users:
  - name: ubuntu
    groups: [adm, cdrom, dialout, lxd, plugdev, sudo]
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    lock_passwd: true
    ssh_authorized_keys:
      - ${user_pubkey}
ssh_pwauth: false
package_update: true
`;
let md = `instance-id: iid-ubuntu-noble-001
local-hostname: noblevm
`;
// Write files via heredoc
// Quoted 'EOF' delimiter keeps the heredoc body literal for the shell;
// Rhai interpolation of ${ud}/${md} has already happened at this point.
let wr1 = run_silent(`
cat > ${user_data} <<'EOF'
${ud}
EOF
`);
if !wr1.success { print(`❌ Failed to write ${user_data}`); exit(); }
let wr2 = run_silent(`
cat > ${meta_data} <<'EOF'
${md}
EOF
`);
if !wr2.success { print(`❌ Failed to write ${meta_data}`); exit(); }
// Provide cloud-init network-config to ensure the NIC with our MAC requests DHCP
let net_config = `${base_dir}/network-config`;
let nc = `version: 2
ethernets:
  nic0:
    match:
      macaddress: ${mac}
    set-name: eth0
    renderer: networkd
    dhcp4: true
`;
let wr3 = run_silent(`
cat > ${net_config} <<'EOF'
${nc}
EOF
`);
if !wr3.success { print(`❌ Failed to write ${net_config}`); exit(); }
// Build seed ISO (prefer cloud-localds)
let built = false;
if !(clds == () || clds == "") {
    let r = run_silent(`sudo cloud-localds --network-config ${net_config} ${seed_iso} ${user_data} ${meta_data}`);
    if r.success {
        built = true;
    }
}
// Fallback: genisoimage with the NoCloud volume id "cidata".
if !built {
    if geniso == () || geniso == "" {
        print("❌ Neither cloud-localds nor genisoimage succeeded/available to build seed.iso");
        exit();
    }
    let r2 = run_silent(`sudo genisoimage -output ${seed_iso} -volid cidata -joliet -rock ${user_data} ${meta_data} ${net_config}`);
    if !r2.success {
        print("❌ genisoimage failed to create seed.iso");
        exit();
    }
}
print(`✓ Seed ISO: ${seed_iso}`);
// ----------- Determine boot method (firmware or kernel) -----------
print("\n--- Determining boot method ---");
let firmware_path = "";
if firmware_path_override != "" && exist(firmware_path_override) {
firmware_path = firmware_path_override;
} else {
let candidates = [
"/usr/local/share/cloud-hypervisor/hypervisor-fw",
"/usr/share/cloud-hypervisor/hypervisor-fw",
"/usr/lib/cloud-hypervisor/hypervisor-fw",
"/tmp/virt_images/hypervisor-fw"
];
for p in candidates {
if exist(p) { firmware_path = p; break; }
}
}
let kernel_path = "";
if kernel_path_override != "" && exist(kernel_path_override) {
kernel_path = kernel_path_override;
}
if firmware_path == "" && kernel_path == "" {
print("❌ No firmware_path or kernel_path found. Set firmware_path_override or kernel_path_override at top and re-run.");
exit();
}
if firmware_path != "" {
print(`✓ Using firmware boot: ${firmware_path}`);
} else {
print(`✓ Using direct kernel boot: ${kernel_path}`);
}
// ----------- Create and start VM -----------
print("\n--- Creating and starting VM ---");
// Nanosecond timestamp yields a unique-enough VM id per run.
let rid = run_silent("date +%s%N");
let suffix = if rid.success && rid.stdout.trim() != "" { rid.stdout.trim() } else { "100000" };
let vm_id = `noble_vm_${suffix}`;
// Use a unique TAP per run to avoid "Resource busy" conflicts.
// Keep name <= 15 chars (Linux IFNAMSIZ), e.g. "tap-abcdef".
let tn = run_silent("od -An -N3 -tx1 /dev/urandom | tr -d '[:space:]'");
if tn.success && tn.stdout.trim() != "" {
    tap = `tap-${tn.stdout.trim()}`;
} else {
    tap = "tap-abcd01";
}
let spec = #{
    "id": vm_id,
    "disk_path": disk_path,
    "api_socket": "",
    "vcpus": 2,
    "memory_mb": 2048
};
if firmware_path != "" {
    spec.firmware_path = firmware_path;
} else {
    spec.kernel_path = kernel_path;
    spec.cmdline = kernel_cmdline_override;
}
// Seed ISO attached as a read-only second disk; NIC uses the fixed MAC so dnsmasq pins the IP.
spec.extra_args = [
    "--disk", `path=${seed_iso},readonly=true`,
    "--net", `tap=${tap},mac=${mac}`
];
try {
    let created = cloudhv_vm_create(spec);
    print(`✓ VM created: ${created}`);
} catch (err) {
    print(`❌ VM create failed: ${err}`);
    exit();
}
try {
    cloudhv_vm_start(vm_id);
    print("✓ VM start invoked");
    // After CH creates/opens the TAP, attach it to the bridge to allow DHCP broadcast to reach dnsmasq on br0.
    // Avoid racing with CH tap configuration: wait briefly, then attempt attach.
    let post_net = `
# Give CH time to finish configuring tap to avoid EBUSY
sleep 1
for i in $(seq 1 30); do
  if ip link show ${tap} >/dev/null 2>&1; then
    # Enslave to bridge and ensure up; ignore errors (idempotent)
    sudo ip link set ${tap} master ${bridge} 2>/dev/null || true
    sudo ip link set ${tap} up 2>/dev/null || true
    break
  fi
  sleep 1
done
`;
    run_silent(post_net);
} catch (err) {
    print(`❌ VM start failed: ${err}`);
    exit();
}
// ----------- Wait for DHCP lease and print access info -----------
print("\n--- Waiting for DHCP lease from dnsmasq ---");
let vm_ip = "";
// First try deterministic fixed IP via ping (dnsmasq pins MAC->IP)
// Up to ~2 min total: each iteration is a 1 s ping timeout plus a 1 s sleep.
for i in 0..60 {
    // Use a plain command (no shell operators). Success indicates reachability.
    let pr = run_silent(`ping -c1 -W1 -I ${bridge} ${vm_static_ip}`);
    if pr.success {
        vm_ip = vm_static_ip;
        break;
    }
    sleep(1);
}
// Fallback lease/ARP discovery — skipped entirely when the deterministic ping
// probe above already resolved vm_ip. The original looped unconditionally,
// burning up to 180 s (180 x sleep(1)) even after the IP was already known.
if vm_ip == "" {
    for i in 0..180 {
        sleep(1);
        // Discover and validate IPv4; prefer exact MAC match across common dnsmasq lease locations
        let lr = run_silent(`
valid_ipv4() { echo "$1" | grep -Eo '^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$' || true; }
# Candidate lease files (add more if your distro uses a different path)
LEASE_FILES="${dnsmasq_lease} /var/lib/misc/dnsmasq.leases /var/lib/dnsmasq/dnsmasq.leases"
# Include any runtime leases under /run/dnsmasq if present
if ls /run/dnsmasq/*.leases >/dev/null 2>&1; then
  LEASE_FILES="$LEASE_FILES $(ls /run/dnsmasq/*.leases 2>/dev/null)"
fi
# 1) Try to find by exact MAC across all known lease files
for f in $LEASE_FILES; do
  [ -f "$f" ] || continue
  ip="$(awk -v m="${mac}" '$2==m{ip=$3} END{if(ip!="") print ip}' "$f")"
  if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
done
# 2) Fallback: last IP in our br0 subnet across all lease files
for f in $LEASE_FILES; do
  [ -f "$f" ] || continue
  ip="$(awk '$3 ~ /^192\\.168\\.127\\./ {ip=$3} END{if(ip!="") print ip}' "$f")"
  if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
done
# 3) Fallback: SAL default subnet (172.30.0.0/24) across all lease files
for f in $LEASE_FILES; do
  [ -f "$f" ] || continue
  ip="$(awk '$3 ~ /^172\\.30\\.0\\./ {ip=$3} END{if(ip!="") print ip}' "$f")"
  if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
done
# 4) ARP gleaning on likely bridges (br0 first, then br-hero) for the known MAC
for dev in ${bridge} br-hero; do
  if ip -o link show "$dev" >/dev/null 2>&1; then
    ip="$(ip neigh show dev "$dev" | awk '$0 ~ /lladdr ${mac}/ {print $1}' | tail -n1)"
    if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
  fi
done
# 5) As a last resort, ARP any 192.168.127.x seen on br0
if ip -o link show ${bridge} >/dev/null 2>&1; then
  ip="$(ip neigh show dev ${bridge} | awk '$1 ~ /^192\\.168\\.127\\./ {print $1}' | tail -n1)"
  if [ -n "$ip" ] && [ -n "$(valid_ipv4 "$ip")" ]; then echo "$ip"; exit 0; fi
fi
# No valid IP yet
true
`);
        if lr.success {
            let ip = lr.stdout.trim();
            if ip != "" {
                vm_ip = ip;
                break;
            }
        }
    }
}
// Fallback: parse cloud-hypervisor console log for an IPv4 on our expected subnets
// (the guest's own boot output often prints the address it obtained via DHCP).
let info2 = cloudhv_vm_info(vm_id);
let log_path = info2.runtime.log_file;
if vm_ip == "" {
    let cp = run_silent(`
if [ -f ${log_path} ]; then
  grep -Eo '([0-9]+\\.){3}[0-9]+' ${log_path} | grep -E '^(192\\.168\\.127|172\\.30\\.0)\\.' | tail -n1
fi
`);
    if cp.success {
        let ip2 = cp.stdout.trim();
        if ip2 != "" {
            vm_ip = ip2;
        }
    }
}
if vm_ip == "" {
    // Actively populate ARP neighbor tables by sweeping likely subnets
    run_silent(`
for ip in $(seq 100 200); do ping -c1 -W1 -I ${bridge} 192.168.127.$ip >/dev/null 2>&1 || true; done
if ip -o link show br-hero >/dev/null 2>&1; then
  for ip in $(seq 50 250); do ping -c1 -W1 -I br-hero 172.30.0.$ip >/dev/null 2>&1 || true; done
fi
`);
    // Re-check after ARP sweep using the same validated discovery logic
    let lr2 = run_silent(`
get_ip_from_leases() {
  f="$1"; prefix="$2";
  if [ -f "$f" ]; then
    awk -v pfx="$prefix" '$3 ~ ("^" pfx) {ip=$3} END{if(ip!="") print ip}' "$f"
  fi
}
valid_ipv4() {
  echo "$1" | grep -Eo '^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$' || true
}
cand="$(get_ip_from_leases ${dnsmasq_lease} "192.168.127.")"
if [ -n "$cand" ] && [ -n "$(valid_ipv4 "$cand")" ]; then echo "$cand"; exit 0; fi
cand="$(get_ip_from_leases /var/lib/misc/dnsmasq.leases "192.168.127.")"
if [ -n "$cand" ] && [ -n "$(valid_ipv4 "$cand")" ]; then echo "$cand"; exit 0; fi
cand="$(get_ip_from_leases /var/lib/misc/dnsmasq.leases "172.30.0.")"
if [ -n "$cand" ] && [ -n "$(valid_ipv4 "$cand")" ]; then echo "$cand"; exit 0; fi
cand="$(ip neigh show dev ${bridge} | awk '$0 ~ /lladdr ${mac}/ {print $1}' | tail -n1)"
if [ -n "$cand" ] && [ -n "$(valid_ipv4 "$cand")" ]; then echo "$cand"; exit 0; fi
true
`);
    if lr2.success {
        let ip2 = lr2.stdout.trim();
        if ip2 != "" {
            vm_ip = ip2;
        }
    }
}
/* Final sanity: ensure vm_ip is a valid IPv4 dotted-quad before printing */
let _chk = run_silent(`echo "${vm_ip}" | grep -Eo '^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$' || true`);
if !(_chk.success && _chk.stdout.trim() != "") { vm_ip = ""; }
if vm_ip == "" {
    print("❌ Could not discover VM IP after 180 seconds.");
    print("Diagnostics you can run now:");
    print(` tail -n +1 ${dnsmasq_lease}`);
    print(" cat /var/lib/misc/dnsmasq.leases | tail -n 5");
    print(` ip neigh show dev ${bridge} | grep '${mac}' || true`);
    print("Exiting without SSH command because the IP could not be determined.");
    exit();
} else {
    print(`✓ Lease acquired: ${vm_ip}`);
    print("\nSSH command (key-only; default user 'ubuntu'):");
    print(`ssh -o StrictHostKeyChecking=no ubuntu@${vm_ip}`);
}
// Final summary: where to find the VM's API socket, console log, and network identity.
print("\n--- VM access details ---");
print(`VM ID: ${vm_id}`);
let info = cloudhv_vm_info(vm_id);
print(`API socket: ${info.spec.api_socket}`);
print(`Console log: ${info.runtime.log_file}`);
print(`Bridge: ${bridge} at ${br_ip}, TAP: ${tap}, MAC: ${mac}`);
print(`Seed: ${seed_iso}`);
/* SSH command already printed above when lease was acquired */
print("\nCleanup hints (manual):");
print(`- Stop dnsmasq: sudo kill \$(cat ${dnsmasq_pid})`);
print(`- Remove TAP: sudo ip link set ${tap} down; sudo ip link del ${tap}`);
print(" (Keep the bridge if you will reuse it.)");
print("\n=== Completed ===");

View File

@@ -0,0 +1,311 @@
// Create and boot an Ubuntu 24.04 VM with cloud-init SSH key injection on Cloud Hypervisor
// - Uses qcow2 base image builder from SAL
// - Builds a NoCloud seed ISO embedding your SSH public key
// - Starts the VM; host networking prerequisites (bridge/dnsmasq/nftables) are ensured by CloudHV SAL
// - Attempts to discover the VM IP from dnsmasq leases and prints SSH instructions
//
// Requirements on host:
// - cloud-hypervisor-static, ch-remote-static
// - cloud-localds (preferred) OR genisoimage
// - qemu-img (already used by qcow2 SAL)
// - dnsmasq + nftables (will be handled by SAL during vm_start)
//
// Note:
// - SAL CloudHV networking will create a bridge br-hero, enable dnsmasq, and add a NAT rule via nftables
// - This script does NOT manage host networking; it relies on SAL to do so during vm_start()
print("=== CloudHV Ubuntu 24.04 with SSH key (cloud-init) ===");
// ---------- Inputs ----------
// Public key injected into the guest 'ubuntu' user via cloud-init (key-only login).
let user_pubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFyZJCEsvRc0eitsOoq+ywC5Lmqejvk3hXMVbO0AxPrd maxime@maxime-arch";
// Optional overrides for boot method (if firmware is present, it will be preferred)
let firmware_path_override = ""; // e.g., "/usr/share/cloud-hypervisor/hypervisor-fw"
let kernel_path_override = ""; // e.g., "/path/to/vmlinux"
let kernel_cmdline = "console=ttyS0 reboot=k panic=1";
// Cloud-init hostname and instance id (used to identify leases reliably)
let cloudinit_hostname = "noblevm";
let cloudinit_instance_id = "iid-ubuntu-noble-ssh";
// Paths
let base_dir = "/tmp/virt_images";
let seed_iso = `${base_dir}/seed-ssh.iso`;
let user_data = `${base_dir}/user-data`;
let meta_data = `${base_dir}/meta-data`;
// ---------- Dependency checks ----------
// which() yields unit () or "" for a missing binary; both forms are treated as absent.
print("\n--- Checking dependencies ---");
let chs = which("cloud-hypervisor-static");
let chrs = which("ch-remote-static");
let clds = which("cloud-localds");
let geniso = which("genisoimage");
let qemu = which("qemu-img");
let missing = false;
if chs == () || chs == "" {
    print("❌ cloud-hypervisor-static not found on PATH");
    missing = true;
}
if chrs == () || chrs == "" {
    print("❌ ch-remote-static not found on PATH");
    missing = true;
}
if (clds == () || clds == "") && (geniso == () || geniso == "") {
    print("❌ Neither cloud-localds nor genisoimage is available. Install cloud-image-utils or genisoimage.");
    missing = true;
}
if qemu == () || qemu == "" {
    print("❌ qemu-img not found (required by base image builder)");
    missing = true;
}
if missing {
    print("=== Aborting due to missing dependencies ===");
    exit();
}
print("✓ Dependencies look OK");
// ---------- Ensure base image ----------
print("\n--- Ensuring Ubuntu 24.04 cloud image ---");
let base;
try {
    // Resize to e.g. 10 GiB sparse (adjust as needed)
    base = qcow2_build_ubuntu_24_04_base(base_dir, 10);
} catch (err) {
    print(`❌ Failed to build/ensure base image: ${err}`);
    exit();
}
let disk_path = base.base_image_path;
print(`✓ Using base image: ${disk_path}`);
// ---------- Build cloud-init NoCloud seed (user-data/meta-data) ----------
print("\n--- Building NoCloud seed (SSH key) ---");
run_silent(`mkdir -p ${base_dir}`);
// Compose user-data and meta-data
// Key-only login: password auth disabled; access is only via the injected SSH key.
let ud = `#cloud-config
users:
  - name: ubuntu
    groups: [adm, cdrom, dialout, lxd, plugdev, sudo]
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    lock_passwd: true
    ssh_authorized_keys:
      - ${user_pubkey}
ssh_pwauth: false
package_update: true
`;
let md = `instance-id: ${cloudinit_instance_id}
local-hostname: ${cloudinit_hostname}
`;
// Write files
// Quoted 'EOF' keeps the heredoc literal; Rhai interpolation of ${ud}/${md} already happened.
let wr1 = run_silent(`/bin/bash -lc "cat > ${user_data} <<'EOF'
${ud}
EOF"`);
if !wr1.success { print(`❌ Failed to write ${user_data}`); exit(); }
let wr2 = run_silent(`/bin/bash -lc "cat > ${meta_data} <<'EOF'
${md}
EOF"`);
if !wr2.success { print(`❌ Failed to write ${meta_data}`); exit(); }
// Build seed ISO (prefer cloud-localds)
let built = false;
if !(clds == () || clds == "") {
    let r = run_silent(`cloud-localds ${seed_iso} ${user_data} ${meta_data}`);
    if r.success { built = true; }
}
// Fallback: genisoimage with the NoCloud volume id "cidata".
if !built {
    if geniso == () || geniso == "" {
        print("❌ Neither cloud-localds nor genisoimage available to build seed.iso");
        exit();
    }
    let r2 = run_silent(`genisoimage -output ${seed_iso} -volid cidata -joliet -rock ${user_data} ${meta_data}`);
    if !r2.success {
        print("❌ genisoimage failed to create seed.iso");
        exit();
    }
}
print(`✓ Seed ISO: ${seed_iso}`);
// ---------- Determine boot method (firmware or kernel) ----------
print("\n--- Determining boot method ---");
let firmware_path = "";
if firmware_path_override != "" && exist(firmware_path_override) {
firmware_path = firmware_path_override;
} else {
let candidates = [
"/usr/local/share/cloud-hypervisor/hypervisor-fw",
"/usr/share/cloud-hypervisor/hypervisor-fw",
"/usr/lib/cloud-hypervisor/hypervisor-fw",
"/tmp/virt_images/hypervisor-fw"
];
for p in candidates {
if exist(p) { firmware_path = p; break; }
}
}
let kernel_path = "";
if kernel_path_override != "" && exist(kernel_path_override) {
kernel_path = kernel_path_override;
}
if firmware_path == "" && kernel_path == "" {
print("❌ No firmware_path or kernel_path found. Set firmware_path_override or kernel_path_override and re-run.");
exit();
}
if firmware_path != "" {
print(`✓ Using firmware boot: ${firmware_path}`);
} else {
print(`✓ Using direct kernel boot: ${kernel_path}`);
}
// ---------- Create and start VM ----------
print("\n--- Creating and starting VM ---");
// Nanosecond timestamp yields a unique-enough VM id per run.
let rid = run_silent("date +%s%N");
// Make suffix robust even if date outputs nothing
let suffix = "100000";
if rid.success {
    let t = rid.stdout.trim();
    if t != "" { suffix = t; }
}
let vm_id = `noble_ssh_${suffix}`;
let spec = #{
    "id": vm_id,
    "disk_path": disk_path,
    "api_socket": "",
    "vcpus": 2,
    "memory_mb": 2048
};
if firmware_path != "" {
    spec.firmware_path = firmware_path;
} else {
    spec.kernel_path = kernel_path;
    spec.cmdline = kernel_cmdline;
}
// Attach the NoCloud seed ISO as a read-only disk
spec.extra_args = [
    "--disk", `path=${seed_iso},readonly=true`
];
try {
    let created = cloudhv_vm_create(spec);
    print(`✓ VM created: ${created}`);
} catch (err) {
    print(`❌ VM create failed: ${err}`);
    exit();
}
try {
    cloudhv_vm_start(vm_id);
    print("✓ VM start invoked");
} catch (err) {
    print(`❌ VM start failed: ${err}`);
    exit();
}
// ---------- Wait for VM API socket and probe readiness ----------
print("\n--- Waiting for VM API socket ---");
let api_sock = "";
// Discover socket path (from SAL or common defaults)
let fallback_candidates = [
    `/root/hero/virt/vms/${vm_id}/api.sock`,
    `/home/maxime/hero/virt/vms/${vm_id}/api.sock`
];
// First, try to detect the socket on disk with a longer timeout
let sock_exists = false;
for i in 0..180 {
    sleep(1);
    let info = cloudhv_vm_info(vm_id);
    api_sock = info.spec.api_socket;
    if api_sock == () || api_sock == "" {
        for cand in fallback_candidates {
            if exist(cand) { api_sock = cand; break; }
        }
    }
    if api_sock != () && api_sock != "" && exist(api_sock) {
        sock_exists = true;
        break;
    }
}
// Regardless of filesystem existence, also try probing the API directly
let api_ok = false;
if api_sock != () && api_sock != "" {
    for i in 0..60 {
        let r = run_silent(`ch-remote-static --api-socket ${api_sock} info`);
        if r.success { api_ok = true; break; }
        sleep(1);
    }
}
if api_ok {
    print("✓ VM API reachable");
} else if sock_exists {
    print("⚠️ VM API socket exists but API not reachable yet");
} else {
    print("⚠️ VM API socket not found yet; proceeding");
    // Dump the console log tail to aid boot diagnostics when the API never appeared.
    let info_dbg = cloudhv_vm_info(vm_id);
    let log_path = info_dbg.runtime.log_file;
    if exist(log_path) {
        let t = run_silent(`tail -n 120 ${log_path}`);
        if t.success && t.stdout.trim() != "" {
            print("\n--- Last 120 lines of console log (diagnostics) ---");
            print(t.stdout);
            print("--- End of console log ---");
        }
    } else {
        print(`(console log not found at ${log_path})`);
    }
}
// ---------- Discover VM IP from dnsmasq leases ----------
print("\n--- Discovering VM IP (dnsmasq leases) ---");
// SAL enables system dnsmasq for br-hero by default; leases usually at /var/lib/misc/dnsmasq.leases
let leases_paths = [
    "/var/lib/misc/dnsmasq.leases",
    "/var/lib/dnsmasq/dnsmasq.leases"
];
let vm_ip = "";
for path in leases_paths {
    if !exist(path) { continue; }
    // Poll this lease file for up to 120 s, matching our cloud-init hostname in column 4.
    for i in 0..120 {
        sleep(1);
        // Pure awk (no nested shells/pipes). Keep last IP matching hostname.
        let lr = run_silent(`awk -v host="${cloudinit_hostname}" '($4 ~ host){ip=$3} END{if(ip!=\"\") print ip}' ${path}`);
        if lr.success {
            let ip = lr.stdout.trim();
            if ip != "" {
                vm_ip = ip;
                break;
            }
        }
    }
    if vm_ip != "" { break; }
}
// ---------- Output connection details ----------
print("\n--- VM access details ---");
let info = cloudhv_vm_info(vm_id);
print(`VM ID: ${vm_id}`);
if info.runtime.pid != () {
    print(`PID: ${info.runtime.pid}`);
}
print(`Status: ${info.runtime.status}`);
print(`API socket: ${info.spec.api_socket}`);
print(`Console log: ${info.runtime.log_file}`);
print(`Seed ISO: ${seed_iso}`);
print(`Hostname: ${cloudinit_hostname}`);
if vm_ip != "" {
    print("\nSSH command (default user 'ubuntu'):");
    print(`ssh -o StrictHostKeyChecking=no ubuntu@${vm_ip}`);
} else {
    // Non-fatal: the lease may simply not have landed yet; give manual follow-ups.
    print("\n⚠ Could not resolve VM IP yet from leases. Try later:");
    print(" - Check leases: sudo cat /var/lib/misc/dnsmasq.leases | grep noblevm");
    print(" - Or find on bridge (example): ip -4 neigh show dev br-hero");
    print(" - Then SSH: ssh -o StrictHostKeyChecking=no ubuntu@<IP>");
}
print("\n=== Completed: Ubuntu VM launched with SSH key via cloud-init ===");

View File

@@ -0,0 +1,229 @@
// End-to-end smoke test for the new qcow2 + cloud-hypervisor refactor
// This script executes in logical phases so we can see clearly what works.
//
// Phases:
// 1) Host preflight check
// 2) Image preparation (Ubuntu) -> raw disk
// 3) Launch VM via builder using prepared raw disk
// 4) Inspect VM info, list VMs
// 5) Stop & delete VM
// 6) Launch VM via one-shot wrapper vm_easy_launch
// 7) Inspect VM info, list VMs
// 8) Stop & delete VM
//
// Notes:
// - Run as root on the host (required for NBD/mount/networking).
// - Base images expected at:
// /images/noble-server-cloudimg-amd64.img
// /images/alpine-virt-cloudimg-amd64.qcow2 (Alpine prepare not implemented yet)
// /images/hypervisor-fw (firmware binary used via --kernel)
// - Network defaults: IPv4 NAT (dnsmasq DHCP) + IPv6 routed over Mycelium (RA/DHCPv6). No static IPv6 is written into the guest; it autoconfigures via RA.
//
// Conventions:
// - Functional builder chaining: b = memory_mb(b, 4096), etc.
// - Each phase prints a banner and either "OK" or "FAILED" with detailed error message.
// Print a three-line section banner around the given phase title.
fn banner(s) {
    let rule = "==================================================";
    print(rule);
    print(s);
    print(rule);
}
// Report a passed step with an "[OK] " prefix.
fn ok(s) {
    print(`[OK] ${s}`);
}
// Report a failed step with a "[FAILED] " prefix (does not abort the script).
fn fail(msg) {
    print(`[FAILED] ${msg}`);
}
// simple pretty printer for small maps: one " key: value" line per entry
fn dump_map(m) {
    for key in m.keys() {
        print(` ${key}: ${m[key].to_string()}`);
    }
}
// simple pretty printer for arrays: one " - item" line per element.
// Dropped the original's unused counter (`let i = 0;`), which was never read.
fn dump_array(a) {
    for x in a {
        print(" - " + x.to_string());
    }
}
// ------------------------------------------------------------------------------------
// Phase 1: Host preflight check
// ------------------------------------------------------------------------------------
banner("PHASE 1: host_check()");
let hc = host_check();
if !(hc.ok == true) {
    fail("host_check indicates missing dependencies; details:");
    print("critical:");
    dump_array(hc.critical);
    print("optional:");
    dump_array(hc.optional);
    print("notes:");
    dump_array(hc.notes);
    // Short-circuit: nothing else will work without deps
    throw "Missing critical host dependencies";
} else {
    ok("host_check passed");
}
// ------------------------------------------------------------------------------------
// Phase 2: Image preparation for Ubuntu
// - produces a per-VM raw disk in $HOME/hero/virt/vms/<id>/disk.raw
// ------------------------------------------------------------------------------------
banner("PHASE 2: image_prepare (Ubuntu) -> raw disk");
let vmA = "vm-e2e-a";
let prep_opts = #{
    id: vmA,
    flavor: "ubuntu",
    // source: optional override, default uses /images/noble-server-cloudimg-amd64.img
    // target_dir: optional override, default $HOME/hero/virt/vms/<id>
    disable_cloud_init_net: true,
};
let prep_res = ();
let prep_ok = false;
try {
    prep_res = image_prepare(prep_opts);
    ok("image_prepare returned:");
    dump_map(prep_res);
    // Guard against a missing raw_disk key (unit value) before Phase 3 uses it.
    if prep_res.raw_disk == () {
        fail("prep_res.raw_disk is UNIT; expected string path");
    } else {
        ok("raw_disk: " + prep_res.raw_disk);
        prep_ok = true;
    }
} catch (e) {
    fail("image_prepare failed: " + e.to_string());
}
if !(prep_ok) {
    throw "Stopping due to image_prepare failure";
}
// ------------------------------------------------------------------------------------
// Phase 3: Launch VM via builder using the prepared raw disk
// ------------------------------------------------------------------------------------
banner("PHASE 3: Launch via cloudhv_builder (disk from Phase 2)");
// Functional builder chaining: each call returns the updated builder value.
let b = cloudhv_builder(vmA);
let b = disk(b, prep_res.raw_disk);
let b = memory_mb(b, 4096);
let b = vcpus(b, 2);
// Optional extras:
// let b = extra_arg(b, "--serial"); let b = extra_arg(b, "tty");
// let b = no_default_net(b);
let vm_id_a = "";
try {
    vm_id_a = launch(b);
    ok("builder.launch started VM id: " + vm_id_a);
} catch (e) {
    fail("builder.launch failed: " + e.to_string());
    throw "Stopping due to launch failure for vm-e2e-a";
}
// ------------------------------------------------------------------------------------
// Phase 4: Inspect VM info, list VMs
// ------------------------------------------------------------------------------------
banner("PHASE 4: cloudhv_vm_info / cloudhv_vm_list");
try {
    let info_a = cloudhv_vm_info(vm_id_a);
    ok("cloudhv_vm_info:");
    dump_map(info_a);
} catch (e) {
    fail("cloudhv_vm_info failed: " + e.to_string());
}
try {
    let vms = cloudhv_vm_list();
    ok("cloudhv_vm_list count = " + vms.len.to_string());
} catch (e) {
    fail("cloudhv_vm_list failed: " + e.to_string());
}
// Brief settle pause before stopping the VM in Phase 5. The original
// `sleep(1000000)` (~11.6 days — sleep() takes seconds elsewhere in this file,
// e.g. the 1-second polling loops) was clearly leftover debugging and would
// stall the entire smoke test.
sleep(5);
// ------------------------------------------------------------------------------------
// Phase 5: Stop & delete VM A
// ------------------------------------------------------------------------------------
banner("PHASE 5: Stop & delete VM A");
// Try a graceful shutdown first; escalate to a forced stop only if that fails.
try {
    cloudhv_vm_stop(vm_id_a, false);
    ok("cloudhv_vm_stop graceful OK");
} catch (e) {
    fail("cloudhv_vm_stop (graceful) failed: " + e.to_string() + " -> trying force");
    try {
        cloudhv_vm_stop(vm_id_a, true);
        ok("cloudhv_vm_stop force OK");
    } catch (e2) {
        fail("cloudhv_vm_stop force failed: " + e2.to_string());
    }
}
// Second argument true: also remove the VM's on-disk artifacts.
try {
    cloudhv_vm_delete(vm_id_a, true);
    ok("cloudhv_vm_delete OK (deleted disks)");
} catch (e) {
    fail("cloudhv_vm_delete failed: " + e.to_string());
}
// ------------------------------------------------------------------------------------
// Phase 6: Launch VM via one-shot wrapper vm_easy_launch()
// ------------------------------------------------------------------------------------
banner("PHASE 6: vm_easy_launch for VM B");
let vmB = "vm-e2e-b";
let vm_id_b = "";
try {
    vm_id_b = vm_easy_launch("ubuntu", vmB, 4096, 2);
    ok("vm_easy_launch started VM id: " + vm_id_b);
} catch (e) {
    fail("vm_easy_launch failed: " + e.to_string());
    throw "Stopping due to vm_easy_launch failure";
}
// ------------------------------------------------------------------------------------
// Phase 7: Inspect VM B info, list VMs
// ------------------------------------------------------------------------------------
banner("PHASE 7: Inspect VM B");
try {
    let info_b = cloudhv_vm_info(vm_id_b);
    ok("cloudhv_vm_info (B):");
    dump_map(info_b);
} catch (e) {
    fail("cloudhv_vm_info (B) failed: " + e.to_string());
}
try {
    let vms2 = cloudhv_vm_list();
    ok("cloudhv_vm_list count = " + vms2.len.to_string());
} catch (e) {
    fail("cloudhv_vm_list failed: " + e.to_string());
}
// ------------------------------------------------------------------------------------
// Phase 8: Stop & delete VM B
// ------------------------------------------------------------------------------------
banner("PHASE 8: Stop & delete VM B");
try {
    cloudhv_vm_stop(vm_id_b, false);
    ok("cloudhv_vm_stop (B) graceful OK");
} catch (e) {
    fail("cloudhv_vm_stop (B) graceful failed: " + e.to_string() + " -> trying force");
    try {
        cloudhv_vm_stop(vm_id_b, true);
        ok("cloudhv_vm_stop (B) force OK");
    } catch (e2) {
        fail("cloudhv_vm_stop (B) force failed: " + e2.to_string());
    }
}
try {
    cloudhv_vm_delete(vm_id_b, true);
    ok("cloudhv_vm_delete (B) OK (deleted disks)");
} catch (e) {
    fail("cloudhv_vm_delete (B) failed: " + e.to_string());
}
banner("DONE: All phases executed");