cloud-hypervisor SAL + rhai test script for it
This commit is contained in:
459
packages/system/virt/src/cloudhv/mod.rs
Normal file
459
packages/system/virt/src/cloudhv/mod.rs
Normal file
@@ -0,0 +1,459 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use sal_os;
|
||||
use sal_process;
|
||||
|
||||
/// Error type for Cloud Hypervisor operations
|
||||
#[derive(Debug)]
|
||||
pub enum CloudHvError {
|
||||
CommandFailed(String),
|
||||
IoError(String),
|
||||
JsonError(String),
|
||||
DependencyMissing(String),
|
||||
InvalidSpec(String),
|
||||
NotFound(String),
|
||||
}
|
||||
|
||||
impl fmt::Display for CloudHvError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
CloudHvError::CommandFailed(e) => write!(f, "{}", e),
|
||||
CloudHvError::IoError(e) => write!(f, "IO error: {}", e),
|
||||
CloudHvError::JsonError(e) => write!(f, "JSON error: {}", e),
|
||||
CloudHvError::DependencyMissing(e) => write!(f, "Dependency missing: {}", e),
|
||||
CloudHvError::InvalidSpec(e) => write!(f, "Invalid spec: {}", e),
|
||||
CloudHvError::NotFound(e) => write!(f, "{}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Error for CloudHvError {}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct VmSpec {
|
||||
pub id: String,
|
||||
/// Optional for firmware boot; required for direct kernel boot
|
||||
pub kernel_path: Option<String>,
|
||||
/// Optional for direct kernel boot; required for firmware boot
|
||||
pub firmware_path: Option<String>,
|
||||
/// Disk image path (qcow2 or raw)
|
||||
pub disk_path: String,
|
||||
/// API socket path for ch-remote and management
|
||||
pub api_socket: String,
|
||||
/// vCPUs to boot with
|
||||
pub vcpus: u32,
|
||||
/// Memory in MB
|
||||
pub memory_mb: u32,
|
||||
/// Kernel cmdline (only used for direct kernel boot)
|
||||
pub cmdline: Option<String>,
|
||||
/// Extra args (raw) if you need to extend; keep minimal for Phase 2
|
||||
pub extra_args: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct VmRuntime {
|
||||
/// PID of cloud-hypervisor process if running
|
||||
pub pid: Option<i64>,
|
||||
/// Last known status: "stopped" | "running"
|
||||
pub status: String,
|
||||
/// Console log file path
|
||||
pub log_file: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct VmRecord {
|
||||
pub spec: VmSpec,
|
||||
pub runtime: VmRuntime,
|
||||
}
|
||||
|
||||
fn ensure_deps() -> Result<(), CloudHvError> {
|
||||
if sal_process::which("cloud-hypervisor-static").is_none() {
|
||||
return Err(CloudHvError::DependencyMissing(
|
||||
"cloud-hypervisor-static not found on PATH. Install Cloud Hypervisor static binary.".into(),
|
||||
));
|
||||
}
|
||||
if sal_process::which("ch-remote-static").is_none() {
|
||||
return Err(CloudHvError::DependencyMissing(
|
||||
"ch-remote-static not found on PATH. Install Cloud Hypervisor tools (static).".into(),
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn hero_vm_root() -> PathBuf {
|
||||
let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".into());
|
||||
Path::new(&home).join("hero/virt/vms")
|
||||
}
|
||||
|
||||
fn vm_dir(id: &str) -> PathBuf {
|
||||
hero_vm_root().join(id)
|
||||
}
|
||||
|
||||
fn vm_json_path(id: &str) -> PathBuf {
|
||||
vm_dir(id).join("vm.json")
|
||||
}
|
||||
|
||||
fn vm_log_path(id: &str) -> PathBuf {
|
||||
vm_dir(id).join("logs/console.log")
|
||||
}
|
||||
|
||||
fn vm_pid_path(id: &str) -> PathBuf {
|
||||
vm_dir(id).join("pid")
|
||||
}
|
||||
|
||||
fn write_json(path: &Path, value: &serde_json::Value) -> Result<(), CloudHvError> {
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| CloudHvError::IoError(e.to_string()))?;
|
||||
}
|
||||
let s = serde_json::to_string_pretty(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
|
||||
fs::write(path, s).map_err(|e| CloudHvError::IoError(e.to_string()))
|
||||
}
|
||||
|
||||
fn read_json(path: &Path) -> Result<serde_json::Value, CloudHvError> {
|
||||
let content = fs::read_to_string(path).map_err(|e| CloudHvError::IoError(e.to_string()))?;
|
||||
serde_json::from_str(&content).map_err(|e| CloudHvError::JsonError(e.to_string()))
|
||||
}
|
||||
|
||||
fn proc_exists(pid: i64) -> bool {
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
Path::new(&format!("/proc/{}", pid)).exists()
|
||||
}
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
{
|
||||
// Minimal check for non-Linux; try a kill -0 style command
|
||||
let res = sal_process::run(&format!("kill -0 {}", pid)).die(false).silent(true).execute();
|
||||
res.map(|r| r.success).unwrap_or(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// Create and persist a VM spec
|
||||
pub fn vm_create(spec: &VmSpec) -> Result<String, CloudHvError> {
|
||||
// Validate inputs minimally
|
||||
if spec.id.trim().is_empty() {
|
||||
return Err(CloudHvError::InvalidSpec("spec.id must not be empty".into()));
|
||||
}
|
||||
// Validate boot method: either firmware_path exists or kernel_path exists
|
||||
let has_fw = spec
|
||||
.firmware_path
|
||||
.as_ref()
|
||||
.map(|p| Path::new(p).exists())
|
||||
.unwrap_or(false);
|
||||
let has_kernel = spec
|
||||
.kernel_path
|
||||
.as_ref()
|
||||
.map(|p| Path::new(p).exists())
|
||||
.unwrap_or(false);
|
||||
|
||||
if !(has_fw || has_kernel) {
|
||||
return Err(CloudHvError::InvalidSpec(
|
||||
"either firmware_path or kernel_path must be set to an existing file".into(),
|
||||
));
|
||||
}
|
||||
|
||||
if !Path::new(&spec.disk_path).exists() {
|
||||
return Err(CloudHvError::InvalidSpec(format!(
|
||||
"disk_path not found: {}",
|
||||
&spec.disk_path
|
||||
)));
|
||||
}
|
||||
if spec.vcpus == 0 {
|
||||
return Err(CloudHvError::InvalidSpec("vcpus must be >= 1".into()));
|
||||
}
|
||||
if spec.memory_mb == 0 {
|
||||
return Err(CloudHvError::InvalidSpec("memory_mb must be >= 128".into()));
|
||||
}
|
||||
|
||||
// Prepare directory layout
|
||||
let dir = vm_dir(&spec.id);
|
||||
sal_os::mkdir(
|
||||
dir.to_str()
|
||||
.unwrap_or_else(|| "/tmp/hero/virt/vms/__invalid__"),
|
||||
)
|
||||
.map_err(|e| CloudHvError::IoError(e.to_string()))?;
|
||||
let log_dir = dir.join("logs");
|
||||
sal_os::mkdir(log_dir.to_str().unwrap()).map_err(|e| CloudHvError::IoError(e.to_string()))?;
|
||||
|
||||
// Persist initial record
|
||||
let rec = VmRecord {
|
||||
spec: spec.clone(),
|
||||
runtime: VmRuntime {
|
||||
pid: None,
|
||||
status: "stopped".into(),
|
||||
log_file: vm_log_path(&spec.id).to_string_lossy().into_owned(),
|
||||
},
|
||||
};
|
||||
let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
|
||||
write_json(&vm_json_path(&spec.id), &value)?;
|
||||
|
||||
Ok(spec.id.clone())
|
||||
}
|
||||
|
||||
/// Start a VM using cloud-hypervisor
|
||||
pub fn vm_start(id: &str) -> Result<(), CloudHvError> {
|
||||
ensure_deps()?;
|
||||
|
||||
// Load record
|
||||
let p = vm_json_path(id);
|
||||
if !p.exists() {
|
||||
return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
|
||||
}
|
||||
let value = read_json(&p)?;
|
||||
let mut rec: VmRecord =
|
||||
serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
|
||||
|
||||
// Prepare invocation
|
||||
let api_socket = if rec.spec.api_socket.trim().is_empty() {
|
||||
vm_dir(id).join("api.sock").to_string_lossy().into_owned()
|
||||
} else {
|
||||
rec.spec.api_socket.clone()
|
||||
};
|
||||
let log_file = vm_log_path(id).to_string_lossy().into_owned();
|
||||
|
||||
// Build command (minimal args for Phase 2)
|
||||
// We redirect all output to log_file via shell and keep process in background with nohup
|
||||
|
||||
// CH CLI flags (very common subset)
|
||||
// --disk path=... uses virtio-blk by default
|
||||
let mut parts: Vec<String> = vec![
|
||||
"cloud-hypervisor-static".into(),
|
||||
"--api-socket".into(),
|
||||
api_socket.clone(),
|
||||
];
|
||||
|
||||
if let Some(fw) = rec.spec.firmware_path.clone() {
|
||||
// Firmware boot path
|
||||
parts.push("--firmware".into());
|
||||
parts.push(fw);
|
||||
} else if let Some(kpath) = rec.spec.kernel_path.clone() {
|
||||
// Direct kernel boot path
|
||||
let cmdline = rec
|
||||
.spec
|
||||
.cmdline
|
||||
.clone()
|
||||
.unwrap_or_else(|| "console=ttyS0 reboot=k panic=1".to_string());
|
||||
parts.push("--kernel".into());
|
||||
parts.push(kpath);
|
||||
parts.push("--cmdline".into());
|
||||
parts.push(cmdline);
|
||||
} else {
|
||||
return Err(CloudHvError::InvalidSpec(
|
||||
"neither firmware_path nor kernel_path set at start time".into(),
|
||||
));
|
||||
}
|
||||
|
||||
parts.push("--disk".into());
|
||||
parts.push(format!("path={}", rec.spec.disk_path));
|
||||
parts.push("--cpus".into());
|
||||
parts.push(format!("boot={}", rec.spec.vcpus));
|
||||
parts.push("--memory".into());
|
||||
parts.push(format!("size={}M", rec.spec.memory_mb));
|
||||
parts.push("--serial".into());
|
||||
parts.push("tty".into());
|
||||
parts.push("--console".into());
|
||||
parts.push("off".into());
|
||||
|
||||
if let Some(extra) = rec.spec.extra_args.clone() {
|
||||
for e in extra {
|
||||
parts.push(e);
|
||||
}
|
||||
}
|
||||
|
||||
let args_str = shell_join(&parts);
|
||||
let script = format!(
|
||||
"#!/bin/bash -e
|
||||
nohup {} > '{}' 2>&1 &
|
||||
echo $! > '{}'
|
||||
",
|
||||
args_str,
|
||||
log_file,
|
||||
vm_pid_path(id).to_string_lossy()
|
||||
);
|
||||
|
||||
// Execute script; this will background cloud-hypervisor and return
|
||||
let result = sal_process::run(&script).execute();
|
||||
match result {
|
||||
Ok(res) => {
|
||||
if !res.success {
|
||||
return Err(CloudHvError::CommandFailed(format!(
|
||||
"Failed to start VM '{}': {}",
|
||||
id, res.stderr
|
||||
)));
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(CloudHvError::CommandFailed(format!(
|
||||
"Failed to start VM '{}': {}",
|
||||
id, e
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
// Read PID back
|
||||
let pid = match fs::read_to_string(vm_pid_path(id)) {
|
||||
Ok(s) => s.trim().parse::<i64>().ok(),
|
||||
Err(_) => None,
|
||||
};
|
||||
|
||||
// Update state
|
||||
rec.runtime.pid = pid;
|
||||
rec.runtime.status = if pid.is_some() { "running".into() } else { "stopped".into() };
|
||||
rec.runtime.log_file = log_file;
|
||||
rec.spec.api_socket = api_socket.clone();
|
||||
|
||||
let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
|
||||
write_json(&vm_json_path(id), &value)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Return VM record info (spec + runtime) by id
|
||||
pub fn vm_info(id: &str) -> Result<VmRecord, CloudHvError> {
|
||||
let p = vm_json_path(id);
|
||||
if !p.exists() {
|
||||
return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
|
||||
}
|
||||
let value = read_json(&p)?;
|
||||
let rec: VmRecord = serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
|
||||
Ok(rec)
|
||||
}
|
||||
|
||||
/// Stop a VM via ch-remote (graceful), optionally force kill
|
||||
pub fn vm_stop(id: &str, force: bool) -> Result<(), CloudHvError> {
|
||||
ensure_deps().ok(); // best-effort; we might still force-kill
|
||||
|
||||
let p = vm_json_path(id);
|
||||
if !p.exists() {
|
||||
return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
|
||||
}
|
||||
let value = read_json(&p)?;
|
||||
let mut rec: VmRecord =
|
||||
serde_json::from_value(value).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
|
||||
|
||||
// Attempt graceful shutdown if api socket known
|
||||
if !rec.spec.api_socket.trim().is_empty() {
|
||||
let cmd = format!("ch-remote-static --api-socket {} shutdown", rec.spec.api_socket);
|
||||
let _ = sal_process::run(&cmd).die(false).silent(true).execute();
|
||||
}
|
||||
|
||||
// Wait a bit for process to exit
|
||||
if let Some(pid) = rec.runtime.pid {
|
||||
for _ in 0..20 {
|
||||
if !proc_exists(pid) {
|
||||
break;
|
||||
}
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
}
|
||||
// If still alive and force, kill -9
|
||||
if proc_exists(pid) && force {
|
||||
let _ = sal_process::run(&format!("kill -9 {}", pid))
|
||||
.die(false)
|
||||
.silent(true)
|
||||
.execute();
|
||||
}
|
||||
}
|
||||
|
||||
// Update state
|
||||
rec.runtime.status = "stopped".into();
|
||||
rec.runtime.pid = None;
|
||||
let value = serde_json::to_value(&rec).map_err(|e| CloudHvError::JsonError(e.to_string()))?;
|
||||
write_json(&vm_json_path(id), &value)?;
|
||||
|
||||
// Remove pid file
|
||||
let _ = fs::remove_file(vm_pid_path(id));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete a VM definition; optionally delete disks.
|
||||
pub fn vm_delete(id: &str, delete_disks: bool) -> Result<(), CloudHvError> {
|
||||
let p = vm_json_path(id);
|
||||
if !p.exists() {
|
||||
return Err(CloudHvError::NotFound(format!("VM '{}' not found", id)));
|
||||
}
|
||||
let rec: VmRecord = serde_json::from_value(read_json(&p)?)
|
||||
.map_err(|e| CloudHvError::JsonError(e.to_string()))?;
|
||||
|
||||
// Refuse to delete if still running
|
||||
if let Some(pid) = rec.runtime.pid {
|
||||
if proc_exists(pid) {
|
||||
return Err(CloudHvError::CommandFailed(
|
||||
"VM appears to be running; stop it first".into(),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
if delete_disks {
|
||||
let _ = fs::remove_file(&rec.spec.disk_path);
|
||||
}
|
||||
|
||||
let d = vm_dir(id);
|
||||
fs::remove_dir_all(&d).map_err(|e| CloudHvError::IoError(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// List all VMs
|
||||
pub fn vm_list() -> Result<Vec<VmRecord>, CloudHvError> {
|
||||
let root = hero_vm_root();
|
||||
if !root.exists() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
let mut out = vec![];
|
||||
for entry in fs::read_dir(&root).map_err(|e| CloudHvError::IoError(e.to_string()))? {
|
||||
let entry = entry.map_err(|e| CloudHvError::IoError(e.to_string()))?;
|
||||
let p = entry.path();
|
||||
if !p.is_dir() {
|
||||
continue;
|
||||
}
|
||||
let vm_json = p.join("vm.json");
|
||||
if !vm_json.exists() {
|
||||
continue;
|
||||
}
|
||||
let rec: VmRecord = serde_json::from_value(read_json(&vm_json)?)
|
||||
.map_err(|e| CloudHvError::JsonError(e.to_string()))?;
|
||||
|
||||
out.push(rec);
|
||||
}
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Render a shell-safe command string from vector of tokens
|
||||
fn shell_join(parts: &Vec<String>) -> String {
|
||||
let mut s = String::new();
|
||||
for (i, p) in parts.iter().enumerate() {
|
||||
if i > 0 {
|
||||
s.push(' ');
|
||||
}
|
||||
s.push_str(&shell_escape(p));
|
||||
}
|
||||
s
|
||||
}
|
||||
|
||||
fn shell_escape(s: &str) -> String {
|
||||
if s.is_empty() {
|
||||
return "''".into();
|
||||
}
|
||||
if s
|
||||
.chars()
|
||||
.all(|c| c.is_ascii_alphanumeric() || "-_./=:".contains(c))
|
||||
{
|
||||
return s.into();
|
||||
}
|
||||
// single-quote wrap, escape existing quotes
|
||||
let mut out = String::from("'");
|
||||
for ch in s.chars() {
|
||||
if ch == '\'' {
|
||||
out.push_str("'\"'\"'");
|
||||
} else {
|
||||
out.push(ch);
|
||||
}
|
||||
}
|
||||
out.push('\'');
|
||||
out
|
||||
}
|
@@ -25,6 +25,7 @@ pub mod buildah;
|
||||
pub mod nerdctl;
|
||||
pub mod rfs;
|
||||
pub mod qcow2;
|
||||
pub mod cloudhv;
|
||||
|
||||
pub mod rhai;
|
||||
|
||||
|
@@ -1,4 +1,5 @@
|
||||
pub mod buildah;
|
||||
pub mod nerdctl;
|
||||
pub mod rfs;
|
||||
pub mod qcow2;
|
||||
pub mod qcow2;
|
||||
pub mod cloudhv;
|
@@ -9,6 +9,7 @@ pub mod buildah;
|
||||
pub mod nerdctl;
|
||||
pub mod rfs;
|
||||
pub mod qcow2;
|
||||
pub mod cloudhv;
|
||||
|
||||
/// Register all Virt module functions with the Rhai engine
|
||||
///
|
||||
@@ -32,6 +33,9 @@ pub fn register_virt_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult
|
||||
// Register QCOW2 module functions
|
||||
qcow2::register_qcow2_module(engine)?;
|
||||
|
||||
// Register Cloud Hypervisor module functions
|
||||
cloudhv::register_cloudhv_module(engine)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -40,3 +44,4 @@ pub use buildah::{bah_new, register_bah_module};
|
||||
pub use nerdctl::register_nerdctl_module;
|
||||
pub use rfs::register_rfs_module;
|
||||
pub use qcow2::register_qcow2_module;
|
||||
pub use cloudhv::register_cloudhv_module;
|
||||
|
173
packages/system/virt/src/rhai/cloudhv.rs
Normal file
173
packages/system/virt/src/rhai/cloudhv.rs
Normal file
@@ -0,0 +1,173 @@
|
||||
use crate::cloudhv;
|
||||
use crate::cloudhv::{VmRecord, VmRuntime, VmSpec};
|
||||
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map};
|
||||
|
||||
// Error adapter
|
||||
fn hv_to_rhai<T>(r: Result<T, cloudhv::CloudHvError>) -> Result<T, Box<EvalAltResult>> {
|
||||
r.map_err(|e| {
|
||||
Box::new(EvalAltResult::ErrorRuntime(
|
||||
format!("cloudhv error: {}", e).into(),
|
||||
rhai::Position::NONE,
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
// Map conversions
|
||||
|
||||
fn map_to_vmspec(spec: Map) -> Result<VmSpec, Box<EvalAltResult>> {
|
||||
let id = must_get_string(&spec, "id")?;
|
||||
let kernel_path = get_string(&spec, "kernel_path");
|
||||
let firmware_path = get_string(&spec, "firmware_path");
|
||||
let disk_path = must_get_string(&spec, "disk_path")?;
|
||||
let api_socket = get_string(&spec, "api_socket").unwrap_or_else(|| "".to_string());
|
||||
let vcpus = get_int(&spec, "vcpus").unwrap_or(1) as u32;
|
||||
let memory_mb = get_int(&spec, "memory_mb").unwrap_or(512) as u32;
|
||||
let cmdline = get_string(&spec, "cmdline");
|
||||
let extra_args = get_string_array(&spec, "extra_args");
|
||||
|
||||
Ok(VmSpec {
|
||||
id,
|
||||
kernel_path,
|
||||
firmware_path,
|
||||
disk_path,
|
||||
api_socket,
|
||||
vcpus,
|
||||
memory_mb,
|
||||
cmdline,
|
||||
extra_args,
|
||||
})
|
||||
}
|
||||
|
||||
fn vmspec_to_map(s: &VmSpec) -> Map {
|
||||
let mut m = Map::new();
|
||||
m.insert("id".into(), s.id.clone().into());
|
||||
if let Some(k) = &s.kernel_path {
|
||||
m.insert("kernel_path".into(), k.clone().into());
|
||||
} else {
|
||||
m.insert("kernel_path".into(), Dynamic::UNIT);
|
||||
}
|
||||
if let Some(fw) = &s.firmware_path {
|
||||
m.insert("firmware_path".into(), fw.clone().into());
|
||||
} else {
|
||||
m.insert("firmware_path".into(), Dynamic::UNIT);
|
||||
}
|
||||
m.insert("disk_path".into(), s.disk_path.clone().into());
|
||||
m.insert("api_socket".into(), s.api_socket.clone().into());
|
||||
m.insert("vcpus".into(), (s.vcpus as i64).into());
|
||||
m.insert("memory_mb".into(), (s.memory_mb as i64).into());
|
||||
if let Some(c) = &s.cmdline {
|
||||
m.insert("cmdline".into(), c.clone().into());
|
||||
} else {
|
||||
m.insert("cmdline".into(), Dynamic::UNIT);
|
||||
}
|
||||
if let Some(arr) = &s.extra_args {
|
||||
let mut a = Array::new();
|
||||
for s in arr {
|
||||
a.push(s.clone().into());
|
||||
}
|
||||
m.insert("extra_args".into(), a.into());
|
||||
} else {
|
||||
m.insert("extra_args".into(), Dynamic::UNIT);
|
||||
}
|
||||
m
|
||||
}
|
||||
|
||||
fn vmruntime_to_map(r: &VmRuntime) -> Map {
|
||||
let mut m = Map::new();
|
||||
match r.pid {
|
||||
Some(p) => m.insert("pid".into(), (p as i64).into()),
|
||||
None => m.insert("pid".into(), Dynamic::UNIT),
|
||||
};
|
||||
m.insert("status".into(), r.status.clone().into());
|
||||
m.insert("log_file".into(), r.log_file.clone().into());
|
||||
m
|
||||
}
|
||||
|
||||
fn vmrecord_to_map(rec: &VmRecord) -> Map {
|
||||
let mut m = Map::new();
|
||||
m.insert("spec".into(), vmspec_to_map(&rec.spec).into());
|
||||
m.insert("runtime".into(), vmruntime_to_map(&rec.runtime).into());
|
||||
m
|
||||
}
|
||||
|
||||
// Helpers for reading Rhai Map fields
|
||||
|
||||
fn must_get_string(m: &Map, k: &str) -> Result<String, Box<EvalAltResult>> {
|
||||
match m.get(k) {
|
||||
Some(v) if v.is_string() => Ok(v.clone().cast::<String>()),
|
||||
_ => Err(Box::new(EvalAltResult::ErrorRuntime(
|
||||
format!("missing or non-string field '{}'", k).into(),
|
||||
rhai::Position::NONE,
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_string(m: &Map, k: &str) -> Option<String> {
|
||||
m.get(k).and_then(|v| if v.is_string() { Some(v.clone().cast::<String>()) } else { None })
|
||||
}
|
||||
|
||||
fn get_int(m: &Map, k: &str) -> Option<i64> {
|
||||
m.get(k).and_then(|v| v.as_int().ok())
|
||||
}
|
||||
|
||||
fn get_string_array(m: &Map, k: &str) -> Option<Vec<String>> {
|
||||
m.get(k).and_then(|v| {
|
||||
if v.is_array() {
|
||||
let arr = v.clone().cast::<Array>();
|
||||
let mut out = vec![];
|
||||
for it in arr {
|
||||
if it.is_string() {
|
||||
out.push(it.cast::<String>());
|
||||
}
|
||||
}
|
||||
Some(out)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Rhai-exposed functions
|
||||
|
||||
pub fn cloudhv_vm_create(spec: Map) -> Result<String, Box<EvalAltResult>> {
|
||||
let s = map_to_vmspec(spec)?;
|
||||
hv_to_rhai(cloudhv::vm_create(&s))
|
||||
}
|
||||
|
||||
pub fn cloudhv_vm_start(id: &str) -> Result<(), Box<EvalAltResult>> {
|
||||
hv_to_rhai(cloudhv::vm_start(id))
|
||||
}
|
||||
|
||||
pub fn cloudhv_vm_stop(id: &str, force: bool) -> Result<(), Box<EvalAltResult>> {
|
||||
hv_to_rhai(cloudhv::vm_stop(id, force))
|
||||
}
|
||||
|
||||
pub fn cloudhv_vm_delete(id: &str, delete_disks: bool) -> Result<(), Box<EvalAltResult>> {
|
||||
hv_to_rhai(cloudhv::vm_delete(id, delete_disks))
|
||||
}
|
||||
|
||||
pub fn cloudhv_vm_list() -> Result<Array, Box<EvalAltResult>> {
|
||||
let vms = hv_to_rhai(cloudhv::vm_list())?;
|
||||
let mut arr = Array::new();
|
||||
for rec in vms {
|
||||
arr.push(vmrecord_to_map(&rec).into());
|
||||
}
|
||||
Ok(arr)
|
||||
}
|
||||
|
||||
pub fn cloudhv_vm_info(id: &str) -> Result<Map, Box<EvalAltResult>> {
|
||||
let rec = hv_to_rhai(cloudhv::vm_info(id))?;
|
||||
Ok(vmrecord_to_map(&rec))
|
||||
}
|
||||
|
||||
// Module registration
|
||||
|
||||
pub fn register_cloudhv_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
|
||||
engine.register_fn("cloudhv_vm_create", cloudhv_vm_create);
|
||||
engine.register_fn("cloudhv_vm_start", cloudhv_vm_start);
|
||||
engine.register_fn("cloudhv_vm_stop", cloudhv_vm_stop);
|
||||
engine.register_fn("cloudhv_vm_delete", cloudhv_vm_delete);
|
||||
engine.register_fn("cloudhv_vm_list", cloudhv_vm_list);
|
||||
engine.register_fn("cloudhv_vm_info", cloudhv_vm_info);
|
||||
Ok(())
|
||||
}
|
@@ -67,17 +67,18 @@ print("✓ snapshot deleted: s1");
|
||||
|
||||
// Optional: Base image builder (commented to avoid big downloads by default)
|
||||
// Uncomment to test manually on a dev machine with bandwidth.
|
||||
// print("\n--- Optional: Build Ubuntu 24.04 Base ---");
|
||||
// let base_dir = "/tmp/virt_images";
|
||||
// let base = qcow2_build_ubuntu_24_04_base(base_dir, 10);
|
||||
// if base.is_err() {
|
||||
// print(`⚠️ base build failed or skipped: ${base.unwrap_err()}`);
|
||||
// } else {
|
||||
// let m = base.unwrap();
|
||||
// print(`✓ Base image path: ${m.base_image_path}`);
|
||||
// print(`✓ Base snapshot: ${m.snapshot}`);
|
||||
// print(`✓ Source URL: ${m.url}`);
|
||||
// if m.resized_to_gb != () { print(`✓ Resized to: ${m.resized_to_gb}G`); }
|
||||
// }
|
||||
print("\n--- Optional: Build Ubuntu 24.04 Base ---");
|
||||
let base_dir = "/tmp/virt_images";
|
||||
let m;
|
||||
try {
|
||||
m = qcow2_build_ubuntu_24_04_base(base_dir, 10);
|
||||
} catch (err) {
|
||||
print(`⚠️ base build failed or skipped: ${err}`);
|
||||
exit();
|
||||
}
|
||||
print(`✓ Base image path: ${m.base_image_path}`);
|
||||
print(`✓ Base snapshot: ${m.snapshot}`);
|
||||
print(`✓ Source URL: ${m.url}`);
|
||||
if m.resized_to_gb != () { print(`✓ Resized to: ${m.resized_to_gb}G`); }
|
||||
|
||||
print("\n=== QCOW2 Basic Tests Completed ===");
|
129
packages/system/virt/tests/rhai/05_cloudhv_basic.rhai
Normal file
129
packages/system/virt/tests/rhai/05_cloudhv_basic.rhai
Normal file
@@ -0,0 +1,129 @@
|
||||
// Basic Cloud Hypervisor SAL smoke test (minimal)
|
||||
// - Skips gracefully if dependencies or inputs are missing
|
||||
// - Creates a VM spec, optionally starts/stops it if all inputs are available
|
||||
|
||||
print("=== Cloud Hypervisor Basic Tests ===");
|
||||
|
||||
// Dependency checks (static binaries only)
|
||||
let chs = which("cloud-hypervisor-static");
|
||||
let chrs = which("ch-remote-static");
|
||||
|
||||
// Normalize which() results: () or "" both mean missing (depending on SAL which variant)
|
||||
let ch_missing = (chs == () || chs == "");
|
||||
let chr_missing = (chrs == () || chrs == "");
|
||||
|
||||
if ch_missing || chr_missing {
|
||||
print("⚠️ cloud-hypervisor-static and/or ch-remote-static not available - skipping CloudHV tests");
|
||||
print("Install Cloud Hypervisor static binaries to run these tests.");
|
||||
print("=== CloudHV Tests Skipped ===");
|
||||
exit();
|
||||
}
|
||||
|
||||
// Inputs (adjust these for your environment)
|
||||
// Prefer firmware boot if firmware is available; otherwise fallback to direct kernel boot.
|
||||
let firmware_path = "/tmp/virt_images/hypervisor-fw";
|
||||
let kernel_path = "/path/to/vmlinux"; // optional when firmware_path is present
|
||||
|
||||
// We can reuse the base image from the QCOW2 test/builder if present.
|
||||
let disk_path = "/tmp/virt_images/noble-server-cloudimg-amd64.img";
|
||||
|
||||
// Validate inputs
|
||||
let missing = false;
|
||||
let have_firmware = exist(firmware_path);
|
||||
let have_kernel = exist(kernel_path);
|
||||
if !have_firmware && !have_kernel {
|
||||
print(`⚠️ neither firmware_path (${firmware_path}) nor kernel_path (${kernel_path}) found (start/stop will be skipped)`);
|
||||
missing = true;
|
||||
}
|
||||
if !exist(disk_path) {
|
||||
print(`⚠️ disk_path not found: ${disk_path} (start/stop will be skipped)`);
|
||||
missing = true;
|
||||
}
|
||||
|
||||
// Unique id
|
||||
let rid = run_silent("date +%s%N");
|
||||
let suffix = if rid.success && rid.stdout != "" { rid.stdout.trim() } else { "100000" };
|
||||
let vm_id = `testvm_${suffix}`;
|
||||
|
||||
print("\n--- Test 1: Create VM definition ---");
|
||||
let spec = #{
|
||||
"id": vm_id,
|
||||
"disk_path": disk_path,
|
||||
"api_socket": "", // default under VM dir
|
||||
"vcpus": 1,
|
||||
"memory_mb": 1024,
|
||||
// For firmware boot:
|
||||
// Provide firmware_path only if it exists
|
||||
// For kernel boot:
|
||||
// Provide kernel_path and optionally a cmdline
|
||||
};
|
||||
if have_firmware {
|
||||
spec.firmware_path = firmware_path;
|
||||
} else if have_kernel {
|
||||
spec.kernel_path = kernel_path;
|
||||
spec.cmdline = "console=ttyS0 reboot=k panic=1";
|
||||
}
|
||||
// "extra_args": can be added if needed, e.g.:
|
||||
// spec.extra_args = ["--rng", "src=/dev/urandom"];
|
||||
|
||||
try {
|
||||
let created_id = cloudhv_vm_create(spec);
|
||||
print(`✓ VM created: ${created_id}`);
|
||||
} catch (err) {
|
||||
print(`❌ VM create failed: ${err}`);
|
||||
print("=== CloudHV Tests Aborted ===");
|
||||
exit();
|
||||
}
|
||||
|
||||
print("\n--- Test 2: VM info ---");
|
||||
try {
|
||||
let info = cloudhv_vm_info(vm_id);
|
||||
print(`✓ VM info loaded: id=${info.spec.id}, status=${info.runtime.status}`);
|
||||
} catch (err) {
|
||||
print(`❌ VM info failed: ${err}`);
|
||||
print("=== CloudHV Tests Aborted ===");
|
||||
exit();
|
||||
}
|
||||
|
||||
print("\n--- Test 3: VM list ---");
|
||||
try {
|
||||
let vms = cloudhv_vm_list();
|
||||
print(`✓ VM list size: ${vms.len()}`);
|
||||
} catch (err) {
|
||||
print(`❌ VM list failed: ${err}`);
|
||||
print("=== CloudHV Tests Aborted ===");
|
||||
exit();
|
||||
}
|
||||
|
||||
// Start/Stop only if inputs exist
|
||||
if !missing {
|
||||
print("\n--- Test 4: Start VM ---");
|
||||
try {
|
||||
cloudhv_vm_start(vm_id);
|
||||
print("✓ VM start invoked");
|
||||
} catch (err) {
|
||||
print(`⚠️ VM start failed (this can happen if kernel/cmdline are incompatible): ${err}`);
|
||||
}
|
||||
|
||||
print("\n--- Test 5: Stop VM (graceful) ---");
|
||||
try {
|
||||
cloudhv_vm_stop(vm_id, false);
|
||||
print("✓ VM stop invoked (graceful)");
|
||||
} catch (err) {
|
||||
print(`⚠️ VM stop failed: ${err}`);
|
||||
}
|
||||
} else {
|
||||
print("\n⚠️ Skipping start/stop because required inputs are missing.");
|
||||
}
|
||||
|
||||
print("\n--- Test 6: Delete VM definition ---");
|
||||
try {
|
||||
cloudhv_vm_delete(vm_id, false);
|
||||
print("✓ VM deleted");
|
||||
} catch (err) {
|
||||
print(`❌ VM delete failed: ${err}`);
|
||||
print("=== CloudHV Tests Aborted ===");
|
||||
exit();
|
||||
}
|
||||
|
||||
print("\n=== Cloud Hypervisor Basic Tests Completed ===");
|
Reference in New Issue
Block a user