From 7b859d274c62b8f918fdaf3fb1a5241c54b106ce Mon Sep 17 00:00:00 2001
From: kristof
Date: Sun, 15 Jun 2025 19:16:04 +0200
Subject: [PATCH] ...

---
 tools/ubuntu_vm_delete.sh |  13 +-
 tools/ubuntu_vm_start.sh  | 455 ++++++++++++++++++++++++++++++++++----
 2 files changed, 427 insertions(+), 41 deletions(-)

diff --git a/tools/ubuntu_vm_delete.sh b/tools/ubuntu_vm_delete.sh
index 31fde86..c0a1338 100755
--- a/tools/ubuntu_vm_delete.sh
+++ b/tools/ubuntu_vm_delete.sh
@@ -220,10 +220,15 @@ delete_single_vm() {
         stop_vm_process "$VM_PID" "$vm_name"
     else
         # Try to find the process by name
-        local found_pid=$(pgrep -f "cloud-hypervisor.*$vm_name" 2>/dev/null || echo "")
-        if [ -n "$found_pid" ]; then
-            warn "Found VM process by name: $found_pid"
-            stop_vm_process "$found_pid" "$vm_name"
+        local found_pids=$(pgrep -f "cloud-hypervisor.*$vm_name" 2>/dev/null || echo "")
+        if [ -n "$found_pids" ]; then
+            warn "Found VM process(es) by name: $found_pids"
+            # Process each PID separately
+            echo "$found_pids" | while read -r pid; do
+                if [ -n "$pid" ]; then
+                    stop_vm_process "$pid" "$vm_name"
+                fi
+            done
         fi
     fi
 
diff --git a/tools/ubuntu_vm_start.sh b/tools/ubuntu_vm_start.sh
index fbd04fd..8ed4b1d 100755
--- a/tools/ubuntu_vm_start.sh
+++ b/tools/ubuntu_vm_start.sh
@@ -21,6 +21,11 @@ BTRFS_MOUNT_POINT="/var/lib/vms"
 BASE_SUBVOL="$BTRFS_MOUNT_POINT/base"
 VMS_SUBVOL="$BTRFS_MOUNT_POINT/vms"
 
+# Network configuration
+BRIDGE_NAME="br0"
+BRIDGE_IP="192.168.100.1/24"
+NETWORK="192.168.100.0/24"
+
 log() {
     echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')] $1${NC}"
 }
@@ -31,6 +36,12 @@ warn() {
 
 error() {
     echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR: $1${NC}"
+
+    # If VM_NAME is set and we're in VM creation phase, clean up
+    if [ -n "$VM_NAME" ] && [ -n "$VM_PID" ]; then
+        cleanup_failed_vm "$VM_NAME"
+    fi
+
     exit 1
 }
 
@@ -38,6 +49,126 @@ info() {
     echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')] INFO: $1${NC}"
 }
 
+# Test functions
+test_step() {
+    local step_name="$1"
+    local test_command="$2"
+
+    info "Testing: $step_name"
+    if eval "$test_command"; then
+        log "✓ Test passed: $step_name"
+        return 0
+    else
+        error "✗ Test failed: $step_name"
+        return 1
+    fi
+}
+
+test_file_exists() {
+    local file_path="$1"
+    local description="$2"
+    test_step "$description" "[ -f '$file_path' ]"
+}
+
+test_directory_exists() {
+    local dir_path="$1"
+    local description="$2"
+    test_step "$description" "[ -d '$dir_path' ]"
+}
+
+test_command_exists() {
+    local command="$1"
+    local description="$2"
+    test_step "$description" "command -v '$command' &> /dev/null"
+}
+
+test_network_interface() {
+    local interface="$1"
+    local description="$2"
+    test_step "$description" "ip link show '$interface' &>/dev/null"
+}
+
+test_process_running() {
+    local pid="$1"
+    local description="$2"
+    test_step "$description" "kill -0 '$pid' 2>/dev/null"
+}
+
+# Cleanup function for failed VM creation
+cleanup_failed_vm() {
+    local vm_name="$1"
+    warn "VM creation failed, cleaning up..."
+
+    # Call the delete script to clean up
+    local delete_script="$(dirname "$0")/ubuntu_vm_delete.sh"
+    if [ -f "$delete_script" ]; then
+        log "Running cleanup script: $delete_script"
+        "$delete_script" "$vm_name" || warn "Cleanup script failed, manual cleanup may be required"
+    else
+        warn "Delete script not found at $delete_script, manual cleanup required"
+    fi
+}
+
+# Generate a proper password hash for 'ubuntu'
+generate_password_hash() {
+    # Generate salt and hash for password 'ubuntu'
+    python3 -c "import crypt; print(crypt.crypt('ubuntu', crypt.mksalt(crypt.METHOD_SHA512)))"
+}
+
+# Wait for VM to boot and get IP
+wait_for_vm_boot() {
+    local vm_name="$1"
+    local max_wait=120 # 2 minutes
+    local count=0
+
+    # Log to stderr so command substitution only captures the echoed IP
+    log "Waiting for VM '$vm_name' to boot and get IP address..." >&2
+
+    while [ $count -lt $max_wait ]; do
+        # Check if VM got an IP from DHCP (exclude the bridge/gateway address)
+        local vm_ip=$(arp -a | grep "192.168.100" | grep -v "(192.168.100.1)" | head -1 | sed 's/.*(\([^)]*\)).*/\1/')

+        if [ -n "$vm_ip" ] && [ "$vm_ip" != "192.168.100.1" ]; then
+            log "VM got IP address: $vm_ip" >&2
+            echo "$vm_ip"
+            return 0
+        fi
+
+        sleep 2
+        count=$((count + 2))
+        if [ $((count % 10)) -eq 0 ]; then
+            info "Still waiting for VM to boot... ($count/$max_wait seconds)" >&2
+        fi
+    done
+
+    warn "VM did not get an IP address within $max_wait seconds" >&2
+    return 1
+}
+
+# Test SSH connectivity
+test_ssh_connection() {
+    local vm_ip="$1"
+    local max_attempts=10
+    local attempt=1
+
+    log "Testing SSH connectivity to $vm_ip..."
+
+    while [ $attempt -le $max_attempts ]; do
+        info "SSH attempt $attempt/$max_attempts"
+
+        # Test SSH connection with timeout
+        if timeout 10 sshpass -p 'ubuntu' ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 ubuntu@"$vm_ip" 'echo "SSH connection successful"' 2>/dev/null; then
+            log "✓ SSH connection successful to ubuntu@$vm_ip"
+            return 0
+        fi
+
+        sleep 5
+        attempt=$((attempt + 1))
+    done
+
+    error "✗ SSH connection failed after $max_attempts attempts"
+    return 1
+}
+
 # Check if running as root
 if [ "$EUID" -ne 0 ]; then
     error "This script must be run as root for btrfs operations"
@@ -67,19 +198,53 @@ fi
 
 log "Starting VM: $VM_NAME with ${MEMORY_MB}MB RAM and $CPU_CORES CPU cores"
 
+# Comprehensive prerequisite checks
+log "Performing prerequisite checks..."
+
 # Check if cloud-hypervisor is available
-if ! command -v cloud-hypervisor &> /dev/null; then
-    error "cloud-hypervisor not found. Please install it first."
-fi
+test_command_exists "cloud-hypervisor" "Cloud Hypervisor installation"
 
 # Check if qemu-img is available (for image conversion)
 if ! command -v qemu-img &> /dev/null; then
     warn "qemu-img not found. Installing qemu-utils..."
     apt update && apt install -y qemu-utils
+    test_command_exists "qemu-img" "QEMU tools installation"
 fi
 
+# Check for required tools
+test_command_exists "curl" "curl installation"
+test_command_exists "btrfs" "btrfs tools installation"
+test_command_exists "ip" "iproute2 tools installation"
+
+# Check if ethtool is available (for TAP interface optimization)
+if ! command -v ethtool &> /dev/null; then
+    log "Installing ethtool for network interface optimization..."
+    apt update && apt install -y ethtool
+    test_command_exists "ethtool" "ethtool installation"
+fi
+
+# Check if sshpass is available (for SSH testing)
+if ! command -v sshpass &> /dev/null; then
+    log "Installing sshpass for SSH testing..."
+    apt update && apt install -y sshpass
+    test_command_exists "sshpass" "sshpass installation"
+fi
+
+# Check if python3 is available (for password hashing)
+test_command_exists "python3" "Python3 installation"
+
+# Check if genisoimage or mkisofs is available
+if ! command -v genisoimage &> /dev/null && ! command -v mkisofs &> /dev/null; then
+    log "Installing genisoimage for cloud-init ISO creation..."
+    apt update && apt install -y genisoimage
+fi
+
+log "✓ All prerequisites checked"
+
 # Create base directory structure
+log "Setting up storage structure..."
 mkdir -p "$VM_BASE_DIR"
+test_directory_exists "$VM_BASE_DIR" "VM base directory creation"
 
 # Check if the base directory is on btrfs
 FILESYSTEM_TYPE=$(stat -f -c %T "$VM_BASE_DIR" 2>/dev/null)
@@ -87,19 +252,29 @@ if [ "$FILESYSTEM_TYPE" != "btrfs" ]; then
     error "Base directory $VM_BASE_DIR is not on a btrfs filesystem (detected: $FILESYSTEM_TYPE). Please create a btrfs filesystem first."
 fi
 
-log "Btrfs filesystem detected at $VM_BASE_DIR"
+log "✓ Btrfs filesystem detected at $VM_BASE_DIR"
 
 # Create base and vms subvolumes if they don't exist
 if [ ! -d "$BASE_SUBVOL" ]; then
     log "Creating base subvolume at $BASE_SUBVOL"
     btrfs subvolume create "$BASE_SUBVOL"
+    test_directory_exists "$BASE_SUBVOL" "Base subvolume creation"
+else
+    log "✓ Base subvolume already exists"
 fi
 
 if [ ! -d "$VMS_SUBVOL" ]; then
     log "Creating VMs subvolume at $VMS_SUBVOL"
     btrfs subvolume create "$VMS_SUBVOL"
+    test_directory_exists "$VMS_SUBVOL" "VMs subvolume creation"
+else
+    log "✓ VMs subvolume already exists"
 fi
 
+# Verify subvolumes are properly created
+test_step "Base subvolume verification" "btrfs subvolume show '$BASE_SUBVOL' &>/dev/null"
+test_step "VMs subvolume verification" "btrfs subvolume show '$VMS_SUBVOL' &>/dev/null"
+
 # Define paths
 BASE_IMAGE_PATH="$BASE_SUBVOL/${BASE_IMAGE_NAME}.raw"
 FIRMWARE_PATH="$BASE_SUBVOL/hypervisor-fw"
@@ -108,6 +283,7 @@ VM_IMAGE_PATH="$VM_SUBVOL_PATH/${VM_NAME}.raw"
 CLOUD_INIT_PATH="$VM_SUBVOL_PATH/cloud-init.img"
 
 # Download and prepare base image if it doesn't exist
+log "Preparing base image and firmware..."
 if [ ! -f "$BASE_IMAGE_PATH" ]; then
     log "Base image not found. Downloading Ubuntu cloud image..."
 
@@ -116,14 +292,27 @@ if [ ! -f "$BASE_IMAGE_PATH" ]; then
     if ! curl -L --fail --progress-bar -o "$TEMP_QCOW2" "$BASE_IMAGE_URL"; then
         error "Failed to download Ubuntu cloud image from $BASE_IMAGE_URL"
     fi
+    test_file_exists "$TEMP_QCOW2" "Ubuntu cloud image download"
 
     log "Converting qcow2 image to raw format..."
     qemu-img convert -p -f qcow2 -O raw "$TEMP_QCOW2" "$BASE_IMAGE_PATH"
+    test_file_exists "$BASE_IMAGE_PATH" "Base image conversion"
+
+    # Verify the converted image
+    image_info=$(qemu-img info "$BASE_IMAGE_PATH" 2>/dev/null)
+    if echo "$image_info" | grep -q "file format: raw"; then
+        log "✓ Base image successfully converted to raw format"
+    else
+        error "Base image conversion verification failed"
+    fi
 
     # Cleanup temporary file
    rm -f "$TEMP_QCOW2"
 
-    log "Base image created at $BASE_IMAGE_PATH"
+    log "✓ Base image created at $BASE_IMAGE_PATH"
+else
+    log "✓ Base image already exists at $BASE_IMAGE_PATH"
+    test_file_exists "$BASE_IMAGE_PATH" "Base image verification"
 fi
 
 # Download firmware if it doesn't exist
@@ -132,25 +321,53 @@ if [ ! -f "$FIRMWARE_PATH" ]; then
     if ! curl -L --fail --progress-bar -o "$FIRMWARE_PATH" "$FIRMWARE_URL"; then
         error "Failed to download firmware from $FIRMWARE_URL"
     fi
+    test_file_exists "$FIRMWARE_PATH" "Firmware download"
     chmod +x "$FIRMWARE_PATH"
-    log "Firmware downloaded to $FIRMWARE_PATH"
+    test_step "Firmware executable check" "[ -x '$FIRMWARE_PATH' ]"
+    log "✓ Firmware downloaded to $FIRMWARE_PATH"
+else
+    log "✓ Firmware already exists at $FIRMWARE_PATH"
+    test_file_exists "$FIRMWARE_PATH" "Firmware verification"
 fi
 
 # Create VM subvolume by cloning from base
+log "Setting up VM-specific storage..."
 if [ -d "$VM_SUBVOL_PATH" ]; then
     warn "VM subvolume $VM_NAME already exists. Removing it..."
-    btrfs subvolume delete "$VM_SUBVOL_PATH"
+    if btrfs subvolume show "$VM_SUBVOL_PATH" &>/dev/null; then
+        btrfs subvolume delete "$VM_SUBVOL_PATH"
+    else
+        rm -rf "$VM_SUBVOL_PATH"
+    fi
+    test_step "VM subvolume cleanup" "[ ! -d '$VM_SUBVOL_PATH' ]"
 fi
 
 log "Creating VM subvolume by cloning base subvolume..."
 btrfs subvolume snapshot "$BASE_SUBVOL" "$VM_SUBVOL_PATH"
+test_directory_exists "$VM_SUBVOL_PATH" "VM subvolume creation"
+test_step "VM subvolume verification" "btrfs subvolume show '$VM_SUBVOL_PATH' &>/dev/null"
 
 # Copy the base image to VM subvolume (this will be a CoW copy initially)
 log "Creating VM disk image (thin provisioned)..."
 cp --reflink=always "$BASE_IMAGE_PATH" "$VM_IMAGE_PATH"
+test_file_exists "$VM_IMAGE_PATH" "VM disk image creation"
+
+# Verify the image copy
+vm_image_size=$(stat -c%s "$VM_IMAGE_PATH" 2>/dev/null)
+base_image_size=$(stat -c%s "$BASE_IMAGE_PATH" 2>/dev/null)
+if [ "$vm_image_size" = "$base_image_size" ]; then
+    log "✓ VM disk image successfully created (size: $vm_image_size bytes)"
+else
+    error "VM disk image size mismatch (VM: $vm_image_size, Base: $base_image_size)"
+fi
 
 # Create cloud-init image for first boot
 log "Creating cloud-init configuration..."
+
+# Generate proper password hash for 'ubuntu'
+PASSWORD_HASH=$(generate_password_hash)
+test_step "Password hash generation" "[ -n '$PASSWORD_HASH' ]"
+
 cat > "/tmp/user-data" << EOF
 #cloud-config
 users:
@@ -158,40 +375,59 @@ users:
     sudo: ALL=(ALL) NOPASSWD:ALL
     shell: /bin/bash
     lock_passwd: false
-    passwd: \$6\$rounds=4096\$saltsalt\$L9.LKkHxeed8Kn9.Kk8nNWn8W.XhHPyjKJJXYqKoTFJJy7P8dMCFK
-    ssh_authorized_keys:
-      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC... # Add your SSH public key here
+    passwd: $PASSWORD_HASH
     groups: sudo
     home: /home/ubuntu
 
+# Enable SSH with password authentication
 ssh_pwauth: true
 disable_root: false
 chpasswd:
   expire: false
 
-# Enable SSH
+# SSH configuration
 ssh_authorized_keys: []
 
+# Network configuration to ensure DHCP works
+network:
+  version: 2
+  ethernets:
+    eth0:
+      dhcp4: true
+      dhcp-identifier: mac
+
 # Package updates and installs
 package_update: true
-package_upgrade: true
+package_upgrade: false
 packages:
+  - openssh-server
   - curl
  - wget
  - git
  - htop
  - vim
+  - net-tools
+
+# Ensure SSH service is enabled and started
+runcmd:
+  - systemctl enable ssh
+  - systemctl start ssh
+  - systemctl status ssh
 
 # Final message
-final_message: "Cloud-init setup complete. VM is ready!"
+final_message: "Cloud-init setup complete. VM is ready for SSH access!"
 EOF
 
+test_file_exists "/tmp/user-data" "Cloud-init user-data creation"
+
 # Create meta-data file
 cat > "/tmp/meta-data" << EOF
 instance-id: $VM_NAME
 local-hostname: $VM_NAME
 EOF
 
+test_file_exists "/tmp/meta-data" "Cloud-init meta-data creation"
+
 # Create cloud-init ISO
 log "Creating cloud-init ISO..."
 if command -v genisoimage &> /dev/null; then
@@ -202,61 +438,177 @@ else
     error "Neither genisoimage nor mkisofs found. Please install genisoimage or cdrtools."
 fi
 
+test_file_exists "$CLOUD_INIT_PATH" "Cloud-init ISO creation"
+
+# Verify the ISO was created properly
+iso_size=$(stat -c%s "$CLOUD_INIT_PATH" 2>/dev/null)
+if [ "$iso_size" -gt 0 ]; then
+    log "✓ Cloud-init ISO created successfully (size: $iso_size bytes)"
+else
+    error "Cloud-init ISO creation failed or resulted in empty file"
+fi
+
 # Cleanup temporary files
 rm -f /tmp/user-data /tmp/meta-data
 
-log "Cloud-init ISO created at $CLOUD_INIT_PATH"
+log "✓ Cloud-init ISO created at $CLOUD_INIT_PATH"
 
 # Resize the VM disk to give it more space (optional, expand to 20GB)
 log "Resizing VM disk to 20GB..."
 qemu-img resize "$VM_IMAGE_PATH" 20G
 
+# Verify disk resize
+new_size=$(qemu-img info "$VM_IMAGE_PATH" | grep "virtual size" | awk '{print $3}')
+if echo "$new_size" | grep -q "20"; then
+    log "✓ VM disk successfully resized to 20GB"
+else
+    warn "VM disk resize verification failed, but continuing..."
+fi
+
 # Create network configuration
-BRIDGE_NAME="br0"
 TAP_NAME="tap-$VM_NAME"
 
+log "Setting up network configuration..."
+
 # Check if bridge exists, create if not
 if ! ip link show "$BRIDGE_NAME" &>/dev/null; then
     log "Creating bridge interface $BRIDGE_NAME..."
     ip link add name "$BRIDGE_NAME" type bridge
     ip link set dev "$BRIDGE_NAME" up
-    # You may want to configure the bridge with an IP address
-    # ip addr add 192.168.100.1/24 dev "$BRIDGE_NAME"
+
+    # Configure bridge with IP address for VM network
+    log "Configuring bridge IP address..."
+    ip addr add "$BRIDGE_IP" dev "$BRIDGE_NAME"
+
+    test_network_interface "$BRIDGE_NAME" "Bridge interface creation"
+    test_step "Bridge IP configuration" "ip addr show '$BRIDGE_NAME' | grep -q '192.168.100.1'"
+else
+    log "✓ Bridge interface $BRIDGE_NAME already exists"
+    # Ensure bridge has IP configured
+    if ! ip addr show "$BRIDGE_NAME" | grep -q "192.168.100.1"; then
+        log "Adding IP address to existing bridge..."
+        ip addr add "$BRIDGE_IP" dev "$BRIDGE_NAME" 2>/dev/null || true
+    fi
 fi
 
 # Create TAP interface for the VM
 log "Creating TAP interface $TAP_NAME..."
-ip tuntap add dev "$TAP_NAME" mode tap
+
+# Remove existing TAP interface if it exists
+if ip link show "$TAP_NAME" &>/dev/null; then
+    warn "TAP interface $TAP_NAME already exists, removing it..."
+    ip link delete "$TAP_NAME" 2>/dev/null || true
+    sleep 1 # Give time for cleanup
+fi
+
+# Create TAP interface with proper configuration for Cloud Hypervisor
+ip tuntap add dev "$TAP_NAME" mode tap user root
+test_network_interface "$TAP_NAME" "TAP interface creation"
+
+# Set TAP interface up
 ip link set dev "$TAP_NAME" up
+sleep 1 # Give the interface a moment to come up
+test_step "TAP interface up" "ip link show '$TAP_NAME' | grep -q 'UP'"
+
+# Attach to bridge
 ip link set dev "$TAP_NAME" master "$BRIDGE_NAME"
+sleep 1 # Give the bridge attachment a moment to complete
+test_step "TAP interface bridge attachment" "ip link show '$TAP_NAME' | grep -q 'master'"
+
+# Disable offloading features that can cause issues with Cloud Hypervisor
+ethtool -K "$TAP_NAME" tx off rx off tso off gso off gro off 2>/dev/null || warn "Could not disable TAP interface offloading (ethtool not available)"
+
+log "✓ Network interfaces configured successfully"
+
+# Ensure basic networking is set up (simplified version of setup_vm_network.sh)
+log "Ensuring basic VM networking is configured..."
+
+# Enable IP forwarding
+echo 1 > /proc/sys/net/ipv4/ip_forward
+
+# Set up basic NAT rules (remove existing first to avoid duplicates)
+iptables -t nat -D POSTROUTING -s "$NETWORK" -j MASQUERADE 2>/dev/null || true
+iptables -D FORWARD -i "$BRIDGE_NAME" -j ACCEPT 2>/dev/null || true
+iptables -D FORWARD -o "$BRIDGE_NAME" -j ACCEPT 2>/dev/null || true
+
+# Add new rules
+iptables -t nat -A POSTROUTING -s "$NETWORK" -j MASQUERADE
+iptables -A FORWARD -i "$BRIDGE_NAME" -j ACCEPT
+iptables -A FORWARD -o "$BRIDGE_NAME" -j ACCEPT
+
+log "✓ Basic NAT and forwarding rules configured"
+
+# Check if dnsmasq is running for DHCP
+if ! systemctl is-active --quiet dnsmasq 2>/dev/null; then
+    warn "dnsmasq is not running. VMs may not get IP addresses automatically."
+    warn "Consider running: sudo ./setup_vm_network.sh"
+fi
 
 # Start the VM with Cloud Hypervisor
 log "Starting VM $VM_NAME..."
 
 VM_SOCKET="/tmp/cloud-hypervisor-$VM_NAME.sock"
+VM_LOG_FILE="/tmp/cloud-hypervisor-$VM_NAME.log"
 
-# Remove existing socket if it exists
-rm -f "$VM_SOCKET"
+# Remove existing socket and log file if they exist
+rm -f "$VM_SOCKET" "$VM_LOG_FILE"
+
+# Generate a random MAC address for the VM
+VM_MAC="52:54:00:$(printf '%02x:%02x:%02x' $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)))"
+log "Generated MAC address for VM: $VM_MAC"
+
+# Start Cloud Hypervisor in background with error handling
+log "Launching Cloud Hypervisor..."
+
+# Try to start Cloud Hypervisor and capture any error output
+log "Starting Cloud Hypervisor with command:"
+log "cloud-hypervisor --api-socket $VM_SOCKET --memory size=${MEMORY_MB}M --cpus boot=$CPU_CORES --firmware $FIRMWARE_PATH --disk path=$VM_IMAGE_PATH path=$CLOUD_INIT_PATH,readonly=on --net tap=$TAP_NAME,mac=$VM_MAC --serial file=$VM_LOG_FILE --console off --event-monitor path=${VM_LOG_FILE}.events"
 
-# Start Cloud Hypervisor in background
 cloud-hypervisor \
     --api-socket "$VM_SOCKET" \
     --memory "size=${MEMORY_MB}M" \
     --cpus "boot=$CPU_CORES" \
-    --kernel "$FIRMWARE_PATH" \
+    --firmware "$FIRMWARE_PATH" \
     --disk "path=$VM_IMAGE_PATH" "path=$CLOUD_INIT_PATH,readonly=on" \
-    --net "tap=$TAP_NAME,mac=52:54:00:$(printf '%02x:%02x:%02x' $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)))" \
-    --serial tty \
+    --net "tap=$TAP_NAME,mac=$VM_MAC" \
+    --serial "file=$VM_LOG_FILE" \
     --console off \
-    --log-file /tmp/cloud-hypervisor-$VM_NAME.log &
+    --event-monitor "path=${VM_LOG_FILE}.events" &
 
 VM_PID=$!
 
-log "VM $VM_NAME started with PID $VM_PID"
+# Check if the process started successfully
+if [ -z "$VM_PID" ]; then
+    error "Failed to get VM process ID"
+fi
+
+# Verify VM process started
+sleep 2
+if ! test_process_running "$VM_PID" "VM process startup"; then
+    error "VM process failed to start or died immediately. Check log: $VM_LOG_FILE"
+fi
+
+log "✓ VM $VM_NAME started successfully with PID $VM_PID"
 log "VM socket: $VM_SOCKET"
+log "VM log file: $VM_LOG_FILE"
 log "TAP interface: $TAP_NAME"
 log "Bridge interface: $BRIDGE_NAME"
+log "VM MAC address: $VM_MAC"
+
+# Wait for the socket to be created (Cloud Hypervisor needs time to initialize)
+log "Waiting for VM API socket to be created..."
+socket_wait_count=0
+while [ ! -S "$VM_SOCKET" ] && [ $socket_wait_count -lt 15 ]; do
+    sleep 1
+    socket_wait_count=$((socket_wait_count + 1))
+    if [ $((socket_wait_count % 5)) -eq 0 ]; then
+        info "Still waiting for API socket... ($socket_wait_count/15 seconds)"
+    fi
+done
+
+# Use a socket test here; a plain file test (-f) is always false for a unix socket
+test_step "VM API socket creation" "[ -S '$VM_SOCKET' ]"
 
 # Save VM information for management
+log "Saving VM configuration..."
 VM_INFO_FILE="$VM_SUBVOL_PATH/vm-info.txt"
 cat > "$VM_INFO_FILE" << EOF
 VM_NAME=$VM_NAME
@@ -268,15 +620,18 @@ MEMORY_MB=$MEMORY_MB
 CPU_CORES=$CPU_CORES
 VM_IMAGE_PATH=$VM_IMAGE_PATH
 CLOUD_INIT_PATH=$CLOUD_INIT_PATH
+VM_MAC=$VM_MAC
+VM_LOG_FILE=$VM_LOG_FILE
 STARTED="$(date '+%Y-%m-%d %H:%M:%S')"
 EOF
 
-log "VM information saved to $VM_INFO_FILE"
+test_file_exists "$VM_INFO_FILE" "VM info file creation"
+log "✓ VM information saved to $VM_INFO_FILE"
 
-# Function to cleanup on exit
-cleanup() {
+# Function to cleanup on exit (only for interactive mode)
+cleanup_on_exit() {
     log "Cleaning up VM $VM_NAME..."
-    if kill -0 "$VM_PID" 2>/dev/null; then
+    if [ -n "$VM_PID" ] && kill -0 "$VM_PID" 2>/dev/null; then
         kill "$VM_PID"
         wait "$VM_PID" 2>/dev/null
     fi
@@ -284,12 +639,38 @@ cleanup() {
     rm -f "$VM_SOCKET"
 }
 
-# Set trap for cleanup on script exit
-trap cleanup EXIT INT TERM
+# Test VM boot and SSH connectivity
+log "Testing VM boot and SSH connectivity..."
 
-log "VM $VM_NAME is running. Press Ctrl+C to stop."
-log "To connect via SSH (once VM is booted): ssh ubuntu@"
-log "Default password: ubuntu (change after first login)"
+# Wait for VM to boot and get IP
+VM_IP=$(wait_for_vm_boot "$VM_NAME")
+if [ $? -ne 0 ] || [ -z "$VM_IP" ]; then
+    error "VM failed to boot or get IP address. Check log: $VM_LOG_FILE"
+fi
 
-# Wait for the VM process
-wait "$VM_PID"
\ No newline at end of file
+log "✓ VM booted successfully and got IP: $VM_IP"
+
+# Test SSH connectivity
+if test_ssh_connection "$VM_IP"; then
+    log "🎉 SUCCESS: VM $VM_NAME is fully operational!"
+    log "✓ VM is running with PID $VM_PID"
+    log "✓ VM has IP address: $VM_IP"
+    log "✓ SSH is working: ssh ubuntu@$VM_IP (password: ubuntu)"
+    log "✓ VM info saved to: $VM_INFO_FILE"
+    echo ""
+    info "VM $VM_NAME is ready for use!"
+    info "Connect via SSH: ssh ubuntu@$VM_IP"
+    info "Default password: ubuntu (please change after first login)"
+    info "To stop the VM: sudo $(dirname "$0")/ubuntu_vm_delete.sh $VM_NAME"
+    echo ""
+
+    # Don't set trap for successful VMs - let them run
+    log "VM $VM_NAME will continue running in the background."
+    log "Use 'sudo $(dirname "$0")/ubuntu_vm_delete.sh $VM_NAME' to stop and delete it."
+
+else
+    error "SSH connectivity test failed. VM will be deleted for retry."
+fi
+
+# If we reach here, the VM is working properly
+log "VM startup and testing completed successfully!"
\ No newline at end of file
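
Usage sketch (not part of the patch): the start script's argument parsing sits outside this diff, so the positional order below (VM name, memory in MB, CPU cores) is an assumption taken from the variables the script logs; the delete invocation matches the command the patch itself prints.

    # assumed argument order: <vm-name> [memory_mb] [cpu_cores]
    sudo ./tools/ubuntu_vm_start.sh testvm 2048 2
    # once the script reports the DHCP address found by wait_for_vm_boot:
    ssh ubuntu@<vm-ip>    # default password: ubuntu
    # stop the VM process and delete its btrfs subvolume
    sudo ./tools/ubuntu_vm_delete.sh testvm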