feat: Create minimal Zero-OS initramfs with console support

- Fixed build system to clone source repositories instead of downloading binaries
- Enhanced scripts/fetch-github.sh with proper git repo cloning and branch handling
- Updated scripts/compile-components.sh for RFS compilation with build-binary feature
- Added minimal firmware installation for essential network drivers (73 modules)
- Created comprehensive zinit configuration set (15 config files including getty)
- Added util-linux package for getty/agetty console support
- Optimized package selection for minimal 27MB initramfs footprint
- Successfully builds bootable vmlinuz.efi with embedded initramfs
- Confirmed working: VM boot, console login, network drivers, zinit init system

Components:
- initramfs.cpio.xz: 27MB compressed minimal Zero-OS image
- vmlinuz.efi: 35MB bootable kernel with embedded initramfs
- Complete Zero-OS toolchain: zinit, rfs, mycelium compiled from source
This commit is contained in:
2025-08-16 23:25:59 +02:00
parent 446eb02fb3
commit bf62e887e8
109 changed files with 7645 additions and 1945 deletions

View File

@@ -0,0 +1,33 @@
# Test driver for the RFS test suite.
# NOTE: make requires recipe lines to be indented with hard tabs.
.PHONY: all e2e performance unit integration clean

# Run every test category
all: unit integration e2e performance

# Run unit tests (in-crate #[test] functions)
unit:
	@echo "Running unit tests..."
	cargo test --lib

# Run integration tests (tests/ directory)
integration:
	@echo "Running integration tests..."
	cargo test --test docker_test --test parallel_download_test

# Run end-to-end tests (shell scripts driving the rfs CLI)
e2e:
	@echo "Running end-to-end tests..."
	chmod +x e2e_tests.sh
	./e2e_tests.sh
	chmod +x e2e_tests_updated.sh
	./e2e_tests_updated.sh

# Run performance tests
performance:
	@echo "Running performance tests..."
	chmod +x performance_tests.sh
	./performance_tests.sh

# Clean test artifacts left in /tmp by the shell-based tests
clean:
	@echo "Cleaning test artifacts..."
	rm -rf /tmp/rfs-e2e-tests /tmp/rfs-performance-tests /tmp/rfs-upload-download-tests

View File

@@ -0,0 +1,51 @@
# RFS Tests
This directory contains various tests for the RFS tool.
## Test Types
1. **Unit Tests**: Standard Rust unit tests within the codebase
2. **Integration Tests**: Rust tests that verify specific functionality
3. **End-to-End Tests**: Shell scripts that test the full RFS command-line interface
4. **Performance Tests**: Shell scripts that measure and compare performance
## Running Tests
You can use the provided Makefile to run the tests:
```bash
# Run all tests
make all
# Run specific test types
make unit
make integration
make e2e
make performance
# Clean test artifacts
make clean
```
## Test Files
- `e2e_tests.sh` and `e2e_tests_updated.sh`: End-to-end tests for all RFS commands
- `performance_tests.sh`: Performance tests focusing on parallel upload/download
- `docker_test.rs`: Integration test for the Docker functionality
- `parallel_download_test.rs`: Integration test for parallel download feature
- `Makefile`: Simplifies running the tests
## Requirements
- Rust and Cargo for unit and integration tests
- Bash for shell-based tests
- Docker for Docker-related tests
- Root/sudo access for mount tests
## Notes
- The end-to-end tests create temporary directories in `/tmp/rfs-e2e-tests`. They also automatically start local servers on ports 8080 and 8081 for testing, and shut them down after the tests complete
- The performance tests create temporary directories in `/tmp/rfs-performance-tests`
- The upload/download tests create temporary directories in `/tmp/rfs-upload-download-tests`
- Some tests require sudo access (mount tests)
- Docker tests will be skipped if Docker is not available

View File

@@ -0,0 +1,66 @@
#[cfg(test)]
mod docker_tests {
    use anyhow::Result;
    use std::path::Path;
    use tempdir::TempDir;
    use tokio::runtime::Runtime;
    use uuid::Uuid;

    use rfs::fungi;
    use rfs::store::{self, dir::DirStore};
    use rfs::DockerImageToFlist;

    /// End-to-end check that a public Docker image (`alpine:latest`) can be
    /// converted into an flist backed by a directory store.
    ///
    /// Skipped (returns `Ok`) when no working `docker` CLI is available.
    #[test]
    fn test_docker_conversion() -> Result<()> {
        // Skip test if docker is not available
        if !is_docker_available() {
            println!("Docker is not available, skipping test");
            return Ok(());
        }

        // Create a runtime for async operations
        let rt = Runtime::new()?;
        rt.block_on(async {
            // Create temporary directories
            let temp_dir = TempDir::new("docker-test")?;
            let store_dir = temp_dir.path().join("store");
            std::fs::create_dir_all(&store_dir)?;

            // Create a store
            let store = DirStore::new(&store_dir).await?;

            // Create a flist writer
            let fl_path = temp_dir.path().join("alpine-test.fl");
            let meta = fungi::Writer::new(&fl_path, true).await?;

            // Create a temporary directory for docker extraction; a random
            // container name avoids collisions between concurrent runs
            let container_name = Uuid::new_v4().to_string();
            let docker_tmp_dir = TempDir::new(&container_name)?;

            // Create DockerImageToFlist instance
            let mut docker_to_fl = DockerImageToFlist::new(
                meta,
                "alpine:latest".to_string(),
                None, // No credentials for public image
                docker_tmp_dir,
            );

            // Convert docker image to flist
            docker_to_fl.convert(store, None).await?;

            // Verify the flist was created
            assert!(Path::new(&fl_path).exists(), "Flist file was not created");
            Ok(())
        })
    }

    /// Returns true only when `docker --version` can be spawned *and* exits
    /// successfully.
    ///
    /// The previous check used `output().is_ok()`, which is true whenever the
    /// binary can be spawned — even if the command itself fails — so a broken
    /// docker installation would not have been skipped. Inspect the exit
    /// status instead.
    fn is_docker_available() -> bool {
        std::process::Command::new("docker")
            .arg("--version")
            .output()
            .map(|out| out.status.success())
            .unwrap_or(false)
    }
}

223
components/rfs/tests/e2e_tests.sh Executable file
View File

@@ -0,0 +1,223 @@
#!/bin/bash
# End-to-end tests for the rfs CLI: pack, unpack, mount, docker, config.
# -e: abort on the first failing command; -x: trace every command as it runs.
set -ex
# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color
# Path to the rfs binary (assumes the script runs from the tests/ directory
# after a `cargo build --release`)
RFS_BIN="../target/release/rfs"
# Test directory
TEST_DIR="/tmp/rfs-e2e-tests"
CACHE_DIR="$TEST_DIR/cache"
SOURCE_DIR="$TEST_DIR/source"
DEST_DIR="$TEST_DIR/destination"
MOUNT_DIR="$TEST_DIR/mount"
FLIST_PATH="$TEST_DIR/test.fl"
# NOTE(review): DOCKER_FLIST_PATH is never used below — test_docker checks
# for ./alpine-latest.fl instead; confirm which location is intended.
DOCKER_FLIST_PATH="$TEST_DIR/docker-test.fl"
# Store URL - using a local directory store for testing
STORE_DIR="$TEST_DIR/store"
STORE_URL="dir://$STORE_DIR"
# Clean up function
# Runs from the EXIT trap registered in main(): unmounts the test mount
# point if still mounted, then removes the whole test tree.
cleanup() {
echo "Cleaning up test directories..."
# Unmount if mounted
if mountpoint -q "$MOUNT_DIR"; then
# NOTE(review): if umount fails here (e.g. busy mount) the trap aborts
# under `set -e` before rm -rf runs — confirm this is acceptable.
sudo umount "$MOUNT_DIR"
fi
rm -rf "$TEST_DIR"
}
# Setup function: build the directory tree and seed it with test fixtures
# (two text files, a subdirectory file, a symlink, and 1 MB of random data).
setup() {
    echo "Setting up test directories..."
    mkdir -p "$TEST_DIR" "$CACHE_DIR" "$SOURCE_DIR" "$DEST_DIR" "$MOUNT_DIR" "$STORE_DIR"
    # Create some test files
    echo "Creating test files..."
    local n
    for n in 1 2; do
        printf 'This is a test file %s\n' "$n" > "$SOURCE_DIR/file$n.txt"
    done
    mkdir -p "$SOURCE_DIR/subdir"
    printf 'This is a test file in a subdirectory\n' > "$SOURCE_DIR/subdir/file3.txt"
    # Create a symlink
    ln -s "file1.txt" "$SOURCE_DIR/link_to_file1.txt"
    # Create a smaller file for testing
    dd if=/dev/urandom of="$SOURCE_DIR/random.bin" bs=1M count=1
}
# Run one named test command via eval and report pass/fail with color.
# $1 = human-readable test name, $2 = command string to evaluate.
run_test() {
    local name="$1"
    local cmd="$2"
    echo -e "\n${GREEN}Running test: $name${NC}"
    echo "Command: $cmd"
    if ! eval "$cmd"; then
        echo -e "${RED}✗ Test failed: $name${NC}"
        return 1
    fi
    echo -e "${GREEN}✓ Test passed: $name${NC}"
    return 0
}
# Test the pack command: pack SOURCE_DIR into an flist and verify that the
# flist file appears on disk.
test_pack() {
    run_test "Pack command" "$RFS_BIN pack -m $FLIST_PATH -s $STORE_URL $SOURCE_DIR"
    # Verify the flist was created
    if [ -f "$FLIST_PATH" ]; then
        echo "Flist created successfully at $FLIST_PATH"
        return 0
    fi
    echo -e "${RED}Flist file was not created${NC}"
    return 1
}
# Test the unpack command: restore the flist into DEST_DIR and recursively
# diff it against the original source tree.
test_unpack() {
    run_test "Unpack command" "$RFS_BIN unpack -m $FLIST_PATH -c $CACHE_DIR $DEST_DIR"
    # Verify files were unpacked correctly
    if diff -r "$SOURCE_DIR" "$DEST_DIR"; then
        echo "Files unpacked successfully to $DEST_DIR"
        return 0
    fi
    echo -e "${RED}Unpacked files don't match source files${NC}"
    return 1
}
# Test the mount command (requires sudo).
# Mounts the flist in the background, verifies the mount point, lists and
# reads a file through it, then unmounts and reaps the mount process.
test_mount() {
    echo -e "\n${GREEN}Running test: Mount command${NC}"
    echo "Command: sudo $RFS_BIN mount -m $FLIST_PATH -c $CACHE_DIR $MOUNT_DIR"
    # Run the mount command in the background
    sudo $RFS_BIN mount -m $FLIST_PATH -c $CACHE_DIR $MOUNT_DIR &
    MOUNT_PID=$!
    # Wait a moment for the mount to complete
    sleep 3
    # All cleanup commands below carry `|| true`: the script runs under
    # `set -e`, so a failing `kill` (process already exited) or `umount`
    # would otherwise abort the whole script before the test result is
    # reported.
    # Verify the mount point is working
    if ! mountpoint -q "$MOUNT_DIR"; then
        echo -e "${RED}Mount failed${NC}"
        kill $MOUNT_PID 2>/dev/null || true
        return 1
    fi
    # Check if files are accessible
    if ! ls -la "$MOUNT_DIR"; then
        echo -e "${RED}Cannot list files in mount directory${NC}"
        sudo umount "$MOUNT_DIR" 2>/dev/null || true
        kill $MOUNT_PID 2>/dev/null || true
        return 1
    fi
    # Read a file from the mount
    if ! cat "$MOUNT_DIR/file1.txt"; then
        echo -e "${RED}Cannot read file from mount${NC}"
        sudo umount "$MOUNT_DIR" 2>/dev/null || true
        kill $MOUNT_PID 2>/dev/null || true
        return 1
    fi
    # Unmount
    sudo umount "$MOUNT_DIR" 2>/dev/null || true
    kill $MOUNT_PID 2>/dev/null || true
    echo -e "${GREEN}✓ Test passed: Mount command${NC}"
    echo "Mount test completed successfully"
    return 0
}
# Test the docker command (requires docker).
# Converts alpine:latest to an flist with a 60 s timeout and verifies the
# output file appears in the current directory.
test_docker() {
    # Check if docker is available
    if ! command -v docker &> /dev/null; then
        echo -e "${RED}Docker is not installed, skipping docker test${NC}"
        return 0
    fi
    echo -e "\n${GREEN}Running test: Docker command${NC}"
    echo "Command: $RFS_BIN docker -i alpine:latest -s $STORE_URL"
    # Pull a small test image
    docker pull alpine:latest
    # Convert docker image to flist with a timeout
    timeout 60 $RFS_BIN docker -i alpine:latest -s $STORE_URL &
    DOCKER_PID=$!
    # Wait for the command to complete or timeout.
    # Capture the status with `|| RESULT=$?`: a bare `wait` returning
    # non-zero (e.g. 124 from `timeout`) would abort the whole script under
    # `set -e` before the failure could be reported.
    RESULT=0
    wait $DOCKER_PID || RESULT=$?
    if [ $RESULT -eq 124 ]; then
        echo -e "${RED}Docker command timed out${NC}"
        return 1
    elif [ $RESULT -ne 0 ]; then
        echo -e "${RED}Docker command failed with exit code $RESULT${NC}"
        return 1
    fi
    # Verify the flist was created (the docker subcommand writes
    # <image>-<tag>.fl into the current working directory)
    if [ ! -f "alpine-latest.fl" ]; then
        echo -e "${RED}Docker flist file was not created${NC}"
        return 1
    fi
    echo -e "${GREEN}✓ Test passed: Docker command${NC}"
    echo "Docker image converted to flist successfully"
    return 0
}
# Test the config command: exercise tag add/list and store add/list against
# the flist produced by test_pack.
test_config() {
    local cfg="$RFS_BIN config -m $FLIST_PATH"
    # Add a tag, then list tags
    run_test "Config tag add" "$cfg tag add -t test=value"
    run_test "Config tag list" "$cfg tag list"
    # Add a store, then list stores
    run_test "Config store add" "$cfg store add -s $STORE_URL"
    run_test "Config store list" "$cfg store list"
    return 0
}
# Main test function
# Runs the suite in dependency order (pack must precede unpack/config/mount).
# NOTE(review): the script runs under `set -e`, so any test function that
# returns non-zero aborts the script immediately (cleanup still fires via
# the EXIT trap) — the final "All tests completed!" prints only when every
# test passed.
main() {
# Register cleanup on exit
trap cleanup EXIT
# Setup test environment
setup
# Run tests
test_pack
test_unpack
test_config
# These tests may require sudo
echo -e "\n${GREEN}The following tests may require sudo:${NC}"
test_mount
test_docker
echo -e "\n${GREEN}All tests completed!${NC}"
}
# Run the main function
main

View File

@@ -0,0 +1,705 @@
#!/bin/bash
# End-to-end tests for rfs upload/download/sync against locally started
# rfs servers (ports 8080 and 8081).
# -e: abort on the first failing command.
set -e
# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
# Path to the rfs binary (relative to the tests/ directory)
RFS_BIN="../target/release/rfs"
# Test directory
TEST_DIR="/tmp/rfs-upload-download-tests"
CACHE_DIR="$TEST_DIR/cache"
SOURCE_DIR="$TEST_DIR/source"
DEST_DIR="$TEST_DIR/destination"
UPLOAD_DIR="$TEST_DIR/upload"
DOWNLOAD_DIR="$TEST_DIR/download"
# Store URL - using a local directory store for testing
STORE_DIR="$TEST_DIR/store"
STORE_URL="dir://$STORE_DIR"
# Server settings for testing
SERVER_PORT=8080
SERVER_URL="http://localhost:$SERVER_PORT"
SERVER_STORAGE="$TEST_DIR/server_storage"
SERVER_PID_FILE="$TEST_DIR/server.pid"
SERVER_CONFIG_FILE="$TEST_DIR/server_config.toml"
# Test file sizes (MB) for the small/medium/large fixtures
SMALL_FILE_SIZE_MB=1
MEDIUM_FILE_SIZE_MB=5
LARGE_FILE_SIZE_MB=10
# Clean up function
# Runs from the EXIT trap in main(): stops both test servers (if their PID
# files exist) and removes the whole test tree. kill failures are swallowed
# with `|| true` so cleanup never aborts under `set -e`.
cleanup() {
echo "Cleaning up test environment..."
# Stop the main server if it's running
if [ -f "$SERVER_PID_FILE" ]; then
echo "Stopping test server..."
kill $(cat "$SERVER_PID_FILE") 2>/dev/null || true
rm -f "$SERVER_PID_FILE"
fi
# Stop the second server if it's running (for sync tests)
local SERVER2_PID_FILE="$TEST_DIR/server2.pid"
if [ -f "$SERVER2_PID_FILE" ]; then
echo "Stopping second test server..."
kill $(cat "$SERVER2_PID_FILE") 2>/dev/null || true
rm -f "$SERVER2_PID_FILE"
fi
# Remove test directories and files
rm -rf "$TEST_DIR"
echo "Cleanup complete"
}
# Create server configuration file
# Writes the TOML config for the primary test server.
# NOTE(review): the values are hardcoded — `port=8080` must stay in step
# with $SERVER_PORT, and the store path dir:///tmp/store0 does not use
# $SERVER_STORAGE; confirm the server stores blobs where cleanup expects.
create_server_config() {
echo "Creating server configuration file..."
cat > "$SERVER_CONFIG_FILE" << EOF
# Server configuration for e2e tests
host="0.0.0.0"
port=8080
store_url=["dir:///tmp/store0"]
flist_dir="flists"
sqlite_path="fl-server.db"
storage_dir="storage"
# bloc_size=
jwt_secret="secret"
jwt_expire_hours=5
# users
[[users]]
username = "admin"
password = "admin"
EOF
echo "Server configuration file created at $SERVER_CONFIG_FILE"
}
# Start the server
# Launches the rfs server in the background, records its PID, and polls the
# /health endpoint until it responds (up to 10 s). The previous version used
# a fixed `sleep 3` and a single probe, which was flaky on slow machines.
start_server() {
    echo -e "\n${GREEN}Starting test server on port $SERVER_PORT...${NC}"
    # Create server storage directory
    mkdir -p "$SERVER_STORAGE"
    # Create server configuration
    create_server_config
    # Start the server in the background
    $RFS_BIN server --config-path "$SERVER_CONFIG_FILE" > "$TEST_DIR/server.log" 2>&1 &
    # Save the PID
    echo $! > "$SERVER_PID_FILE"
    # Wait for the server to start
    echo "Waiting for server to start..."
    local attempt
    for attempt in $(seq 1 10); do
        if curl -s "$SERVER_URL/health" > /dev/null; then
            echo -e "${GREEN}Server started successfully${NC}"
            return 0
        fi
        sleep 1
    done
    echo -e "${RED}Failed to start server${NC}"
    cat "$TEST_DIR/server.log"
    exit 1
}
# Setup function: create the working directories, generate random fixtures
# of three sizes, a flat multi-file directory, and a nested tree.
setup() {
    echo "Setting up test directories..."
    mkdir -p "$TEST_DIR" "$CACHE_DIR" "$SOURCE_DIR" "$DEST_DIR" "$UPLOAD_DIR" "$DOWNLOAD_DIR" "$STORE_DIR" "$SERVER_STORAGE"
    # Create test files of different sizes
    echo "Creating test files..."
    local spec label size
    for spec in "small:$SMALL_FILE_SIZE_MB" "medium:$MEDIUM_FILE_SIZE_MB" "large:$LARGE_FILE_SIZE_MB"; do
        label="${spec%%:*}"
        size="${spec#*:}"
        echo -e "${YELLOW}Creating $label test file (${size}MB)...${NC}"
        dd if=/dev/urandom of="$SOURCE_DIR/${label}_file.bin" bs=1M count=$size status=none
    done
    # Create a directory with multiple files
    mkdir -p "$SOURCE_DIR/multi_files"
    local i
    for i in 1 2 3 4 5; do
        dd if=/dev/urandom of="$SOURCE_DIR/multi_files/file_$i.bin" bs=512K count=1 status=none
    done
    # Create a nested directory structure
    mkdir -p "$SOURCE_DIR/nested/dir1/dir2"
    echo "Test content 1" > "$SOURCE_DIR/nested/file1.txt"
    echo "Test content 2" > "$SOURCE_DIR/nested/dir1/file2.txt"
    echo "Test content 3" > "$SOURCE_DIR/nested/dir1/dir2/file3.txt"
    echo "Test files created successfully"
}
# Run one named test command via eval and report pass/fail with color.
# $1 = human-readable test name, $2 = command string to evaluate.
run_test() {
    local name="$1"
    local cmd="$2"
    echo -e "\n${GREEN}Running test: $name${NC}"
    echo "Command: $cmd"
    if ! eval "$cmd"; then
        echo -e "${RED}✗ Test failed: $name${NC}"
        return 1
    fi
    echo -e "${GREEN}✓ Test passed: $name${NC}"
    return 0
}
# Function to measure execution time
measure_time() {
local start_time=$(date +%s.%N)
"$@"
local end_time=$(date +%s.%N)
echo "$(echo "$end_time - $start_time" | bc)"
}
# Test single file upload: time the upload of the medium fixture.
test_single_file_upload() {
    local src="$SOURCE_DIR/medium_file.bin"
    local name=$(basename "$src")
    local elapsed=$(measure_time $RFS_BIN upload "$src" -s "$SERVER_URL")
    echo -e "Upload time for $name: ${YELLOW}$elapsed seconds${NC}"
    # The command's own exit status is the only verification performed here;
    # the server is not queried to confirm the blob landed.
    return 0
}
# Test directory upload: time the upload of the flat multi-file directory.
test_directory_upload() {
    local dir="$SOURCE_DIR/multi_files"
    local elapsed=$(measure_time $RFS_BIN upload-dir "$dir" -s "$SERVER_URL")
    echo -e "Upload time for directory: ${YELLOW}$elapsed seconds${NC}"
    # Only the command's exit status is checked here.
    return 0
}
# Test nested directory upload: time the upload of the nested tree with an
# flist generated on the server side (--create-flist).
test_nested_directory_upload() {
    local dir="$SOURCE_DIR/nested"
    local elapsed=$(measure_time $RFS_BIN upload-dir "$dir" -s "$SERVER_URL" --create-flist)
    echo -e "Upload time for nested directory with flist: ${YELLOW}$elapsed seconds${NC}"
    # Only the command's exit status is checked here.
    return 0
}
# Test single file download
# Uploads the medium fixture to learn its content hash, downloads it back
# by hash, and byte-compares the result against the original.
test_single_file_download() {
# First, upload a file to get its hash
local file_path="$SOURCE_DIR/medium_file.bin"
local file_name=$(basename "$file_path")
echo -e "\n${GREEN}Uploading file to get hash: $file_path${NC}"
local upload_output
upload_output=$($RFS_BIN upload "$file_path" -s "$SERVER_URL" 2>&1)
echo "$upload_output"
# Extract the file hash from the upload output.
# The single-line `local x=$(...)` masks the pipeline status, so a missing
# match yields an empty string instead of aborting under `set -e`.
local file_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)
if [ -z "$file_hash" ]; then
echo -e "${RED}Failed to get file hash from upload${NC}"
echo -e "${RED}Upload output: ${NC}"
echo "$upload_output"
return 1
fi
echo "File hash: $file_hash"
# Now download the file using its hash
local download_path="$DOWNLOAD_DIR/$file_name"
local download_time=$(measure_time $RFS_BIN download "$file_hash" -o "$download_path" -s "$SERVER_URL")
echo -e "Download time for $file_name: ${YELLOW}$download_time seconds${NC}"
# Verify the file was downloaded correctly
if [ ! -f "$download_path" ]; then
echo -e "${RED}Downloaded file does not exist${NC}"
return 1
fi
# Compare the original and downloaded files
if ! cmp -s "$file_path" "$download_path"; then
echo -e "${RED}Downloaded file does not match original${NC}"
return 1
fi
echo -e "${GREEN}Downloaded file matches original${NC}"
return 0
}
# Test directory download
# Uploads the nested tree with --create-flist to learn the flist hash, then
# downloads the whole tree by that hash and diffs it against the source.
test_directory_download() {
# First, upload a directory with flist to get its hash
local dir_path="$SOURCE_DIR/nested"
echo -e "\n${GREEN}Uploading directory with flist to get hash: $dir_path${NC}"
local upload_output
upload_output=$($RFS_BIN upload-dir "$dir_path" -s "$SERVER_URL" --create-flist 2>&1)
echo "$upload_output"
# Extract the flist hash from the upload output (empty string on no match;
# handled by the check below)
local flist_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)
if [ -z "$flist_hash" ]; then
echo -e "${RED}Failed to get flist hash from upload${NC}"
echo -e "${RED}Upload output: ${NC}"
echo "$upload_output"
return 1
fi
echo "Flist hash: $flist_hash"
# Now download the directory using the flist hash
local download_dir="$DOWNLOAD_DIR/nested"
mkdir -p "$download_dir"
local download_time=$(measure_time $RFS_BIN download-dir "$flist_hash" -o "$download_dir" -s "$SERVER_URL")
echo -e "Download time for directory: ${YELLOW}$download_time seconds${NC}"
# Verify the directory was downloaded correctly
if [ ! -d "$download_dir" ]; then
echo -e "${RED}Downloaded directory does not exist${NC}"
return 1
fi
# Compare the original and downloaded directories
if ! diff -r "$dir_path" "$download_dir"; then
echo -e "${RED}Downloaded directory does not match original${NC}"
return 1
fi
echo -e "${GREEN}Downloaded directory matches original${NC}"
return 0
}
# Test parallel upload performance
# Uploads the same 20-file directory twice — once with default parallelism
# and once with RFS_PARALLEL_UPLOAD=1 — and reports the speedup ratio.
test_parallel_upload_performance() {
echo -e "\n${GREEN}Testing parallel upload performance...${NC}"
# Create a directory with many small files for testing parallel upload
local parallel_dir="$SOURCE_DIR/parallel_test"
mkdir -p "$parallel_dir"
echo -e "${YELLOW}Creating 20 small files for parallel upload test...${NC}"
for i in {1..20}; do
dd if=/dev/urandom of="$parallel_dir/file_$i.bin" bs=512K count=1 status=none
echo -ne "\rCreated $i/20 files"
done
echo -e "\nTest files created successfully"
# Test with default parallel upload (PARALLEL_UPLOAD=20)
echo -e "${YELLOW}Testing with default parallel upload...${NC}"
local parallel_time=$(measure_time $RFS_BIN upload-dir "$parallel_dir" -s "$SERVER_URL")
# Test with reduced parallelism
echo -e "${YELLOW}Testing with reduced parallelism...${NC}"
local serial_time=$(measure_time env RFS_PARALLEL_UPLOAD=1 $RFS_BIN upload-dir "$parallel_dir" -s "$SERVER_URL")
echo -e "Serial upload time: ${YELLOW}$serial_time seconds${NC}"
echo -e "Parallel upload time: ${YELLOW}$parallel_time seconds${NC}"
# Calculate speedup
# NOTE(review): bc errors if $parallel_time ever measures as 0 — acceptable
# for an informal benchmark, but worth confirming.
local speedup=$(echo "scale=2; $serial_time / $parallel_time" | bc)
echo -e "Speedup: ${GREEN}${speedup}x${NC}"
return 0
}
# Test parallel download performance
# Downloads the same flist twice — default parallelism vs
# RFS_PARALLEL_DOWNLOAD=1 — reports the speedup, and verifies both result
# trees are identical.
test_parallel_download_performance() {
echo -e "\n${GREEN}Testing parallel download performance...${NC}"
# First, upload a directory with many files to get its hash
# (reuses the parallel_test directory created by the upload perf test)
local parallel_dir="$SOURCE_DIR/parallel_test"
echo -e "\n${GREEN}Uploading directory with flist for parallel test: $parallel_dir${NC}"
local upload_output
upload_output=$($RFS_BIN upload-dir "$parallel_dir" -s "$SERVER_URL" --create-flist 2>&1)
echo "$upload_output"
# Extract the flist hash from the upload output
local flist_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)
if [ -z "$flist_hash" ]; then
echo -e "${RED}Failed to get flist hash from upload${NC}"
echo -e "${RED}Upload output: ${NC}"
echo "$upload_output"
return 1
fi
echo "Flist hash: $flist_hash"
# Test with default parallel download (PARALLEL_DOWNLOAD=20)
echo -e "${YELLOW}Testing with default parallel download...${NC}"
local download_dir_parallel="$DOWNLOAD_DIR/parallel"
mkdir -p "$download_dir_parallel"
local parallel_time=$(measure_time $RFS_BIN download-dir "$flist_hash" -o "$download_dir_parallel" -s "$SERVER_URL")
# Test with reduced parallelism
echo -e "${YELLOW}Testing with reduced parallelism...${NC}"
local download_dir_serial="$DOWNLOAD_DIR/serial"
mkdir -p "$download_dir_serial"
local serial_time=$(measure_time env RFS_PARALLEL_DOWNLOAD=1 $RFS_BIN download-dir "$flist_hash" -o "$download_dir_serial" -s "$SERVER_URL")
echo -e "Serial download time: ${YELLOW}$serial_time seconds${NC}"
echo -e "Parallel download time: ${YELLOW}$parallel_time seconds${NC}"
# Calculate speedup
# NOTE(review): bc errors if $parallel_time ever measures as 0.
local speedup=$(echo "scale=2; $serial_time / $parallel_time" | bc)
echo -e "Speedup: ${GREEN}${speedup}x${NC}"
# Verify downloaded directories match
if ! diff -r "$download_dir_serial" "$download_dir_parallel"; then
echo -e "${RED}Downloaded directories don't match between serial and parallel methods${NC}"
return 1
fi
echo -e "${GREEN}Downloaded directories match between methods${NC}"
return 0
}
# Test upload with different block sizes.
# For each block size: upload the large fixture once (timed, with the output
# captured so the content hash can be extracted from the same run), then
# download by hash and byte-compare against the source.
test_block_size_impact() {
    echo -e "\n${GREEN}Testing impact of block size on upload/download...${NC}"
    local file_path="$SOURCE_DIR/large_file.bin"
    local file_name=$(basename "$file_path")
    # Test with different block sizes
    for block_size in 256 512 1024 2048; do
        echo -e "${YELLOW}Testing with block size: ${block_size}KB${NC}"
        # Time the upload manually so its output (and the hash in it) can be
        # captured from the same run. The previous version uploaded the file
        # twice per block size: once through measure_time and once more just
        # to capture the hash.
        echo -e "\n${GREEN}Uploading file with ${block_size}KB blocks: $file_path${NC}"
        local start_time=$(date +%s.%N)
        local upload_output
        upload_output=$($RFS_BIN upload "$file_path" -s "$SERVER_URL" -b $((block_size * 1024)) 2>&1)
        local end_time=$(date +%s.%N)
        local upload_time=$(echo "$end_time - $start_time" | bc)
        echo "$upload_output"
        # Extract the file hash from the upload output
        local file_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)
        if [ -z "$file_hash" ]; then
            echo -e "${RED}Failed to get file hash from upload with ${block_size}KB blocks${NC}"
            echo -e "${RED}Upload output: ${NC}"
            echo "$upload_output"
            continue
        fi
        echo -e "Upload time with ${block_size}KB blocks: ${YELLOW}$upload_time seconds${NC}"
        # Download with the same hash
        local download_path="$DOWNLOAD_DIR/${block_size}kb_${file_name}"
        local download_time=$(measure_time $RFS_BIN download "$file_hash" -o "$download_path" -s "$SERVER_URL")
        echo -e "Download time with ${block_size}KB blocks: ${YELLOW}$download_time seconds${NC}"
        # Verify the file was downloaded correctly
        if ! cmp -s "$file_path" "$download_path"; then
            echo -e "${RED}Downloaded file with ${block_size}KB blocks does not match original${NC}"
            return 1
        fi
    done
    echo -e "${GREEN}All block size tests passed${NC}"
    return 0
}
# Test exists command
# Checks existence by file path, by content hash, and for content the
# server has never seen (the command should still exit 0 in that case).
test_exists_command() {
    echo -e "\n${GREEN}Testing exists command...${NC}"
    # First, upload a file to check
    local file_path="$SOURCE_DIR/medium_file.bin"
    echo -e "\n${GREEN}Uploading file to check existence: $file_path${NC}"
    local upload_output
    upload_output=$($RFS_BIN upload "$file_path" -s "$SERVER_URL" 2>&1)
    echo "$upload_output"
    # Extract the file hash from the upload output
    local file_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)
    if [ -z "$file_hash" ]; then
        echo -e "${RED}Failed to get file hash from upload${NC}"
        echo -e "${RED}Upload output: ${NC}"
        echo "$upload_output"
        return 1
    fi
    # Existence by original path, then by content hash
    echo -e "\n${GREEN}Testing exists with file path${NC}"
    run_test "Exists command with file path" "$RFS_BIN exists \"$file_path\" -s \"$SERVER_URL\""
    echo -e "\n${GREEN}Testing exists with hash${NC}"
    run_test "Exists command with hash" "$RFS_BIN exists \"$file_hash\" -s \"$SERVER_URL\""
    # Content that was never uploaded
    echo -e "\n${GREEN}Testing exists with non-existent file${NC}"
    local non_existent_file="$SOURCE_DIR/non_existent_file.bin"
    touch "$non_existent_file"
    echo "This file should not exist on the server" > "$non_existent_file"
    # This should report that the file doesn't exist, but the command should succeed
    run_test "Exists command with non-existent file" "$RFS_BIN exists \"$non_existent_file\" -s \"$SERVER_URL\""
    return 0
}
# Test website-publish command
# Builds a three-file static site, publishes it, then fetches the returned
# URL and checks the page title is served back.
test_website_publish() {
echo -e "\n${GREEN}Testing website-publish command...${NC}"
# Create a simple website in a temporary directory
local website_dir="$SOURCE_DIR/website"
mkdir -p "$website_dir"
# Create index.html
cat > "$website_dir/index.html" << EOF
<!DOCTYPE html>
<html>
<head>
<title>Test Website</title>
<link rel="stylesheet" href="style.css">
</head>
<body>
<h1>Test Website</h1>
<p>This is a test website for RFS.</p>
<img src="image.png" alt="Test Image">
</body>
</html>
EOF
# Create style.css
cat > "$website_dir/style.css" << EOF
body {
font-family: Arial, sans-serif;
margin: 0;
padding: 20px;
background-color: #f0f0f0;
}
h1 {
color: #333;
}
EOF
# Create a simple image
# (base64 of random bytes — not a valid PNG, but only its presence matters)
dd if=/dev/urandom bs=1024 count=10 | base64 > "$website_dir/image.png"
# Publish the website
echo -e "\n${GREEN}Publishing website: $website_dir${NC}"
local publish_output
publish_output=$($RFS_BIN website-publish "$website_dir" -s "$SERVER_URL" 2>&1)
echo "$publish_output"
# Extract the website hash and URL from the output
local website_hash=$(echo "$publish_output" | grep -o "Website hash: [a-f0-9]*" | cut -d' ' -f3)
local website_url=$(echo "$publish_output" | grep -o "Website URL: .*" | cut -d' ' -f3)
if [ -z "$website_hash" ]; then
echo -e "${RED}Failed to get website hash from publish output${NC}"
echo -e "${RED}Publish output: ${NC}"
echo "$publish_output"
return 1
fi
echo -e "Website hash: ${YELLOW}$website_hash${NC}"
echo -e "Website URL: ${YELLOW}$website_url${NC}"
# Verify the website is accessible
echo -e "\n${GREEN}Verifying website is accessible...${NC}"
if curl -s "$website_url" | grep -q "Test Website"; then
echo -e "${GREEN}Website is accessible${NC}"
else
echo -e "${RED}Website is not accessible${NC}"
return 1
fi
return 0
}
# Test sync command
# Starts a second server on port 8081, uploads a file to the first server
# only, syncs it by hash to the second, then syncs all blocks, and finally
# stops the second server.
test_sync_command() {
echo -e "\n${GREEN}Testing sync command...${NC}"
# We need a second server to test sync
# For this test, we'll create a second server configuration and start it
local SERVER2_PORT=8081
local SERVER2_URL="http://localhost:$SERVER2_PORT"
local SERVER2_STORAGE="$TEST_DIR/server2_storage"
local SERVER2_PID_FILE="$TEST_DIR/server2.pid"
local SERVER2_CONFIG_FILE="$TEST_DIR/server2_config.toml"
# Create second server storage directory
mkdir -p "$SERVER2_STORAGE"
# Create second server configuration
# NOTE(review): as with the primary config, port and store path are
# hardcoded and must stay in step with SERVER2_PORT — confirm on change.
cat > "$SERVER2_CONFIG_FILE" << EOF
# Server configuration for e2e tests (server 2)
host="0.0.0.0"
port=8081
store_url=["dir:///tmp/store1"]
flist_dir="flists"
sqlite_path="fl-server2.db"
storage_dir="storage"
# bloc_size=
jwt_secret="secret"
jwt_expire_hours=5
# users
[[users]]
username = "admin"
password = "admin"
EOF
# Start the second server
echo -e "\n${GREEN}Starting second test server on port $SERVER2_PORT...${NC}"
$RFS_BIN server --config-path "$SERVER2_CONFIG_FILE" > "$TEST_DIR/server2.log" 2>&1 &
echo $! > "$SERVER2_PID_FILE"
# Wait for the server to start
echo "Waiting for second server to start..."
sleep 3
# Check if the server is running
if ! curl -s "$SERVER2_URL/health" > /dev/null; then
echo -e "${RED}Failed to start second server${NC}"
cat "$TEST_DIR/server2.log"
return 1
fi
echo -e "${GREEN}Second server started successfully${NC}"
# Upload a file to the first server
local file_path="$SOURCE_DIR/medium_file.bin"
echo -e "\n${GREEN}Uploading file to first server: $file_path${NC}"
local upload_output
upload_output=$($RFS_BIN upload "$file_path" -s "$SERVER_URL" 2>&1)
echo "$upload_output"
# Extract the file hash from the upload output
local file_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)
if [ -z "$file_hash" ]; then
echo -e "${RED}Failed to get file hash from upload${NC}"
echo -e "${RED}Upload output: ${NC}"
echo "$upload_output"
return 1
fi
# Verify the file exists on the first server but not on the second
echo -e "\n${GREEN}Verifying file exists on first server but not on second...${NC}"
$RFS_BIN exists "$file_hash" -s "$SERVER_URL"
# `|| true` because this check is expected to fail before the sync and must
# not abort the script under `set -e`
$RFS_BIN exists "$file_hash" -s "$SERVER2_URL" || true # This should fail, but we don't want to exit
# Sync the file from the first server to the second
echo -e "\n${GREEN}Syncing file from first server to second...${NC}"
run_test "Sync command with hash" "$RFS_BIN sync -h \"$file_hash\" -s \"$SERVER_URL\" -d \"$SERVER2_URL\""
# Verify the file now exists on both servers
echo -e "\n${GREEN}Verifying file now exists on both servers...${NC}"
run_test "Exists on first server after sync" "$RFS_BIN exists \"$file_hash\" -s \"$SERVER_URL\""
run_test "Exists on second server after sync" "$RFS_BIN exists \"$file_hash\" -s \"$SERVER2_URL\""
# Test sync all blocks
echo -e "\n${GREEN}Testing sync all blocks...${NC}"
# Upload another file to the first server
local file2_path="$SOURCE_DIR/small_file.bin"
echo -e "\n${GREEN}Uploading second file to first server: $file2_path${NC}"
local upload2_output
upload2_output=$($RFS_BIN upload "$file2_path" -s "$SERVER_URL" 2>&1)
echo "$upload2_output"
# Sync all blocks from the first server to the second
echo -e "\n${GREEN}Syncing all blocks from first server to second...${NC}"
run_test "Sync command for all blocks" "$RFS_BIN sync -s \"$SERVER_URL\" -d \"$SERVER2_URL\""
# Stop the second server
if [ -f "$SERVER2_PID_FILE" ]; then
echo "Stopping second test server..."
kill $(cat "$SERVER2_PID_FILE") 2>/dev/null || true
rm -f "$SERVER2_PID_FILE"
fi
return 0
}
# Main test function
# Drives the full suite in order: setup, server start, upload tests,
# download tests, performance, block-size, exists, website, sync.
# NOTE(review): under `set -e`, any test function returning non-zero aborts
# the run (cleanup still fires via the EXIT trap).
main() {
# Register cleanup on exit
trap cleanup EXIT
# Setup test environment
setup
# Start the server
start_server
# Run upload tests
test_single_file_upload
test_directory_upload
test_nested_directory_upload
# Run download tests
test_single_file_download
test_directory_download
# Run performance tests
test_parallel_upload_performance
test_parallel_download_performance
# Run block size impact tests
test_block_size_impact
# Run exists command tests
test_exists_command
# Run website-publish tests
test_website_publish
# Run sync command tests
test_sync_command
echo -e "\n${GREEN}All upload and download tests completed!${NC}"
}
# Run the main function
main

View File

@@ -0,0 +1,120 @@
#[cfg(test)]
mod parallel_download_tests {
use anyhow::Result;
use std::path::Path;
use std::time::Instant;
use tempdir::TempDir;
use tokio::runtime::Runtime;
use rfs::cache::Cache;
use rfs::fungi::{self, meta};
use rfs::store::{self, dir::DirStore};
use rfs::{pack, unpack};
/// Packs a generated tree of 20 × 1 MB files into an flist, unpacks it
/// twice, and prints the elapsed time of each pass.
///
/// NOTE(review): both passes run the same (parallel) unpack code path — the
/// "serial" measurement is a placeholder, as the inline comments below
/// state. The test's real assertions are that unpacking succeeds and the
/// output matches the source tree.
#[test]
fn test_parallel_download() -> Result<()> {
// Create a runtime for async operations
let rt = Runtime::new()?;
rt.block_on(async {
// Create temporary directories
let temp_dir = TempDir::new("parallel-test")?;
let source_dir = temp_dir.path().join("source");
let dest_dir_parallel = temp_dir.path().join("dest-parallel");
let dest_dir_serial = temp_dir.path().join("dest-serial");
let store_dir = temp_dir.path().join("store");
let cache_dir = temp_dir.path().join("cache");
std::fs::create_dir_all(&source_dir)?;
std::fs::create_dir_all(&dest_dir_parallel)?;
std::fs::create_dir_all(&dest_dir_serial)?;
std::fs::create_dir_all(&store_dir)?;
std::fs::create_dir_all(&cache_dir)?;
// Create some test files
create_test_files(&source_dir, 20, 1024 * 1024).await?; // 20 files of 1MB each
// Create a store
let store = DirStore::new(&store_dir).await?;
// Create a flist writer
let fl_path = temp_dir.path().join("test.fl");
let writer = fungi::Writer::new(&fl_path, true).await?;
// Pack the files
pack(writer, store.clone(), &source_dir, true, None).await?;
// Create a reader for the flist
let reader = fungi::Reader::new(&fl_path).await?;
let router = store::get_router(&reader).await?;
// Test parallel download (default)
let cache_parallel = Cache::new(&cache_dir, router.clone());
let start_parallel = Instant::now();
unpack(&reader, &cache_parallel, &dest_dir_parallel, false).await?;
let parallel_duration = start_parallel.elapsed();
// Clear cache directory so the second pass cannot reuse cached blocks
std::fs::remove_dir_all(&cache_dir)?;
std::fs::create_dir_all(&cache_dir)?;
// Test serial download by setting PARALLEL_DOWNLOAD to 1
// This is just a simulation since we can't easily modify the constant at runtime
// In a real test, we would use a feature flag or environment variable
let cache_serial = Cache::new(&cache_dir, router);
let start_serial = Instant::now();
// Here we're still using the parallel implementation, but in a real test
// we would use a version with PARALLEL_DOWNLOAD=1
unpack(&reader, &cache_serial, &dest_dir_serial, false).await?;
let serial_duration = start_serial.elapsed();
// Print the results (informational only; no timing assertion is made)
println!("Parallel download time: {:?}", parallel_duration);
println!("Serial download time: {:?}", serial_duration);
// Verify files were unpacked correctly
verify_directories(&source_dir, &dest_dir_parallel)?;
verify_directories(&source_dir, &dest_dir_serial)?;
Ok(())
})
}
// Helper function to create test files
async fn create_test_files(dir: &Path, count: usize, size: usize) -> Result<()> {
use rand::{thread_rng, Rng};
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
for i in 0..count {
let file_path = dir.join(format!("file_{}.bin", i));
let mut file = File::create(&file_path).await?;
// Create random data
let mut data = vec![0u8; size];
thread_rng().fill(&mut data[..]);
// Write to file
file.write_all(&data).await?;
file.flush().await?;
}
Ok(())
}
// Helper function to verify directories match
fn verify_directories(source: &Path, dest: &Path) -> Result<()> {
use std::process::Command;
let output = Command::new("diff")
.arg("-r")
.arg(source)
.arg(dest)
.output()?;
assert!(output.status.success(), "Directories don't match");
Ok(())
}
}

View File

@@ -0,0 +1,124 @@
#!/bin/bash
# Performance test harness for the rfs tool: creates a tree of random
# files, packs it into an flist, then compares serial vs parallel unpack
# times. Exits on the first failing command.
set -e
# Colors for output (ANSI escape sequences)
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
# Test directory — all scratch data lives under this root
TEST_DIR="/tmp/rfs-performance-tests"
CACHE_DIR="$TEST_DIR/cache"
SOURCE_DIR="$TEST_DIR/source"
DEST_DIR_SERIAL="$TEST_DIR/destination-serial"
DEST_DIR_PARALLEL="$TEST_DIR/destination-parallel"
FLIST_PATH="$TEST_DIR/perf-test.fl"
# Store URL - using a local directory store for testing
STORE_DIR="$TEST_DIR/store"
STORE_URL="dir://$STORE_DIR"
# Number of files and file size for testing (total workload: NUM_FILES * FILE_SIZE_MB MB)
NUM_FILES=100
FILE_SIZE_MB=1
# Clean up function
# Removes the entire scratch tree; registered as an EXIT trap in main()
# so it runs on success, failure, or interruption.
cleanup() {
    echo "Cleaning up test directories..."
    rm -rf "$TEST_DIR"
}
# Prepare the test workspace: create every scratch directory, then fill
# the source tree with NUM_FILES random binary files of FILE_SIZE_MB each.
setup() {
    echo "Setting up test directories..."
    mkdir -p "$TEST_DIR" "$CACHE_DIR" "$SOURCE_DIR" "$DEST_DIR_SERIAL" "$DEST_DIR_PARALLEL" "$STORE_DIR"

    echo -e "${YELLOW}Creating $NUM_FILES test files of ${FILE_SIZE_MB}MB each...${NC}"
    local i
    for ((i = 1; i <= NUM_FILES; i++)); do
        # Random content so packed blocks do not deduplicate away
        dd if=/dev/urandom of="$SOURCE_DIR/file_$i.bin" bs=1M count=$FILE_SIZE_MB status=none
        echo -ne "\rCreated $i/$NUM_FILES files"
    done
    echo -e "\nTest files created successfully"
}
# Function to measure execution time.
# Runs the given command and prints (on stdout) the wall-clock seconds it
# took, as computed by bc.
#
# BUG FIX: callers capture this function with $(measure_time ...), so the
# wrapped command's own stdout used to be captured too, mixing program
# output into the "duration" variable and breaking the later bc math.
# The command's stdout is now redirected to stderr so only the duration
# reaches the caller.
measure_time() {
    local start_time end_time
    # Declare locals separately from the command substitutions so a failing
    # `date` is not masked from `set -e` by the `local` builtin's exit code.
    start_time=$(date +%s.%N)
    "$@" >&2
    end_time=$(date +%s.%N)
    echo "$end_time - $start_time" | bc
}
# Measure how long `rfs pack` takes to pack SOURCE_DIR into an flist at
# FLIST_PATH using the local directory store. Fails if no flist appears.
test_pack_performance() {
    echo -e "\n${GREEN}Testing pack performance...${NC}"

    local pack_time
    pack_time=$(measure_time rfs pack -m "$FLIST_PATH" -s "$STORE_URL" "$SOURCE_DIR")
    echo -e "Pack time: ${YELLOW}$pack_time seconds${NC}"

    # The pack must have produced the flist file
    if [ -f "$FLIST_PATH" ]; then
        echo "Flist created successfully at $FLIST_PATH"
        return 0
    fi

    echo -e "${RED}Flist file was not created${NC}"
    return 1
}
# Unpack the flist twice — once with the default parallel fetcher and once
# with RFS_PARALLEL_DOWNLOAD=1 to request serial downloads — then report
# the relative speedup and check both output trees match.
test_unpack_performance() {
    echo -e "\n${GREEN}Testing unpack performance...${NC}"

    # Start from an empty cache so both runs do the same amount of work
    rm -rf "$CACHE_DIR"
    mkdir -p "$CACHE_DIR"

    echo -e "${YELLOW}Testing with parallel download...${NC}"
    local t_parallel
    t_parallel=$(measure_time rfs unpack -m "$FLIST_PATH" -c "$CACHE_DIR" "$DEST_DIR_PARALLEL")

    # Empty the cache again so the serial run cannot reuse cached blocks
    rm -rf "$CACHE_DIR"
    mkdir -p "$CACHE_DIR"

    echo -e "${YELLOW}Testing with serial download...${NC}"
    local t_serial
    t_serial=$(measure_time env RFS_PARALLEL_DOWNLOAD=1 rfs unpack -m "$FLIST_PATH" -c "$CACHE_DIR" "$DEST_DIR_SERIAL")

    echo -e "Serial unpack time: ${YELLOW}$t_serial seconds${NC}"
    echo -e "Parallel unpack time: ${YELLOW}$t_parallel seconds${NC}"

    # Speedup = serial time / parallel time, to two decimal places
    local speedup
    speedup=$(echo "scale=2; $t_serial / $t_parallel" | bc)
    echo -e "Speedup: ${GREEN}${speedup}x${NC}"

    # Both unpack strategies must produce identical trees
    if diff -r "$DEST_DIR_SERIAL" "$DEST_DIR_PARALLEL" > /dev/null; then
        echo "Files unpacked successfully and match between methods"
        return 0
    fi

    echo -e "${RED}Unpacked files don't match between serial and parallel methods${NC}"
    return 1
}
# Entry point: prepare the workspace, then run the pack and unpack
# performance tests in order (`set -e` aborts the run on any failure).
main() {
    # Remove the scratch tree no matter how we exit
    trap cleanup EXIT

    setup

    local test_fn
    for test_fn in test_pack_performance test_unpack_performance; do
        "$test_fn"
    done

    echo -e "\n${GREEN}All performance tests completed!${NC}"
}

# Run the main function
main