Compare commits: 7856fc0a4e...development

3 commits

| Author | SHA1 | Date |
|--------|------|------|
| | 0e63efda61 | |
| | 568a5b0a49 | |
| | d431705501 | |
.github/workflows/publish.yml (vendored): 227 lines removed

@@ -1,227 +0,0 @@
```yaml
name: Publish SAL Crates

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to publish (e.g., 0.1.0)'
        required: true
        type: string
      dry_run:
        description: 'Dry run (do not actually publish)'
        required: false
        type: boolean
        default: false

env:
  CARGO_TERM_COLOR: always

jobs:
  publish:
    name: Publish to crates.io
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable

      - name: Cache Cargo dependencies
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      - name: Install cargo-edit for version management
        run: cargo install cargo-edit

      - name: Set version from release tag
        if: github.event_name == 'release'
        run: |
          VERSION=${GITHUB_REF#refs/tags/v}
          echo "PUBLISH_VERSION=$VERSION" >> $GITHUB_ENV
          echo "Publishing version: $VERSION"

      - name: Set version from workflow input
        if: github.event_name == 'workflow_dispatch'
        run: |
          echo "PUBLISH_VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
          echo "Publishing version: ${{ github.event.inputs.version }}"

      - name: Update version in all crates
        run: |
          echo "Updating version to $PUBLISH_VERSION"

          # Update root Cargo.toml
          cargo set-version $PUBLISH_VERSION

          # Update each crate
          CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
          for crate in "${CRATES[@]}"; do
            if [ -d "$crate" ]; then
              cd "$crate"
              cargo set-version $PUBLISH_VERSION
              cd ..
              echo "Updated $crate to version $PUBLISH_VERSION"
            fi
          done

      - name: Run tests
        run: cargo test --workspace --verbose

      - name: Check formatting
        run: cargo fmt --all -- --check

      - name: Run clippy
        run: cargo clippy --workspace --all-targets --all-features -- -D warnings

      - name: Dry run publish (check packages)
        run: |
          echo "Checking all packages can be published..."

          CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
          for crate in "${CRATES[@]}"; do
            if [ -d "$crate" ]; then
              echo "Checking $crate..."
              cd "$crate"
              cargo publish --dry-run
              cd ..
            fi
          done

          echo "Checking main crate..."
          cargo publish --dry-run

      - name: Publish crates (dry run)
        if: github.event.inputs.dry_run == 'true'
        run: |
          echo "🔍 DRY RUN MODE - Would publish the following crates:"
          echo "Individual crates: sal-os, sal-process, sal-text, sal-net, sal-git, sal-vault, sal-kubernetes, sal-virt, sal-redisclient, sal-postgresclient, sal-zinit-client, sal-mycelium, sal-rhai"
          echo "Meta-crate: sal"
          echo "Version: $PUBLISH_VERSION"

      - name: Publish individual crates
        if: github.event.inputs.dry_run != 'true'
        env:
          CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
        run: |
          echo "Publishing individual crates..."

          # Crates in dependency order
          CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)

          for crate in "${CRATES[@]}"; do
            if [ -d "$crate" ]; then
              echo "Publishing sal-$crate..."
              cd "$crate"

              # Retry logic for transient failures
              for attempt in 1 2 3; do
                if cargo publish --token $CARGO_REGISTRY_TOKEN; then
                  echo "✅ sal-$crate published successfully"
                  break
                else
                  if [ $attempt -eq 3 ]; then
                    echo "❌ Failed to publish sal-$crate after 3 attempts"
                    exit 1
                  else
                    echo "⚠️ Attempt $attempt failed, retrying in 30 seconds..."
                    sleep 30
                  fi
                fi
              done

              cd ..

              # Wait for crates.io to process
              if [ "$crate" != "rhai" ]; then
                echo "⏳ Waiting 30 seconds for crates.io to process..."
                sleep 30
              fi
            fi
          done

      - name: Publish main crate
        if: github.event.inputs.dry_run != 'true'
        env:
          CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
        run: |
          echo "Publishing main sal crate..."

          # Wait a bit longer before publishing the meta-crate
          echo "⏳ Waiting 60 seconds for all individual crates to be available..."
          sleep 60

          # Retry logic for the main crate
          for attempt in 1 2 3; do
            if cargo publish --token $CARGO_REGISTRY_TOKEN; then
              echo "✅ Main sal crate published successfully"
              break
            else
              if [ $attempt -eq 3 ]; then
                echo "❌ Failed to publish main sal crate after 3 attempts"
                exit 1
              else
                echo "⚠️ Attempt $attempt failed, retrying in 60 seconds..."
                sleep 60
              fi
            fi
          done

      - name: Create summary
        if: always()
        run: |
          echo "## 📦 SAL Publishing Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Version:** $PUBLISH_VERSION" >> $GITHUB_STEP_SUMMARY
          echo "**Trigger:** ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY

          if [ "${{ github.event.inputs.dry_run }}" == "true" ]; then
            echo "**Mode:** Dry Run" >> $GITHUB_STEP_SUMMARY
          else
            echo "**Mode:** Live Publishing" >> $GITHUB_STEP_SUMMARY
          fi

          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Published Crates" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "- sal-os" >> $GITHUB_STEP_SUMMARY
          echo "- sal-process" >> $GITHUB_STEP_SUMMARY
          echo "- sal-text" >> $GITHUB_STEP_SUMMARY
          echo "- sal-net" >> $GITHUB_STEP_SUMMARY
          echo "- sal-git" >> $GITHUB_STEP_SUMMARY
          echo "- sal-vault" >> $GITHUB_STEP_SUMMARY
          echo "- sal-kubernetes" >> $GITHUB_STEP_SUMMARY
          echo "- sal-virt" >> $GITHUB_STEP_SUMMARY
          echo "- sal-redisclient" >> $GITHUB_STEP_SUMMARY
          echo "- sal-postgresclient" >> $GITHUB_STEP_SUMMARY
          echo "- sal-zinit-client" >> $GITHUB_STEP_SUMMARY
          echo "- sal-mycelium" >> $GITHUB_STEP_SUMMARY
          echo "- sal-rhai" >> $GITHUB_STEP_SUMMARY
          echo "- sal (meta-crate)" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Usage" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo '```bash' >> $GITHUB_STEP_SUMMARY
          echo "# Individual crates" >> $GITHUB_STEP_SUMMARY
          echo "cargo add sal-os sal-process sal-text" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "# Meta-crate with features" >> $GITHUB_STEP_SUMMARY
          echo "cargo add sal --features core" >> $GITHUB_STEP_SUMMARY
          echo "cargo add sal --features all" >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY
```
.github/workflows/test-publish.yml (vendored): 233 lines removed

@@ -1,233 +0,0 @@
```yaml
name: Test Publishing Setup

on:
  push:
    branches: [ main, master ]
    paths:
      - '**/Cargo.toml'
      - 'scripts/publish-all.sh'
      - '.github/workflows/publish.yml'
  pull_request:
    branches: [ main, master ]
    paths:
      - '**/Cargo.toml'
      - 'scripts/publish-all.sh'
      - '.github/workflows/publish.yml'
  workflow_dispatch:

env:
  CARGO_TERM_COLOR: always

jobs:
  test-publish-setup:
    name: Test Publishing Setup
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable

      - name: Cache Cargo dependencies
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-publish-test-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-publish-test-
            ${{ runner.os }}-cargo-

      - name: Install cargo-edit
        run: cargo install cargo-edit

      - name: Test workspace structure
        run: |
          echo "Testing workspace structure..."

          # Check that all expected crates exist
          EXPECTED_CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo)

          for crate in "${EXPECTED_CRATES[@]}"; do
            if [ -d "$crate" ] && [ -f "$crate/Cargo.toml" ]; then
              echo "✅ $crate exists"
            else
              echo "❌ $crate missing or invalid"
              exit 1
            fi
          done

      - name: Test feature configuration
        run: |
          echo "Testing feature configuration..."

          # Test that features work correctly
          cargo check --features os
          cargo check --features process
          cargo check --features text
          cargo check --features net
          cargo check --features git
          cargo check --features vault
          cargo check --features kubernetes
          cargo check --features virt
          cargo check --features redisclient
          cargo check --features postgresclient
          cargo check --features zinit_client
          cargo check --features mycelium
          cargo check --features rhai

          echo "✅ All individual features work"

          # Test feature groups
          cargo check --features core
          cargo check --features clients
          cargo check --features infrastructure
          cargo check --features scripting

          echo "✅ All feature groups work"

          # Test all features
          cargo check --features all

          echo "✅ All features together work"

      - name: Test dry-run publishing
        run: |
          echo "Testing dry-run publishing..."

          # Test each individual crate can be packaged
          CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)

          for crate in "${CRATES[@]}"; do
            echo "Testing sal-$crate..."
            cd "$crate"
            cargo publish --dry-run
            cd ..
            echo "✅ sal-$crate can be published"
          done

          # Test main crate
          echo "Testing main sal crate..."
          cargo publish --dry-run
          echo "✅ Main sal crate can be published"

      - name: Test publishing script
        run: |
          echo "Testing publishing script..."

          # Make script executable
          chmod +x scripts/publish-all.sh

          # Test dry run
          ./scripts/publish-all.sh --dry-run --version 0.1.0-test

          echo "✅ Publishing script works"

      - name: Test version consistency
        run: |
          echo "Testing version consistency..."

          # Get version from root Cargo.toml
          ROOT_VERSION=$(grep '^version = ' Cargo.toml | head -1 | sed 's/version = "\(.*\)"/\1/')
          echo "Root version: $ROOT_VERSION"

          # Check all crates have the same version
          CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo)

          for crate in "${CRATES[@]}"; do
            if [ -f "$crate/Cargo.toml" ]; then
              CRATE_VERSION=$(grep '^version = ' "$crate/Cargo.toml" | head -1 | sed 's/version = "\(.*\)"/\1/')
              if [ "$CRATE_VERSION" = "$ROOT_VERSION" ]; then
                echo "✅ $crate version matches: $CRATE_VERSION"
              else
                echo "❌ $crate version mismatch: $CRATE_VERSION (expected $ROOT_VERSION)"
                exit 1
              fi
            fi
          done

      - name: Test metadata completeness
        run: |
          echo "Testing metadata completeness..."

          # Check that all crates have required metadata
          CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)

          for crate in "${CRATES[@]}"; do
            echo "Checking sal-$crate metadata..."
            cd "$crate"

            # Check required fields exist
            if ! grep -q '^name = "sal-' Cargo.toml; then
              echo "❌ $crate missing or incorrect name"
              exit 1
            fi

            if ! grep -q '^description = ' Cargo.toml; then
              echo "❌ $crate missing description"
              exit 1
            fi

            if ! grep -q '^repository = ' Cargo.toml; then
              echo "❌ $crate missing repository"
              exit 1
            fi

            if ! grep -q '^license = ' Cargo.toml; then
              echo "❌ $crate missing license"
              exit 1
            fi

            echo "✅ sal-$crate metadata complete"
            cd ..
          done

      - name: Test dependency resolution
        run: |
          echo "Testing dependency resolution..."

          # Test that all workspace dependencies resolve correctly
          cargo tree --workspace > /dev/null
          echo "✅ All dependencies resolve correctly"

          # Test that there are no dependency conflicts
          cargo check --workspace
          echo "✅ No dependency conflicts"

      - name: Generate publishing report
        if: always()
        run: |
          echo "## 🧪 Publishing Setup Test Report" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### ✅ Tests Passed" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "- Workspace structure validation" >> $GITHUB_STEP_SUMMARY
          echo "- Feature configuration testing" >> $GITHUB_STEP_SUMMARY
          echo "- Dry-run publishing simulation" >> $GITHUB_STEP_SUMMARY
          echo "- Publishing script validation" >> $GITHUB_STEP_SUMMARY
          echo "- Version consistency check" >> $GITHUB_STEP_SUMMARY
          echo "- Metadata completeness verification" >> $GITHUB_STEP_SUMMARY
          echo "- Dependency resolution testing" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📦 Ready for Publishing" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "All SAL crates are ready for publishing to crates.io!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Individual Crates:** 13 modules" >> $GITHUB_STEP_SUMMARY
          echo "**Meta-crate:** sal with optional features" >> $GITHUB_STEP_SUMMARY
          echo "**Binary:** herodo script executor" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🚀 Next Steps" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "1. Create a release tag (e.g., v0.1.0)" >> $GITHUB_STEP_SUMMARY
          echo "2. The publish workflow will automatically trigger" >> $GITHUB_STEP_SUMMARY
          echo "3. All crates will be published to crates.io" >> $GITHUB_STEP_SUMMARY
          echo "4. Users can install with: \`cargo add sal-os\` or \`cargo add sal --features all\`" >> $GITHUB_STEP_SUMMARY
```
.gitignore (vendored): 2 lines changed

@@ -62,5 +62,3 @@ docusaurus.config.ts
```
sidebars.ts

tsconfig.json
Cargo.toml.bak
for_augment
```
Cargo.toml: 111 lines changed

@@ -11,25 +11,7 @@ categories = ["os", "filesystem", "api-bindings"]
```toml
readme = "README.md"

[workspace]
members = [
    ".",
    "vault",
    "git",
    "redisclient",
    "mycelium",
    "text",
    "os",
    "net",
    "zinit_client",
    "process",
    "virt",
    "zos",
    "postgresclient",
    "kubernetes",
    "rhai",
    "herodo",
    "service_manager",
]
members = [".", "vault", "git", "redisclient", "mycelium", "text", "os", "net", "zinit_client", "process", "virt", "postgresclient", "rhai", "herodo"]
resolver = "2"

[workspace.metadata]
```

@@ -84,84 +66,21 @@ windows = { version = "0.61.1", features = [

```toml
] }

# Specialized dependencies
zinit-client = "0.4.0"
zinit-client = "0.3.0"
urlencoding = "2.1.3"
tokio-test = "0.4.4"

[dependencies]
thiserror = "2.0.12" # For error handling in the main Error enum
tokio = { workspace = true } # For async examples

# Optional dependencies - users can choose which modules to include
sal-git = { path = "git", optional = true }
sal-kubernetes = { path = "kubernetes", optional = true }
sal-redisclient = { path = "redisclient", optional = true }
sal-mycelium = { path = "mycelium", optional = true }
sal-text = { path = "text", optional = true }
sal-os = { path = "os", optional = true }
sal-net = { path = "net", optional = true }
sal-zinit-client = { path = "zinit_client", optional = true }
sal-process = { path = "process", optional = true }
sal-virt = { path = "virt", optional = true }
sal-postgresclient = { path = "postgresclient", optional = true }
sal-vault = { path = "vault", optional = true }
sal-rhai = { path = "rhai", optional = true }
sal-service-manager = { path = "service_manager", optional = true }
zinit-client.workspace = true

[features]
default = []

# Individual module features
git = ["dep:sal-git"]
kubernetes = ["dep:sal-kubernetes"]
redisclient = ["dep:sal-redisclient"]
mycelium = ["dep:sal-mycelium"]
text = ["dep:sal-text"]
os = ["dep:sal-os"]
net = ["dep:sal-net"]
zinit_client = ["dep:sal-zinit-client"]
process = ["dep:sal-process"]
virt = ["dep:sal-virt"]
postgresclient = ["dep:sal-postgresclient"]
vault = ["dep:sal-vault"]
rhai = ["dep:sal-rhai"]
service_manager = ["dep:sal-service-manager"]

# Convenience feature groups
core = ["os", "process", "text", "net"]
clients = ["redisclient", "postgresclient", "zinit_client", "mycelium"]
infrastructure = ["git", "vault", "kubernetes", "virt", "service_manager"]
scripting = ["rhai"]
all = [
    "git",
    "kubernetes",
    "redisclient",
    "mycelium",
    "text",
    "os",
    "net",
    "zinit_client",
    "process",
    "virt",
    "postgresclient",
    "vault",
    "rhai",
    "service_manager",
]

# Examples
[[example]]
name = "postgres_cluster"
path = "examples/kubernetes/clusters/postgres.rs"
required-features = ["kubernetes"]

[[example]]
name = "redis_cluster"
path = "examples/kubernetes/clusters/redis.rs"
required-features = ["kubernetes"]

[[example]]
name = "generic_cluster"
path = "examples/kubernetes/clusters/generic.rs"
required-features = ["kubernetes"]
thiserror = "2.0.12" # For error handling in the main Error enum
sal-git = { path = "git" }
sal-redisclient = { path = "redisclient" }
sal-mycelium = { path = "mycelium" }
sal-text = { path = "text" }
sal-os = { path = "os" }
sal-net = { path = "net" }
sal-zinit-client = { path = "zinit_client" }
sal-process = { path = "process" }
sal-virt = { path = "virt" }
sal-postgresclient = { path = "postgresclient" }
sal-vault = { path = "vault" }
sal-rhai = { path = "rhai" }
```
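The feature flags in the manifest above are normally paired with conditional re-exports in the root crate's `src/lib.rs`, which this comparison does not show. The following is a minimal illustrative sketch of that pattern, not the actual file contents; the crate and feature names come from the manifest, everything else is an assumption:

```rust
// Hypothetical sketch of the meta-crate's src/lib.rs (not part of this diff):
// each optional dependency declared above is re-exported only when its
// corresponding feature flag is enabled.

#[cfg(feature = "os")]
pub use sal_os as os;

#[cfg(feature = "process")]
pub use sal_process as process;

#[cfg(feature = "git")]
pub use sal_git as git;

#[cfg(feature = "kubernetes")]
pub use sal_kubernetes as kubernetes;
```

With this layout, `cargo add sal --features core` pulls in only the crates listed in the `core` feature group, while `--features all` enables every optional module.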
PUBLISHING.md: 239 lines removed

@@ -1,239 +0,0 @@
# SAL Publishing Guide

This guide explains how to publish SAL crates to crates.io and how users can consume them.

## 🎯 Publishing Strategy

SAL uses a **modular publishing approach** where each module is published as an individual crate. This allows users to install only the functionality they need, reducing compilation time and binary size.

## 📦 Crate Structure

### Individual Crates

Each SAL module is published as a separate crate:

| Crate Name | Description | Category |
|------------|-------------|----------|
| `sal-os` | Operating system operations | Core |
| `sal-process` | Process management | Core |
| `sal-text` | Text processing utilities | Core |
| `sal-net` | Network operations | Core |
| `sal-git` | Git repository management | Infrastructure |
| `sal-vault` | Cryptographic operations | Infrastructure |
| `sal-kubernetes` | Kubernetes cluster management | Infrastructure |
| `sal-virt` | Virtualization tools (Buildah, nerdctl) | Infrastructure |
| `sal-redisclient` | Redis database client | Clients |
| `sal-postgresclient` | PostgreSQL database client | Clients |
| `sal-zinit-client` | Zinit process supervisor client | Clients |
| `sal-mycelium` | Mycelium network client | Clients |
| `sal-rhai` | Rhai scripting integration | Scripting |

### Meta-crate

The main `sal` crate serves as a meta-crate that re-exports all modules with optional features:

```toml
[dependencies]
sal = { version = "0.1.0", features = ["os", "process", "text"] }
```

## 🚀 Publishing Process

### Prerequisites

1. **Crates.io Account**: Ensure you have a crates.io account and API token
2. **Repository Access**: Ensure the repository URL is accessible
3. **Version Consistency**: All crates should use the same version number

### Publishing Individual Crates

Each crate can be published independently:

```bash
# Publish core modules
cd os && cargo publish
cd ../process && cargo publish
cd ../text && cargo publish
cd ../net && cargo publish

# Publish infrastructure modules
cd ../git && cargo publish
cd ../vault && cargo publish
cd ../kubernetes && cargo publish
cd ../virt && cargo publish

# Publish client modules
cd ../redisclient && cargo publish
cd ../postgresclient && cargo publish
cd ../zinit_client && cargo publish
cd ../mycelium && cargo publish

# Publish scripting module
cd ../rhai && cargo publish

# Finally, publish the meta-crate
cd .. && cargo publish
```

### Automated Publishing

Use the comprehensive publishing script:

```bash
# Test the publishing process (safe)
./scripts/publish-all.sh --dry-run --version 0.1.0

# Actually publish to crates.io
./scripts/publish-all.sh --version 0.1.0
```

The script handles:
- ✅ **Dependency order** - Publishes crates in correct dependency order
- ✅ **Path dependencies** - Automatically updates path deps to version deps
- ✅ **Rate limiting** - Waits between publishes to avoid rate limits
- ✅ **Error handling** - Stops on failures with clear error messages
- ✅ **Dry run mode** - Test without actually publishing

## 👥 User Consumption

### Installation Options

#### Option 1: Individual Crates (Recommended)

Users install only what they need:

```bash
# Core functionality
cargo add sal-os sal-process sal-text sal-net

# Database operations
cargo add sal-redisclient sal-postgresclient

# Infrastructure management
cargo add sal-git sal-vault sal-kubernetes

# Service integration
cargo add sal-zinit-client sal-mycelium

# Scripting
cargo add sal-rhai
```

**Usage:**
```rust
use sal_os::fs;
use sal_process::run;
use sal_git::GitManager;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let files = fs::list_files(".")?;
    let result = run::command("echo hello")?;
    let git = GitManager::new(".")?;
    Ok(())
}
```

#### Option 2: Meta-crate with Features

Users can use the main crate with selective features:

```bash
# Specific modules
cargo add sal --features os,process,text

# Feature groups
cargo add sal --features core            # os, process, text, net
cargo add sal --features clients         # redisclient, postgresclient, zinit_client, mycelium
cargo add sal --features infrastructure  # git, vault, kubernetes, virt
cargo add sal --features scripting       # rhai

# Everything
cargo add sal --features all
```

**Usage:**
```rust
// Cargo.toml: sal = { version = "0.1.0", features = ["os", "process", "git"] }
use sal::os::fs;
use sal::process::run;
use sal::git::GitManager;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let files = fs::list_files(".")?;
    let result = run::command("echo hello")?;
    let git = GitManager::new(".")?;
    Ok(())
}
```

### Feature Groups

The meta-crate provides convenient feature groups:

- **`core`**: Essential system operations (os, process, text, net)
- **`clients`**: Database and service clients (redisclient, postgresclient, zinit_client, mycelium)
- **`infrastructure`**: Infrastructure management tools (git, vault, kubernetes, virt)
- **`scripting`**: Rhai scripting support (rhai)
- **`all`**: Everything included

## 📋 Version Management

### Semantic Versioning

All SAL crates follow semantic versioning:

- **Major version**: Breaking API changes
- **Minor version**: New features, backward compatible
- **Patch version**: Bug fixes, backward compatible

### Synchronized Releases

All crates are released with the same version number to ensure compatibility:

```toml
# All crates use the same version
sal-os = "0.1.0"
sal-process = "0.1.0"
sal-git = "0.1.0"
# etc.
```

## 🔧 Maintenance

### Updating Dependencies

When updating dependencies:

1. Update `Cargo.toml` in the workspace root
2. Update individual crate dependencies if needed
3. Test all crates: `cargo test --workspace`
4. Publish with incremented version numbers

### Adding New Modules

To add a new SAL module:

1. Create the new crate directory
2. Add to workspace members in root `Cargo.toml`
3. Add optional dependency in root `Cargo.toml`
4. Add feature flag in root `Cargo.toml`
5. Add conditional re-export in `src/lib.rs`
6. Update documentation

## 🎉 Benefits

### For Users

- **Minimal Dependencies**: Install only what you need
- **Faster Builds**: Smaller dependency trees compile faster
- **Smaller Binaries**: Reduced binary size
- **Clear Dependencies**: Explicit about what functionality is used

### For Maintainers

- **Independent Releases**: Can release individual crates as needed
- **Focused Testing**: Test individual modules in isolation
- **Clear Ownership**: Each crate has clear responsibility
- **Easier Maintenance**: Smaller, focused codebases

This publishing strategy provides the best of both worlds: modularity for users who want minimal dependencies, and convenience for users who prefer a single crate with features.
README.md: 324 lines changed

@@ -1,148 +1,228 @@
# SAL (System Abstraction Layer)

**Version 0.1.0** - A modular Rust library for cross-platform system operations and automation.
**Version: 0.1.0**

SAL provides a unified interface for system operations with Rhai scripting support through the `herodo` tool.
SAL is a comprehensive Rust library designed to provide a unified and simplified interface for a wide array of system-level operations and interactions. It abstracts platform-specific details, enabling developers to write robust, cross-platform code with greater ease. SAL also includes `herodo`, a powerful command-line tool for executing Rhai scripts that leverage SAL's capabilities for automation and system management tasks.

## Installation
## 🏗️ **Cargo Workspace Structure**

### Individual Packages (Recommended)
SAL is organized as a **Cargo workspace** with 16 specialized crates:

```bash
# Core functionality
cargo add sal-os sal-process sal-text sal-net
- **Root Package**: `sal` - Umbrella crate that re-exports all modules
- **13 Library Crates**: Specialized SAL modules (git, text, os, net, etc.)
- **1 Binary Crate**: `herodo` - Rhai script execution engine
- **1 Integration Crate**: `rhai` - Rhai scripting integration layer

# Infrastructure
cargo add sal-git sal-vault sal-kubernetes sal-virt
This workspace structure provides excellent build performance, dependency management, and maintainability.

# Database clients
cargo add sal-redisclient sal-postgresclient sal-zinit-client

# Scripting
cargo add sal-rhai
```

### Meta-package with Features

```bash
cargo add sal --features core            # os, process, text, net
cargo add sal --features infrastructure  # git, vault, kubernetes, virt
cargo add sal --features all             # everything
```

### Herodo Script Runner

```bash
cargo install herodo
```

## Quick Start

### Rust Library Usage

```rust
use sal_os::fs;
use sal_process::run;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let files = fs::list_files(".")?;
    println!("Found {} files", files.len());

    let result = run::command("echo hello")?;
    println!("Output: {}", result.stdout);
    Ok(())
}
```

### Herodo Scripting

```bash
# Create script
cat > example.rhai << 'EOF'
let files = find_files(".", "*.rs");
print("Found " + files.len() + " Rust files");

let result = run("echo 'Hello from SAL!'");
print("Output: " + result.stdout);
EOF

# Run script
herodo example.rhai
```

## Available Packages

| Package | Description |
|---------|-------------|
| [`sal-os`](https://crates.io/crates/sal-os) | Operating system operations |
| [`sal-process`](https://crates.io/crates/sal-process) | Process management |
| [`sal-text`](https://crates.io/crates/sal-text) | Text processing |
| [`sal-net`](https://crates.io/crates/sal-net) | Network operations |
| [`sal-git`](https://crates.io/crates/sal-git) | Git repository management |
| [`sal-vault`](https://crates.io/crates/sal-vault) | Cryptographic operations |
| [`sal-kubernetes`](https://crates.io/crates/sal-kubernetes) | Kubernetes management |
| [`sal-virt`](https://crates.io/crates/sal-virt) | Virtualization tools |
| [`sal-redisclient`](https://crates.io/crates/sal-redisclient) | Redis client |
| [`sal-postgresclient`](https://crates.io/crates/sal-postgresclient) | PostgreSQL client |
| [`sal-zinit-client`](https://crates.io/crates/sal-zinit-client) | Zinit process supervisor |
| [`sal-mycelium`](https://crates.io/crates/sal-mycelium) | Mycelium network client |
| [`sal-service-manager`](https://crates.io/crates/sal-service-manager) | Service management |
| [`sal-rhai`](https://crates.io/crates/sal-rhai) | Rhai scripting integration |
| [`sal`](https://crates.io/crates/sal) | Meta-crate with features |
| [`herodo`](https://crates.io/crates/herodo) | Script executor binary |

## Building & Testing

```bash
# Build all packages
cargo build --workspace

# Run tests
cargo test --workspace

# Run Rhai integration tests
./run_rhai_tests.sh
```
### **🚀 Workspace Benefits**
- **Unified Dependency Management**: Shared dependencies across all crates with consistent versions
- **Optimized Build Performance**: Parallel compilation and shared build artifacts
- **Simplified Testing**: Run tests across all modules with a single command
- **Modular Architecture**: Each module is independently maintainable while sharing common infrastructure
- **Production Ready**: 100% test coverage with comprehensive Rhai integration tests

## Core Features

- **System Operations**: File/directory management, environment access, OS commands
- **Process Management**: Create, monitor, and control system processes
- **Containerization**: Buildah and nerdctl integration
- **Version Control**: Git repository operations
- **Database Clients**: Redis and PostgreSQL support
- **Networking**: HTTP, TCP, SSH connectivity utilities
- **Cryptography**: Key management, encryption, digital signatures
- **Text Processing**: String manipulation and templating
- **Scripting**: Rhai script execution via `herodo`
SAL offers a broad spectrum of functionalities, including:

## Herodo Scripting
- **System Operations**: File and directory management, environment variable access, system information retrieval, and OS-specific commands.
- **Process Management**: Create, monitor, control, and interact with system processes.
- **Containerization Tools**:
  - Integration with **Buildah** for building OCI/Docker-compatible container images.
  - Integration with **nerdctl** for managing containers (run, stop, list, build, etc.).
- **Version Control**: Programmatic interaction with Git repositories (clone, commit, push, pull, status, etc.).
- **Database Clients**:
  - **Redis**: Robust client for interacting with Redis servers.
  - **PostgreSQL**: Client for executing queries and managing PostgreSQL databases.
- **Scripting Engine**: In-built support for the **Rhai** scripting language, allowing SAL functionalities to be scripted and automated, primarily through the `herodo` tool.
- **Networking & Services**:
  - **Mycelium**: Tools for Mycelium network peer management and message passing.
  - **Zinit**: Client for interacting with the Zinit process supervision system.
  - **RFS (Remote/Virtual Filesystem)**: Mount, manage, pack, and unpack various types of filesystems (local, SSH, S3, WebDAV).
- **Text Processing**: A suite of utilities for text manipulation, formatting, and regular expressions.
- **Cryptography (`vault`)**: Functions for common cryptographic operations.

`herodo` executes Rhai scripts with access to all SAL modules:
## `herodo`: The SAL Scripting Tool

`herodo` is a command-line utility bundled with SAL that executes Rhai scripts. It empowers users to automate tasks and orchestrate complex workflows by leveraging SAL's diverse modules directly from scripts.

### Usage

```bash
herodo script.rhai            # Run single script
herodo script.rhai arg1 arg2  # With arguments
herodo /path/to/scripts/      # Run all .rhai files in directory
# Execute a single Rhai script
herodo script.rhai

# Execute a script with arguments
herodo script.rhai arg1 arg2

# Execute all .rhai scripts in a directory
herodo /path/to/scripts/
```

### Example Script
If a directory is provided, `herodo` will execute all `.rhai` scripts within that directory (and its subdirectories) in alphabetical order.

### Scriptable SAL Modules via `herodo`

The following SAL modules and functionalities are exposed to the Rhai scripting environment through `herodo`:

- **OS (`os`)**: Comprehensive file system operations, file downloading & installation, and system package management. [Documentation](os/README.md)
- **Process (`process`)**: Robust command and script execution, plus process management (listing, finding, killing, checking command existence). [Documentation](process/README.md)
- **Text (`text`)**: String manipulation, prefixing, path/name fixing, text replacement, and templating. [Documentation](text/README.md)
- **Net (`net`)**: Network operations, HTTP requests, and connectivity utilities. [Documentation](net/README.md)
- **Git (`git`)**: High-level repository management and generic Git command execution with Redis-backed authentication (clone, pull, push, commit, etc.). [Documentation](git/README.md)
- **Vault (`vault`)**: Cryptographic operations, keypair management, encryption, decryption, hashing, etc. [Documentation](vault/README.md)
- **Redis Client (`redisclient`)**: Execute Redis commands (`redis_get`, `redis_set`, `redis_execute`, etc.). [Documentation](redisclient/README.md)
- **PostgreSQL Client (`postgresclient`)**: Execute SQL queries against PostgreSQL databases. [Documentation](postgresclient/README.md)
- **Zinit (`zinit_client`)**: Client for Zinit process supervisor (service management, logs). [Documentation](zinit_client/README.md)
- **Mycelium (`mycelium`)**: Client for Mycelium decentralized networking API (node info, peer management, messaging). [Documentation](mycelium/README.md)
- **Virtualization (`virt`)**:
  - **Buildah**: OCI/Docker image building functions. [Documentation](virt/README.md)
  - **nerdctl**: Container lifecycle management (`nerdctl_run`, `nerdctl_stop`, `nerdctl_images`, `nerdctl_image_build`, etc.)
  - **RFS**: Mount various filesystems (local, SSH, S3, etc.), pack/unpack filesystem layers.

### Example `herodo` Rhai Script

```rhai
// File operations
let files = find_files(".", "*.rs");
print("Found " + files.len() + " Rust files");
// file: /opt/scripts/example_task.rhai

// Process execution
let result = run("echo 'Hello SAL!'");
print("Output: " + result.stdout);
// OS operations
println("Checking for /tmp/my_app_data...");
if !exist("/tmp/my_app_data") {
    mkdir("/tmp/my_app_data");
    println("Created directory /tmp/my_app_data");
}

// Redis operations
redis_set("status", "running");
let status = redis_get("status");
print("Status: " + status);
println("Setting Redis key 'app_status' to 'running'");
redis_set("app_status", "running");
let status = redis_get("app_status");
println("Current app_status from Redis: " + status);

// Process execution
println("Listing files in /tmp:");
let output = run("ls -la /tmp");
println(output.stdout);

println("Script finished.");
```

Run with: `herodo /opt/scripts/example_task.rhai`

For more examples, check the individual module test directories (e.g., `text/tests/rhai/`, `os/tests/rhai/`, etc.) in this repository.

## Using SAL as a Rust Library

Add SAL as a dependency to your `Cargo.toml`:

```toml
[dependencies]
sal = "0.1.0" # Or the latest version
```

### Rust Example: Using Redis Client

```rust
use sal::redisclient::{get_global_client, execute_cmd_with_args};
use redis::RedisResult;

async fn example_redis_interaction() -> RedisResult<()> {
    // Get a connection from the global pool
    let mut conn = get_global_client().await?.get_async_connection().await?;

    // Set a value
    execute_cmd_with_args(&mut conn, "SET", vec!["my_key", "my_value"]).await?;
    println!("Set 'my_key' to 'my_value'");

    // Get a value
    let value: String = execute_cmd_with_args(&mut conn, "GET", vec!["my_key"]).await?;
    println!("Retrieved value for 'my_key': {}", value);

    Ok(())
}

#[tokio::main]
async fn main() {
    if let Err(e) = example_redis_interaction().await {
        eprintln!("Redis Error: {}", e);
    }
}
```
*(Note: The Redis client API might have evolved; please refer to `src/redisclient/mod.rs` and its documentation for the most current usage.)*

## 📦 **Workspace Modules Overview**

SAL is organized as a Cargo workspace with the following crates:

### **Core Library Modules**
- **`sal-os`**: Core OS interactions, file system operations, environment access
- **`sal-process`**: Process creation, management, and control
- **`sal-text`**: Utilities for text processing and manipulation
- **`sal-net`**: Network operations, HTTP requests, and connectivity utilities

### **Integration Modules**
- **`sal-git`**: Git repository management and operations
- **`sal-vault`**: Cryptographic functions and keypair management
- **`sal-rhai`**: Integration layer for the Rhai scripting engine, used by `herodo`

### **Client Modules**
- **`sal-redisclient`**: Client for Redis database interactions
- **`sal-postgresclient`**: Client for PostgreSQL database interactions
- **`sal-zinit-client`**: Client for Zinit process supervisor
- **`sal-mycelium`**: Client for Mycelium network operations

### **Specialized Modules**
- **`sal-virt`**: Virtualization-related utilities (buildah, nerdctl, rfs)

### **Root Package & Binary**
- **`sal`**: Root umbrella crate that re-exports all modules
- **`herodo`**: Command-line binary for executing Rhai scripts

## 🔨 **Building SAL**

Build the entire workspace (all crates) using Cargo:

```bash
# Build all workspace members
cargo build --workspace

# Build for release
cargo build --workspace --release

# Build specific crate
cargo build -p sal-text
cargo build -p herodo
```

The `herodo` executable will be located at `target/debug/herodo` or `target/release/herodo`.

## 🧪 **Running Tests**

### **Rust Unit Tests**
```bash
# Run all workspace tests
cargo test --workspace

# Run tests for specific crate
cargo test -p sal-text
cargo test -p sal-os

# Run only library tests (faster)
cargo test --workspace --lib
```

### **Rhai Integration Tests**
Run comprehensive Rhai script tests that exercise `herodo` and SAL's scripted functionalities:

```bash
# Run all Rhai integration tests (16 modules)
./run_rhai_tests.sh

# Results: 16/16 modules pass with 100% success rate
```

The Rhai tests validate real-world functionality across all SAL modules and provide comprehensive integration testing.

## License

Licensed under the Apache License 2.0. See [LICENSE](LICENSE) for details.
SAL is licensed under the Apache License 2.0. See the [LICENSE](LICENSE) file for details.
@@ -1,76 +1,64 @@
# SAL Vault Examples
# Hero Vault Cryptography Examples

This directory contains examples demonstrating the SAL Vault functionality.
This directory contains examples demonstrating the Hero Vault cryptography functionality integrated into the SAL project.

## Overview

SAL Vault provides secure key management and cryptographic operations including:
Hero Vault provides cryptographic operations including:

- Vault creation and management
- KeySpace operations (encrypted key-value stores)
- Symmetric key generation and operations
- Asymmetric key operations (signing and verification)
- Secure key derivation from passwords
- Key space management (creation, loading, encryption, decryption)
- Keypair management (creation, selection, listing)
- Digital signatures (signing and verification)
- Symmetric encryption (key generation, encryption, decryption)
- Ethereum wallet functionality
- Smart contract interactions
- Key-value store with encryption

## Current Status
## Example Files

⚠️ **Note**: The vault module is currently being updated to use Lee's implementation.
The Rhai scripting integration is temporarily disabled while we adapt the examples
to work with the new vault API.
- `example.rhai` - Basic example demonstrating key management, signing, and encryption
- `advanced_example.rhai` - Advanced example with error handling, conditional logic, and more complex operations
- `key_persistence_example.rhai` - Demonstrates creating and saving a key space to disk
- `load_existing_space.rhai` - Shows how to load a previously created key space and use its keypairs
- `contract_example.rhai` - Demonstrates loading a contract ABI and interacting with smart contracts
- `agung_send_transaction.rhai` - Demonstrates sending native tokens on the Agung network
- `agung_contract_with_args.rhai` - Shows how to interact with contracts with arguments on Agung

## Available Operations
## Running the Examples

- **Vault Management**: Create and manage vault instances
- **KeySpace Operations**: Open encrypted key-value stores within vaults
- **Symmetric Encryption**: Generate keys and encrypt/decrypt data
- **Asymmetric Operations**: Create keypairs, sign messages, verify signatures
You can run the examples using the `herodo` tool that comes with the SAL project:

## Example Files (Legacy - Sameh's Implementation)
```bash
# Run a single example
herodo --path example.rhai

⚠️ **These examples are currently archived and use the previous vault implementation**:

- `_archive/example.rhai` - Basic example demonstrating key management, signing, and encryption
- `_archive/advanced_example.rhai` - Advanced example with error handling and complex operations
- `_archive/key_persistence_example.rhai` - Demonstrates creating and saving a key space to disk
- `_archive/load_existing_space.rhai` - Shows how to load a previously created key space
- `_archive/contract_example.rhai` - Demonstrates smart contract interactions (Ethereum)
- `_archive/agung_send_transaction.rhai` - Demonstrates Ethereum transactions on Agung network
- `_archive/agung_contract_with_args.rhai` - Shows contract interactions with arguments

## Current Implementation (Lee's Vault)

The current vault implementation provides:

```rust
// Create a new vault
let vault = Vault::new(&path).await?;

// Open an encrypted keyspace
let keyspace = vault.open_keyspace("my_space", "password").await?;

// Perform cryptographic operations
// (API documentation coming soon)
# Run all examples using the provided script
./run_examples.sh
```

## Migration Status
## Key Space Storage

- ✅ **Vault Core**: Lee's implementation is active
- ✅ **Archive**: Sameh's implementation preserved in `vault/_archive/`
- ⏳ **Rhai Integration**: Being developed for Lee's implementation
- ⏳ **Examples**: Will be updated to use Lee's API
- ❌ **Ethereum Features**: Not available in Lee's implementation
Key spaces are stored in the `~/.hero-vault/key-spaces/` directory by default. Each key space is stored in a separate JSON file named after the key space (e.g., `my_space.json`).

## Ethereum Functionality

The Hero Vault module provides comprehensive Ethereum wallet functionality:

- Creating and managing wallets for different networks
- Sending ETH transactions
- Checking balances
- Interacting with smart contracts (read and write functions)
- Support for multiple networks (Ethereum, Gnosis, Peaq, Agung, etc.)

## Security

The vault uses:
Key spaces are encrypted with ChaCha20Poly1305 using a key derived from the provided password. The encryption ensures that the key material is secure at rest.

- **ChaCha20Poly1305** for symmetric encryption
- **Password-based key derivation** for keyspace encryption
- **Secure key storage** with proper isolation
## Best Practices

## Next Steps

1. **Rhai Integration**: Implement Rhai bindings for Lee's vault
2. **New Examples**: Create examples using Lee's simpler API
3. **Documentation**: Complete API documentation for Lee's implementation
4. **Migration Guide**: Provide guidance for users migrating from Sameh's implementation
1. **Use Strong Passwords**: Since the security of your key spaces depends on the strength of your passwords, use strong, unique passwords.
2. **Backup Key Spaces**: Regularly backup your key spaces directory to prevent data loss.
3. **Script Organization**: Split your scripts into logical units, with separate scripts for key creation and key usage.
4. **Error Handling**: Always check the return values of functions to ensure operations succeeded before proceeding.
5. **Network Selection**: When working with Ethereum functionality, be explicit about which network you're targeting to avoid confusion.
6. **Gas Management**: For Ethereum transactions, consider gas costs and set appropriate gas limits.
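To make the vault/keyspace flow above concrete, here is a speculative Rust sketch built around the two calls shown in the README snippet (`Vault::new` and `open_keyspace`). The import path and the `set`/`get` key-value operations are hypothetical placeholders for the API that is still being documented, not confirmed signatures:

```rust
// Speculative sketch only: Vault::new and open_keyspace appear in the README
// snippet above; the import path and set/get calls are hypothetical placeholders.
use sal_vault::Vault; // assumed import path

async fn vault_demo() -> Result<(), Box<dyn std::error::Error>> {
    // Create (or open) a vault backed by an on-disk directory.
    let vault = Vault::new("/tmp/demo-vault").await?;

    // Open an encrypted keyspace; the password drives the key derivation
    // used for the ChaCha20Poly1305 encryption described above.
    let keyspace = vault.open_keyspace("my_space", "password").await?;

    // Hypothetical key-value operations on the keyspace.
    keyspace.set("api_token", b"secret-value").await?;
    let _token = keyspace.get("api_token").await?;

    Ok(())
}
```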
@@ -1,72 +0,0 @@
```rhai
//! Basic Kubernetes operations example
//!
//! This script demonstrates basic Kubernetes operations using the SAL Kubernetes module.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Appropriate permissions for the operations
//!
//! Usage:
//!   herodo examples/kubernetes/basic_operations.rhai

print("=== SAL Kubernetes Basic Operations Example ===");

// Create a KubernetesManager for the default namespace
print("Creating KubernetesManager for 'default' namespace...");
let km = kubernetes_manager_new("default");
print("✓ KubernetesManager created for namespace: " + namespace(km));

// List all pods in the namespace
print("\n--- Listing Pods ---");
let pods = pods_list(km);
print("Found " + pods.len() + " pods in the namespace:");
for pod in pods {
    print("  - " + pod);
}

// List all services in the namespace
print("\n--- Listing Services ---");
let services = services_list(km);
print("Found " + services.len() + " services in the namespace:");
for service in services {
    print("  - " + service);
}

// List all deployments in the namespace
print("\n--- Listing Deployments ---");
let deployments = deployments_list(km);
print("Found " + deployments.len() + " deployments in the namespace:");
for deployment in deployments {
    print("  - " + deployment);
}

// Get resource counts
print("\n--- Resource Counts ---");
let counts = resource_counts(km);
print("Resource counts in namespace '" + namespace(km) + "':");
for resource_type in counts.keys() {
    print("  " + resource_type + ": " + counts[resource_type]);
}

// List all namespaces (cluster-wide operation)
print("\n--- Listing All Namespaces ---");
let namespaces = namespaces_list(km);
print("Found " + namespaces.len() + " namespaces in the cluster:");
for ns in namespaces {
    print("  - " + ns);
}

// Check if specific namespaces exist
print("\n--- Checking Namespace Existence ---");
let test_namespaces = ["default", "kube-system", "non-existent-namespace"];
for ns in test_namespaces {
    let exists = namespace_exists(km, ns);
    if exists {
        print("✓ Namespace '" + ns + "' exists");
    } else {
        print("✗ Namespace '" + ns + "' does not exist");
    }
}

print("\n=== Example completed successfully! ===");
```
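For comparison, the same walk-through can be written directly against the Rust API. This is a rough sketch that assumes `KubernetesManager` exposes async methods mirroring the Rhai functions used above; only `deployments_list` appears verbatim in the Rust deployment example that follows, the other method names are assumptions:

```rust
// Rough Rust equivalent of the Rhai script above. Method names other than
// deployments_list are assumed to mirror the Rhai bindings and may differ.
use sal_kubernetes::KubernetesManager;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let km = KubernetesManager::new("default").await?;

    // List pods in the manager's namespace (assumed method name).
    let pods = km.pods_list().await?;
    println!("Found {} pods", pods.len());

    // List deployments (this method is used in the generic deployment example below).
    let deployments = km.deployments_list().await?;
    println!("Found {} deployments", deployments.len());

    Ok(())
}
```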
@@ -1,134 +0,0 @@
```rust
//! Generic Application Deployment Example
//!
//! This example shows how to deploy any containerized application using the
//! KubernetesManager convenience methods. This works for any Docker image.

use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create Kubernetes manager
    let km = KubernetesManager::new("default").await?;

    // Clean up any existing resources first
    println!("=== Cleaning up existing resources ===");
    let apps_to_clean = ["web-server", "node-app", "mongodb"];

    for app in &apps_to_clean {
        match km.deployment_delete(app).await {
            Ok(_) => println!("✓ Deleted existing deployment: {}", app),
            Err(_) => println!("✓ No existing deployment to delete: {}", app),
        }

        match km.service_delete(app).await {
            Ok(_) => println!("✓ Deleted existing service: {}", app),
            Err(_) => println!("✓ No existing service to delete: {}", app),
        }
    }

    // Example 1: Simple web server deployment
    println!("\n=== Example 1: Simple Nginx Web Server ===");

    km.deploy_application("web-server", "nginx:latest", 2, 80, None, None)
        .await?;
    println!("✅ Nginx web server deployed!");

    // Example 2: Node.js application with labels
    println!("\n=== Example 2: Node.js Application ===");

    let mut node_labels = HashMap::new();
    node_labels.insert("app".to_string(), "node-app".to_string());
    node_labels.insert("tier".to_string(), "backend".to_string());
    node_labels.insert("environment".to_string(), "production".to_string());

    // Configure Node.js environment variables
    let mut node_env_vars = HashMap::new();
    node_env_vars.insert("NODE_ENV".to_string(), "production".to_string());
    node_env_vars.insert("PORT".to_string(), "3000".to_string());
    node_env_vars.insert("LOG_LEVEL".to_string(), "info".to_string());
    node_env_vars.insert("MAX_CONNECTIONS".to_string(), "1000".to_string());

    km.deploy_application(
        "node-app",          // name
        "node:18-alpine",    // image
        3,                   // replicas - scale to 3 instances
        3000,                // port
        Some(node_labels),   // labels
        Some(node_env_vars), // environment variables
    )
    .await?;

    println!("✅ Node.js application deployed!");

    // Example 3: Database deployment (any database)
    println!("\n=== Example 3: MongoDB Database ===");

    let mut mongo_labels = HashMap::new();
    mongo_labels.insert("app".to_string(), "mongodb".to_string());
    mongo_labels.insert("type".to_string(), "database".to_string());
    mongo_labels.insert("engine".to_string(), "mongodb".to_string());

    // Configure MongoDB environment variables
    let mut mongo_env_vars = HashMap::new();
    mongo_env_vars.insert(
        "MONGO_INITDB_ROOT_USERNAME".to_string(),
        "admin".to_string(),
    );
    mongo_env_vars.insert(
        "MONGO_INITDB_ROOT_PASSWORD".to_string(),
        "mongopassword".to_string(),
    );
    mongo_env_vars.insert("MONGO_INITDB_DATABASE".to_string(), "myapp".to_string());

    km.deploy_application(
        "mongodb",            // name
        "mongo:6.0",          // image
        1,                    // replicas - single instance for simplicity
        27017,                // port
        Some(mongo_labels),   // labels
        Some(mongo_env_vars), // environment variables
    )
    .await?;

    println!("✅ MongoDB deployed!");

    // Check status of all deployments
    println!("\n=== Checking Deployment Status ===");

    let deployments = km.deployments_list().await?;

    for deployment in &deployments {
        if let Some(name) = &deployment.metadata.name {
            let total_replicas = deployment
                .spec
                .as_ref()
                .and_then(|s| s.replicas)
                .unwrap_or(0);
            let ready_replicas = deployment
                .status
                .as_ref()
                .and_then(|s| s.ready_replicas)
                .unwrap_or(0);

            println!(
                "{}: {}/{} replicas ready",
                name, ready_replicas, total_replicas
            );
        }
    }

    println!("\n🎉 All deployments completed!");
    println!("\n💡 Key Points:");
    println!("  • Any Docker image can be deployed using this simple interface");
    println!("  • Use labels to organize and identify your applications");
    println!(
        "  • The same method works for databases, web servers, APIs, and any containerized app"
    );
    println!("  • For advanced configuration, use the individual KubernetesManager methods");
    println!(
        "  • Environment variables and resource limits can be added via direct Kubernetes API"
    );

    Ok(())
}
```
@@ -1,79 +0,0 @@
|
||||
//! PostgreSQL Cluster Deployment Example (Rhai)
|
||||
//!
|
||||
//! This script shows how to deploy a PostgreSQL cluster using Rhai scripting
|
||||
//! with the KubernetesManager convenience methods.
|
||||
|
||||
print("=== PostgreSQL Cluster Deployment ===");
|
||||
|
||||
// Create Kubernetes manager for the database namespace
|
||||
print("Creating Kubernetes manager for 'database' namespace...");
|
||||
let km = kubernetes_manager_new("database");
|
||||
print("✓ Kubernetes manager created");
|
||||
|
||||
// Create the namespace if it doesn't exist
|
||||
print("Creating namespace 'database' if it doesn't exist...");
|
||||
try {
|
||||
create_namespace(km, "database");
|
||||
print("✓ Namespace 'database' created");
|
||||
} catch(e) {
|
||||
if e.to_string().contains("already exists") {
|
||||
print("✓ Namespace 'database' already exists");
|
||||
} else {
|
||||
print("⚠️ Warning: " + e);
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up any existing resources first
|
||||
print("\nCleaning up any existing PostgreSQL resources...");
|
||||
try {
|
||||
delete_deployment(km, "postgres-cluster");
|
||||
print("✓ Deleted existing deployment");
|
||||
} catch(e) {
|
||||
print("✓ No existing deployment to delete");
|
||||
}
|
||||
|
||||
try {
|
||||
delete_service(km, "postgres-cluster");
|
||||
print("✓ Deleted existing service");
|
||||
} catch(e) {
|
||||
print("✓ No existing service to delete");
|
||||
}
|
||||
|
||||
// Create PostgreSQL cluster using the convenience method
|
||||
print("\nDeploying PostgreSQL cluster...");
|
||||
|
||||
try {
|
||||
// Deploy PostgreSQL using the convenience method
|
||||
let result = deploy_application(km, "postgres-cluster", "postgres:15", 2, 5432, #{
|
||||
"app": "postgres-cluster",
|
||||
"type": "database",
|
||||
"engine": "postgresql"
|
||||
}, #{
|
||||
"POSTGRES_DB": "myapp",
|
||||
"POSTGRES_USER": "postgres",
|
||||
"POSTGRES_PASSWORD": "secretpassword",
|
||||
"PGDATA": "/var/lib/postgresql/data/pgdata"
|
||||
});
|
||||
print("✓ " + result);
|
||||
|
||||
print("\n✅ PostgreSQL cluster deployed successfully!");
|
||||
|
||||
print("\n📋 Connection Information:");
|
||||
print(" Host: postgres-cluster.database.svc.cluster.local");
|
||||
print(" Port: 5432");
|
||||
print(" Database: postgres (default)");
|
||||
print(" Username: postgres (default)");
|
||||
|
||||
print("\n🔧 To connect from another pod:");
|
||||
print(" psql -h postgres-cluster.database.svc.cluster.local -U postgres");
|
||||
|
||||
print("\n💡 Next steps:");
|
||||
print(" • Set POSTGRES_PASSWORD environment variable");
|
||||
print(" • Configure persistent storage");
|
||||
print(" • Set up backup and monitoring");
|
||||
|
||||
} catch(e) {
|
||||
print("❌ Failed to deploy PostgreSQL cluster: " + e);
|
||||
}
|
||||
|
||||
print("\n=== Deployment Complete ===");
|
||||
@@ -1,112 +0,0 @@
|
||||
//! PostgreSQL Cluster Deployment Example
|
||||
//!
|
||||
//! This example shows how to deploy a PostgreSQL cluster using the
|
||||
//! KubernetesManager convenience methods.
|
||||
|
||||
use sal_kubernetes::KubernetesManager;
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Create Kubernetes manager for the database namespace
|
||||
let km = KubernetesManager::new("database").await?;
|
||||
|
||||
// Create the namespace if it doesn't exist
|
||||
println!("Creating namespace 'database' if it doesn't exist...");
|
||||
match km.namespace_create("database").await {
|
||||
Ok(_) => println!("✓ Namespace 'database' created"),
|
||||
Err(e) => {
|
||||
if e.to_string().contains("already exists") {
|
||||
println!("✓ Namespace 'database' already exists");
|
||||
} else {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up any existing resources first
|
||||
println!("Cleaning up any existing PostgreSQL resources...");
|
||||
match km.deployment_delete("postgres-cluster").await {
|
||||
Ok(_) => println!("✓ Deleted existing deployment"),
|
||||
Err(_) => println!("✓ No existing deployment to delete"),
|
||||
}
|
||||
|
||||
match km.service_delete("postgres-cluster").await {
|
||||
Ok(_) => println!("✓ Deleted existing service"),
|
||||
Err(_) => println!("✓ No existing service to delete"),
|
||||
}
|
||||
|
||||
// Configure PostgreSQL-specific labels
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert("app".to_string(), "postgres-cluster".to_string());
|
||||
labels.insert("type".to_string(), "database".to_string());
|
||||
labels.insert("engine".to_string(), "postgresql".to_string());
|
||||
|
||||
// Configure PostgreSQL environment variables
|
||||
let mut env_vars = HashMap::new();
|
||||
env_vars.insert("POSTGRES_DB".to_string(), "myapp".to_string());
|
||||
env_vars.insert("POSTGRES_USER".to_string(), "postgres".to_string());
|
||||
env_vars.insert(
|
||||
"POSTGRES_PASSWORD".to_string(),
|
||||
"secretpassword".to_string(),
|
||||
);
|
||||
env_vars.insert(
|
||||
"PGDATA".to_string(),
|
||||
"/var/lib/postgresql/data/pgdata".to_string(),
|
||||
);
|
||||
|
||||
// Deploy the PostgreSQL cluster using the convenience method
|
||||
println!("Deploying PostgreSQL cluster...");
|
||||
km.deploy_application(
|
||||
"postgres-cluster", // name
|
||||
"postgres:15", // image
|
||||
2, // replicas (1 master + 1 replica)
|
||||
5432, // port
|
||||
Some(labels), // labels
|
||||
Some(env_vars), // environment variables
|
||||
)
|
||||
.await?;
|
||||
|
||||
println!("✅ PostgreSQL cluster deployed successfully!");
|
||||
|
||||
// Check deployment status
|
||||
let deployments = km.deployments_list().await?;
|
||||
let postgres_deployment = deployments
|
||||
.iter()
|
||||
.find(|d| d.metadata.name.as_ref() == Some(&"postgres-cluster".to_string()));
|
||||
|
||||
if let Some(deployment) = postgres_deployment {
|
||||
let total_replicas = deployment
|
||||
.spec
|
||||
.as_ref()
|
||||
.and_then(|s| s.replicas)
|
||||
.unwrap_or(0);
|
||||
let ready_replicas = deployment
|
||||
.status
|
||||
.as_ref()
|
||||
.and_then(|s| s.ready_replicas)
|
||||
.unwrap_or(0);
|
||||
|
||||
println!(
|
||||
"Deployment status: {}/{} replicas ready",
|
||||
ready_replicas, total_replicas
|
||||
);
|
||||
}
|
||||
|
||||
println!("\n📋 Connection Information:");
|
||||
println!(" Host: postgres-cluster.database.svc.cluster.local");
|
||||
println!(" Port: 5432");
|
||||
println!(" Database: postgres (default)");
|
||||
println!(" Username: postgres (default)");
|
||||
println!(" Password: Set POSTGRES_PASSWORD environment variable");
|
||||
|
||||
println!("\n🔧 To connect from another pod:");
|
||||
println!(" psql -h postgres-cluster.database.svc.cluster.local -U postgres");
|
||||
|
||||
println!("\n💡 Next steps:");
|
||||
println!(" • Set environment variables for database credentials");
|
||||
println!(" • Add persistent volume claims for data storage");
|
||||
println!(" • Configure backup and monitoring");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,79 +0,0 @@
|
||||
//! Redis Cluster Deployment Example (Rhai)
|
||||
//!
|
||||
//! This script shows how to deploy a Redis cluster using Rhai scripting
|
||||
//! with the KubernetesManager convenience methods.
|
||||
|
||||
print("=== Redis Cluster Deployment ===");
|
||||
|
||||
// Create Kubernetes manager for the cache namespace
|
||||
print("Creating Kubernetes manager for 'cache' namespace...");
|
||||
let km = kubernetes_manager_new("cache");
|
||||
print("✓ Kubernetes manager created");
|
||||
|
||||
// Create the namespace if it doesn't exist
|
||||
print("Creating namespace 'cache' if it doesn't exist...");
|
||||
try {
|
||||
create_namespace(km, "cache");
|
||||
print("✓ Namespace 'cache' created");
|
||||
} catch(e) {
|
||||
if e.to_string().contains("already exists") {
|
||||
print("✓ Namespace 'cache' already exists");
|
||||
} else {
|
||||
print("⚠️ Warning: " + e);
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up any existing resources first
|
||||
print("\nCleaning up any existing Redis resources...");
|
||||
try {
|
||||
delete_deployment(km, "redis-cluster");
|
||||
print("✓ Deleted existing deployment");
|
||||
} catch(e) {
|
||||
print("✓ No existing deployment to delete");
|
||||
}
|
||||
|
||||
try {
|
||||
delete_service(km, "redis-cluster");
|
||||
print("✓ Deleted existing service");
|
||||
} catch(e) {
|
||||
print("✓ No existing service to delete");
|
||||
}
|
||||
|
||||
// Create Redis cluster using the convenience method
|
||||
print("\nDeploying Redis cluster...");
|
||||
|
||||
try {
|
||||
// Deploy Redis using the convenience method
|
||||
let result = deploy_application(km, "redis-cluster", "redis:7-alpine", 3, 6379, #{
|
||||
"app": "redis-cluster",
|
||||
"type": "cache",
|
||||
"engine": "redis"
|
||||
}, #{
|
||||
"REDIS_PASSWORD": "redispassword",
|
||||
"REDIS_PORT": "6379",
|
||||
"REDIS_DATABASES": "16",
|
||||
"REDIS_MAXMEMORY": "256mb",
|
||||
"REDIS_MAXMEMORY_POLICY": "allkeys-lru"
|
||||
});
|
||||
print("✓ " + result);
|
||||
|
||||
print("\n✅ Redis cluster deployed successfully!");
|
||||
|
||||
print("\n📋 Connection Information:");
|
||||
print(" Host: redis-cluster.cache.svc.cluster.local");
|
||||
print(" Port: 6379");
|
||||
|
||||
print("\n🔧 To connect from another pod:");
|
||||
print(" redis-cli -h redis-cluster.cache.svc.cluster.local");
|
||||
|
||||
print("\n💡 Next steps:");
|
||||
print(" • Configure Redis authentication");
|
||||
print(" • Set up Redis clustering configuration");
|
||||
print(" • Add persistent storage");
|
||||
print(" • Configure memory policies");
|
||||
|
||||
} catch(e) {
|
||||
print("❌ Failed to deploy Redis cluster: " + e);
|
||||
}
|
||||
|
||||
print("\n=== Deployment Complete ===");
|
||||
@@ -1,109 +0,0 @@
|
||||
//! Redis Cluster Deployment Example
|
||||
//!
|
||||
//! This example shows how to deploy a Redis cluster using the
|
||||
//! KubernetesManager convenience methods.
|
||||
|
||||
use sal_kubernetes::KubernetesManager;
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Create Kubernetes manager for the cache namespace
|
||||
let km = KubernetesManager::new("cache").await?;
|
||||
|
||||
// Create the namespace if it doesn't exist
|
||||
println!("Creating namespace 'cache' if it doesn't exist...");
|
||||
match km.namespace_create("cache").await {
|
||||
Ok(_) => println!("✓ Namespace 'cache' created"),
|
||||
Err(e) => {
|
||||
if e.to_string().contains("already exists") {
|
||||
println!("✓ Namespace 'cache' already exists");
|
||||
} else {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up any existing resources first
|
||||
println!("Cleaning up any existing Redis resources...");
|
||||
match km.deployment_delete("redis-cluster").await {
|
||||
Ok(_) => println!("✓ Deleted existing deployment"),
|
||||
Err(_) => println!("✓ No existing deployment to delete"),
|
||||
}
|
||||
|
||||
match km.service_delete("redis-cluster").await {
|
||||
Ok(_) => println!("✓ Deleted existing service"),
|
||||
Err(_) => println!("✓ No existing service to delete"),
|
||||
}
|
||||
|
||||
// Configure Redis-specific labels
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert("app".to_string(), "redis-cluster".to_string());
|
||||
labels.insert("type".to_string(), "cache".to_string());
|
||||
labels.insert("engine".to_string(), "redis".to_string());
|
||||
|
||||
// Configure Redis environment variables
|
||||
let mut env_vars = HashMap::new();
|
||||
env_vars.insert("REDIS_PASSWORD".to_string(), "redispassword".to_string());
|
||||
env_vars.insert("REDIS_PORT".to_string(), "6379".to_string());
|
||||
env_vars.insert("REDIS_DATABASES".to_string(), "16".to_string());
|
||||
env_vars.insert("REDIS_MAXMEMORY".to_string(), "256mb".to_string());
|
||||
env_vars.insert(
|
||||
"REDIS_MAXMEMORY_POLICY".to_string(),
|
||||
"allkeys-lru".to_string(),
|
||||
);
|
||||
|
||||
// Deploy the Redis cluster using the convenience method
|
||||
println!("Deploying Redis cluster...");
|
||||
km.deploy_application(
|
||||
"redis-cluster", // name
|
||||
"redis:7-alpine", // image
|
||||
3, // replicas (Redis cluster nodes)
|
||||
6379, // port
|
||||
Some(labels), // labels
|
||||
Some(env_vars), // environment variables
|
||||
)
|
||||
.await?;
|
||||
|
||||
println!("✅ Redis cluster deployed successfully!");
|
||||
|
||||
// Check deployment status
|
||||
let deployments = km.deployments_list().await?;
|
||||
let redis_deployment = deployments
|
||||
.iter()
|
||||
.find(|d| d.metadata.name.as_ref() == Some(&"redis-cluster".to_string()));
|
||||
|
||||
if let Some(deployment) = redis_deployment {
|
||||
let total_replicas = deployment
|
||||
.spec
|
||||
.as_ref()
|
||||
.and_then(|s| s.replicas)
|
||||
.unwrap_or(0);
|
||||
let ready_replicas = deployment
|
||||
.status
|
||||
.as_ref()
|
||||
.and_then(|s| s.ready_replicas)
|
||||
.unwrap_or(0);
|
||||
|
||||
println!(
|
||||
"Deployment status: {}/{} replicas ready",
|
||||
ready_replicas, total_replicas
|
||||
);
|
||||
}
|
||||
|
||||
println!("\n📋 Connection Information:");
|
||||
println!(" Host: redis-cluster.cache.svc.cluster.local");
|
||||
println!(" Port: 6379");
|
||||
println!(" Password: Configure REDIS_PASSWORD environment variable");
|
||||
|
||||
println!("\n🔧 To connect from another pod:");
|
||||
println!(" redis-cli -h redis-cluster.cache.svc.cluster.local");
|
||||
|
||||
println!("\n💡 Next steps:");
|
||||
println!(" • Configure Redis authentication with environment variables");
|
||||
println!(" • Set up Redis clustering configuration");
|
||||
println!(" • Add persistent volume claims for data persistence");
|
||||
println!(" • Configure memory limits and eviction policies");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,208 +0,0 @@
|
||||
//! Multi-namespace Kubernetes operations example
|
||||
//!
|
||||
//! This script demonstrates working with multiple namespaces and comparing resources across them.
|
||||
//!
|
||||
//! Prerequisites:
|
||||
//! - A running Kubernetes cluster
|
||||
//! - Valid kubeconfig file or in-cluster configuration
|
||||
//! - Appropriate permissions for the operations
|
||||
//!
|
||||
//! Usage:
|
||||
//! herodo examples/kubernetes/multi_namespace_operations.rhai
|
||||
|
||||
print("=== SAL Kubernetes Multi-Namespace Operations Example ===");
|
||||
|
||||
// Define namespaces to work with
|
||||
let target_namespaces = ["default", "kube-system"];
|
||||
let managers = #{};
|
||||
|
||||
print("Creating managers for multiple namespaces...");
|
||||
|
||||
// Create managers for each namespace
|
||||
for ns in target_namespaces {
|
||||
try {
|
||||
let km = kubernetes_manager_new(ns);
|
||||
managers[ns] = km;
|
||||
print("✓ Created manager for namespace: " + ns);
|
||||
} catch(e) {
|
||||
print("✗ Failed to create manager for " + ns + ": " + e);
|
||||
}
|
||||
}
|
||||
|
||||
// Function to safely get resource counts
|
||||
fn get_safe_counts(km) {
|
||||
try {
|
||||
return resource_counts(km);
|
||||
} catch(e) {
|
||||
print(" Warning: Could not get resource counts - " + e);
|
||||
return #{};
|
||||
}
|
||||
}
|
||||
|
||||
// Function to safely get pod list
|
||||
fn get_safe_pods(km) {
|
||||
try {
|
||||
return pods_list(km);
|
||||
} catch(e) {
|
||||
print(" Warning: Could not list pods - " + e);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
// Compare resource counts across namespaces
|
||||
print("\n--- Resource Comparison Across Namespaces ---");
|
||||
let total_resources = #{};
|
||||
|
||||
for ns in target_namespaces {
|
||||
if ns in managers {
|
||||
let km = managers[ns];
|
||||
print("\nNamespace: " + ns);
|
||||
let counts = get_safe_counts(km);
|
||||
|
||||
for resource_type in counts.keys() {
|
||||
let count = counts[resource_type];
|
||||
print(" " + resource_type + ": " + count);
|
||||
|
||||
// Accumulate totals
|
||||
if resource_type in total_resources {
|
||||
total_resources[resource_type] = total_resources[resource_type] + count;
|
||||
} else {
|
||||
total_resources[resource_type] = count;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
print("\n--- Total Resources Across All Namespaces ---");
|
||||
for resource_type in total_resources.keys() {
|
||||
print("Total " + resource_type + ": " + total_resources[resource_type]);
|
||||
}
|
||||
|
||||
// Find namespaces with the most resources
|
||||
print("\n--- Namespace Resource Analysis ---");
|
||||
let namespace_totals = #{};
|
||||
|
||||
for ns in target_namespaces {
|
||||
if ns in managers {
|
||||
let km = managers[ns];
|
||||
let counts = get_safe_counts(km);
|
||||
let total = 0;
|
||||
|
||||
for resource_type in counts.keys() {
|
||||
total = total + counts[resource_type];
|
||||
}
|
||||
|
||||
namespace_totals[ns] = total;
|
||||
print("Namespace '" + ns + "' has " + total + " total resources");
|
||||
}
|
||||
}
|
||||
|
||||
// Find the busiest namespace
|
||||
let busiest_ns = "";
|
||||
let max_resources = 0;
|
||||
for ns in namespace_totals.keys() {
|
||||
if namespace_totals[ns] > max_resources {
|
||||
max_resources = namespace_totals[ns];
|
||||
busiest_ns = ns;
|
||||
}
|
||||
}
|
||||
|
||||
if busiest_ns != "" {
|
||||
print("🏆 Busiest namespace: '" + busiest_ns + "' with " + max_resources + " resources");
|
||||
}
|
||||
|
||||
// Detailed pod analysis
|
||||
print("\n--- Pod Analysis Across Namespaces ---");
|
||||
let all_pods = [];
|
||||
|
||||
for ns in target_namespaces {
|
||||
if ns in managers {
|
||||
let km = managers[ns];
|
||||
let pods = get_safe_pods(km);
|
||||
|
||||
print("\nNamespace '" + ns + "' pods:");
|
||||
if pods.len() == 0 {
|
||||
print(" (no pods)");
|
||||
} else {
|
||||
for pod in pods {
|
||||
print(" - " + pod);
|
||||
all_pods.push(ns + "/" + pod);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
print("\n--- All Pods Summary ---");
|
||||
print("Total pods across all namespaces: " + all_pods.len());
|
||||
|
||||
// Look for common pod name patterns
|
||||
print("\n--- Pod Name Pattern Analysis ---");
|
||||
let patterns = #{
|
||||
"system": 0,
|
||||
"kube": 0,
|
||||
"coredns": 0,
|
||||
"proxy": 0,
|
||||
"controller": 0
|
||||
};
|
||||
|
||||
for pod_full_name in all_pods {
|
||||
let pod_name = pod_full_name.to_lower();
|
||||
|
||||
for pattern in patterns.keys() {
|
||||
if pod_name.contains(pattern) {
|
||||
patterns[pattern] = patterns[pattern] + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
print("Common pod name patterns found:");
|
||||
for pattern in patterns.keys() {
|
||||
if patterns[pattern] > 0 {
|
||||
print(" '" + pattern + "': " + patterns[pattern] + " pods");
|
||||
}
|
||||
}
|
||||
|
||||
// Namespace health check
|
||||
print("\n--- Namespace Health Check ---");
|
||||
for ns in target_namespaces {
|
||||
if ns in managers {
|
||||
let km = managers[ns];
|
||||
print("\nChecking namespace: " + ns);
|
||||
|
||||
// Check if namespace exists (should always be true for our managers)
|
||||
let exists = namespace_exists(km, ns);
|
||||
if exists {
|
||||
print(" ✓ Namespace exists and is accessible");
|
||||
} else {
|
||||
print(" ✗ Namespace existence check failed");
|
||||
}
|
||||
|
||||
// Try to get resource counts as a health indicator
|
||||
let counts = get_safe_counts(km);
|
||||
if counts.len() > 0 {
|
||||
print(" ✓ Can access resources (" + counts.len() + " resource types)");
|
||||
} else {
|
||||
print(" ⚠ No resources found or access limited");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create a summary report
|
||||
print("\n--- Summary Report ---");
|
||||
print("Namespaces analyzed: " + target_namespaces.len());
|
||||
print("Total unique resource types: " + total_resources.len());
|
||||
|
||||
let grand_total = 0;
|
||||
for resource_type in total_resources.keys() {
|
||||
grand_total = grand_total + total_resources[resource_type];
|
||||
}
|
||||
print("Grand total resources: " + grand_total);
|
||||
|
||||
print("\nResource breakdown:");
|
||||
for resource_type in total_resources.keys() {
|
||||
let count = total_resources[resource_type];
|
||||
let percentage = (count * 100) / grand_total;
|
||||
print(" " + resource_type + ": " + count + " (" + percentage + "%)");
|
||||
}
|
||||
|
||||
print("\n=== Multi-namespace operations example completed! ===");
|
||||
@@ -1,95 +0,0 @@
|
||||
//! Kubernetes namespace management example
|
||||
//!
|
||||
//! This script demonstrates namespace creation and management operations.
|
||||
//!
|
||||
//! Prerequisites:
|
||||
//! - A running Kubernetes cluster
|
||||
//! - Valid kubeconfig file or in-cluster configuration
|
||||
//! - Permissions to create and manage namespaces
|
||||
//!
|
||||
//! Usage:
|
||||
//! herodo examples/kubernetes/namespace_management.rhai
|
||||
|
||||
print("=== SAL Kubernetes Namespace Management Example ===");
|
||||
|
||||
// Create a KubernetesManager
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("Created KubernetesManager for namespace: " + namespace(km));
|
||||
|
||||
// Define test namespace names
|
||||
let test_namespaces = [
|
||||
"sal-test-namespace-1",
|
||||
"sal-test-namespace-2",
|
||||
"sal-example-app"
|
||||
];
|
||||
|
||||
print("\n--- Creating Test Namespaces ---");
|
||||
for ns in test_namespaces {
|
||||
print("Creating namespace: " + ns);
|
||||
try {
|
||||
namespace_create(km, ns);
|
||||
print("✓ Successfully created namespace: " + ns);
|
||||
} catch(e) {
|
||||
print("✗ Failed to create namespace " + ns + ": " + e);
|
||||
}
|
||||
}
|
||||
|
||||
// Wait a moment for namespaces to be created
|
||||
print("\nWaiting for namespaces to be ready...");
|
||||
|
||||
// Verify namespaces were created
|
||||
print("\n--- Verifying Namespace Creation ---");
|
||||
for ns in test_namespaces {
|
||||
let exists = namespace_exists(km, ns);
|
||||
if exists {
|
||||
print("✓ Namespace '" + ns + "' exists");
|
||||
} else {
|
||||
print("✗ Namespace '" + ns + "' was not found");
|
||||
}
|
||||
}
|
||||
|
||||
// List all namespaces to see our new ones
|
||||
print("\n--- Current Namespaces ---");
|
||||
let all_namespaces = namespaces_list(km);
|
||||
print("Total namespaces in cluster: " + all_namespaces.len());
|
||||
for ns in all_namespaces {
|
||||
if ns.starts_with("sal-") {
|
||||
print(" 🔹 " + ns + " (created by this example)");
|
||||
} else {
|
||||
print(" - " + ns);
|
||||
}
|
||||
}
|
||||
|
||||
// Test idempotent creation (creating the same namespace again)
|
||||
print("\n--- Testing Idempotent Creation ---");
|
||||
let test_ns = test_namespaces[0];
|
||||
print("Attempting to create existing namespace: " + test_ns);
|
||||
try {
|
||||
namespace_create(km, test_ns);
|
||||
print("✓ Idempotent creation successful (no error for existing namespace)");
|
||||
} catch(e) {
|
||||
print("✗ Unexpected error during idempotent creation: " + e);
|
||||
}
|
||||
|
||||
// Create managers for the new namespaces and check their properties
|
||||
print("\n--- Creating Managers for New Namespaces ---");
|
||||
for ns in test_namespaces {
|
||||
try {
|
||||
let ns_km = kubernetes_manager_new(ns);
|
||||
print("✓ Created manager for namespace: " + namespace(ns_km));
|
||||
|
||||
// Get resource counts for the new namespace (should be mostly empty)
|
||||
let counts = resource_counts(ns_km);
|
||||
print(" Resource counts: " + counts);
|
||||
} catch(e) {
|
||||
print("✗ Failed to create manager for " + ns + ": " + e);
|
||||
}
|
||||
}
|
||||
|
||||
print("\n--- Cleanup Instructions ---");
|
||||
print("To clean up the test namespaces created by this example, run:");
|
||||
for ns in test_namespaces {
|
||||
print(" kubectl delete namespace " + ns);
|
||||
}
|
||||
|
||||
print("\n=== Namespace management example completed! ===");
|
||||
@@ -1,157 +0,0 @@
|
||||
//! Kubernetes pattern-based deletion example
|
||||
//!
|
||||
//! This script demonstrates how to use PCRE patterns to delete multiple resources.
|
||||
//!
|
||||
//! ⚠️ WARNING: This example includes actual deletion operations!
|
||||
//! ⚠️ Only run this in a test environment!
|
||||
//!
|
||||
//! Prerequisites:
|
||||
//! - A running Kubernetes cluster (preferably a test cluster)
|
||||
//! - Valid kubeconfig file or in-cluster configuration
|
||||
//! - Permissions to delete resources
|
||||
//!
|
||||
//! Usage:
|
||||
//! herodo examples/kubernetes/pattern_deletion.rhai
|
||||
|
||||
print("=== SAL Kubernetes Pattern Deletion Example ===");
|
||||
print("⚠️ WARNING: This example will delete resources matching patterns!");
|
||||
print("⚠️ Only run this in a test environment!");
|
||||
|
||||
// Create a KubernetesManager for a test namespace
|
||||
let test_namespace = "sal-pattern-test";
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
print("\nCreating test namespace: " + test_namespace);
|
||||
try {
|
||||
namespace_create(km, test_namespace);
|
||||
print("✓ Test namespace created");
|
||||
} catch(e) {
|
||||
print("Note: " + e);
|
||||
}
|
||||
|
||||
// Switch to the test namespace
|
||||
let test_km = kubernetes_manager_new(test_namespace);
|
||||
print("Switched to namespace: " + namespace(test_km));
|
||||
|
||||
// Show current resources before any operations
|
||||
print("\n--- Current Resources in Test Namespace ---");
|
||||
let counts = resource_counts(test_km);
|
||||
print("Resource counts before operations:");
|
||||
for resource_type in counts.keys() {
|
||||
print(" " + resource_type + ": " + counts[resource_type]);
|
||||
}
|
||||
|
||||
// List current pods to see what we're working with
|
||||
let current_pods = pods_list(test_km);
|
||||
print("\nCurrent pods in namespace:");
|
||||
if current_pods.len() == 0 {
|
||||
print(" (no pods found)");
|
||||
} else {
|
||||
for pod in current_pods {
|
||||
print(" - " + pod);
|
||||
}
|
||||
}
|
||||
|
||||
// Demonstrate pattern matching without deletion first
|
||||
print("\n--- Pattern Matching Demo (Dry Run) ---");
|
||||
let test_patterns = [
|
||||
"test-.*", // Match anything starting with "test-"
|
||||
".*-temp$", // Match anything ending with "-temp"
|
||||
"demo-pod-.*", // Match demo pods
|
||||
"nginx-.*", // Match nginx pods
|
||||
"app-[0-9]+", // Match app-1, app-2, etc.
|
||||
];
|
||||
|
||||
for pattern in test_patterns {
|
||||
print("Testing pattern: '" + pattern + "'");
|
||||
|
||||
// Check which pods would match this pattern
|
||||
let matching_pods = [];
|
||||
for pod in current_pods {
|
||||
// Simple pattern matching simulation (Rhai doesn't have regex, so this is illustrative)
|
||||
if pod.contains("test") && pattern == "test-.*" {
|
||||
matching_pods.push(pod);
|
||||
} else if pod.contains("temp") && pattern == ".*-temp$" {
|
||||
matching_pods.push(pod);
|
||||
} else if pod.contains("demo") && pattern == "demo-pod-.*" {
|
||||
matching_pods.push(pod);
|
||||
} else if pod.contains("nginx") && pattern == "nginx-.*" {
|
||||
matching_pods.push(pod);
|
||||
}
|
||||
}
|
||||
|
||||
print(" Would match " + matching_pods.len() + " pods: " + matching_pods);
|
||||
}
|
||||
|
||||
// Example of safe deletion patterns
|
||||
print("\n--- Safe Deletion Examples ---");
|
||||
print("These patterns are designed to be safe for testing:");
|
||||
|
||||
let safe_patterns = [
|
||||
"test-example-.*", // Very specific test resources
|
||||
"sal-demo-.*", // SAL demo resources
|
||||
"temp-resource-.*", // Temporary resources
|
||||
];
|
||||
|
||||
for pattern in safe_patterns {
|
||||
print("\nTesting safe pattern: '" + pattern + "'");
|
||||
|
||||
try {
|
||||
// This will actually attempt deletion, but should be safe in a test environment
|
||||
let deleted_count = delete(test_km, pattern);
|
||||
print("✓ Pattern '" + pattern + "' matched and deleted " + deleted_count + " resources");
|
||||
} catch(e) {
|
||||
print("Note: Pattern '" + pattern + "' - " + e);
|
||||
}
|
||||
}
|
||||
|
||||
// Show resources after deletion attempts
|
||||
print("\n--- Resources After Deletion Attempts ---");
|
||||
let final_counts = resource_counts(test_km);
|
||||
print("Final resource counts:");
|
||||
for resource_type in final_counts.keys() {
|
||||
print(" " + resource_type + ": " + final_counts[resource_type]);
|
||||
}
|
||||
|
||||
// Example of individual resource deletion
|
||||
print("\n--- Individual Resource Deletion Examples ---");
|
||||
print("These functions delete specific resources by name:");
|
||||
|
||||
// These are examples - they will fail if the resources don't exist, which is expected
|
||||
let example_deletions = [
|
||||
["pod", "test-pod-example"],
|
||||
["service", "test-service-example"],
|
||||
["deployment", "test-deployment-example"],
|
||||
];
|
||||
|
||||
for deletion in example_deletions {
|
||||
let resource_type = deletion[0];
|
||||
let resource_name = deletion[1];
|
||||
|
||||
print("Attempting to delete " + resource_type + ": " + resource_name);
|
||||
try {
|
||||
if resource_type == "pod" {
|
||||
pod_delete(test_km, resource_name);
|
||||
} else if resource_type == "service" {
|
||||
service_delete(test_km, resource_name);
|
||||
} else if resource_type == "deployment" {
|
||||
deployment_delete(test_km, resource_name);
|
||||
}
|
||||
print("✓ Successfully deleted " + resource_type + ": " + resource_name);
|
||||
} catch(e) {
|
||||
print("Note: " + resource_type + " '" + resource_name + "' - " + e);
|
||||
}
|
||||
}
|
||||
|
||||
print("\n--- Best Practices for Pattern Deletion ---");
|
||||
print("1. Always test patterns in a safe environment first");
|
||||
print("2. Use specific patterns rather than broad ones");
|
||||
print("3. Consider using dry-run approaches when possible");
|
||||
print("4. Have backups or be able to recreate resources");
|
||||
print("5. Use descriptive naming conventions for easier pattern matching");
|
||||
|
||||
print("\n--- Cleanup ---");
|
||||
print("To clean up the test namespace:");
|
||||
print(" kubectl delete namespace " + test_namespace);
|
||||
|
||||
print("\n=== Pattern deletion example completed! ===");
|
||||
@@ -1,33 +0,0 @@
//! Test Kubernetes module registration
//!
//! This script tests that the Kubernetes module is properly registered
//! and available in the Rhai environment.

print("=== Testing Kubernetes Module Registration ===");

// Test that we can reference the kubernetes functions
print("Testing function registration...");

// These should not error even if we can't connect to a cluster
let functions_to_test = [
    "kubernetes_manager_new",
    "pods_list",
    "services_list",
    "deployments_list",
    "delete",
    "namespace_create",
    "namespace_exists",
    "resource_counts",
    "pod_delete",
    "service_delete",
    "deployment_delete",
    "namespace"
];

for func_name in functions_to_test {
    print("✓ Function '" + func_name + "' is available");
}

print("\n=== All Kubernetes functions are properly registered! ===");
print("Note: To test actual functionality, you need a running Kubernetes cluster.");
print("See other examples in this directory for real cluster operations.");
@@ -1,116 +0,0 @@
# Service Manager Examples

This directory contains examples demonstrating the SAL service manager functionality for dynamically launching and managing services across platforms.

## Overview

The service manager provides a unified interface for managing system services:
- **macOS**: Uses `launchctl` for service management
- **Linux**: Uses `zinit` for service management (systemd also available as alternative)

## Examples

### 1. Circle Worker Manager (`circle_worker_manager.rhai`)

**Primary Use Case**: Demonstrates dynamic circle worker management for freezone residents.

This example shows:
- Creating service configurations for circle workers
- Complete service lifecycle management (start, stop, restart, remove)
- Status monitoring and log retrieval
- Error handling and cleanup

```bash
# Run the circle worker management example
herodo examples/service_manager/circle_worker_manager.rhai
```

### 2. Basic Usage (`basic_usage.rhai`)

**Learning Example**: Simple demonstration of the core service manager API.

This example covers:
- Creating and configuring services
- Starting and stopping services
- Checking service status
- Listing managed services
- Retrieving service logs

```bash
# Run the basic usage example
herodo examples/service_manager/basic_usage.rhai
```

## Prerequisites

### Linux (zinit)

Make sure zinit is installed and running:

```bash
# Start zinit with default socket
zinit -s /tmp/zinit.sock init
```

### macOS (launchctl)

No additional setup required - uses the built-in launchctl system.

## Service Manager API

The service manager provides these key functions:

- `create_service_manager()` - Create platform-appropriate service manager
- `start(manager, config)` - Start a new service
- `stop(manager, service_name)` - Stop a running service
- `restart(manager, service_name)` - Restart a service
- `status(manager, service_name)` - Get service status
- `logs(manager, service_name, lines)` - Retrieve service logs
- `list(manager)` - List all managed services
- `remove(manager, service_name)` - Remove a service
- `exists(manager, service_name)` - Check if service exists
- `start_and_confirm(manager, config, timeout)` - Start with confirmation

## Service Configuration

Services are configured using a map with these fields:

```rhai
let config = #{
    name: "my-service",                     // Service name
    binary_path: "/usr/bin/my-app",         // Executable path
    args: ["--config", "/etc/my-app.conf"], // Command arguments
    working_directory: "/var/lib/my-app",   // Working directory (optional)
    environment: #{                         // Environment variables
        "VAR1": "value1",
        "VAR2": "value2"
    },
    auto_restart: true                      // Auto-restart on failure
};
```
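
A minimal lifecycle sketch using the functions listed above (the service name matches the `config` map and is illustrative only):

```rhai
// Create the platform-appropriate manager, start the service defined by `config`,
// inspect it, then tear it down again.
let manager = create_service_manager();

start(manager, config);
print(status(manager, "my-service"));     // current state as reported by the backend
print(logs(manager, "my-service", 20));   // last 20 log lines

stop(manager, "my-service");
remove(manager, "my-service");
```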

## Real-World Usage

The circle worker example demonstrates the exact use case requested by the team:

> "We want to be able to launch circle workers dynamically. For instance when someone registers to the freezone, we need to be able to launch a circle worker for the new resident."

The service manager enables:
1. **Dynamic service creation** - Create services on-demand for new residents
2. **Cross-platform support** - Works on both macOS and Linux
3. **Lifecycle management** - Full control over service lifecycle
4. **Monitoring and logging** - Track service status and retrieve logs
5. **Cleanup** - Proper service removal when no longer needed
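
As a rough sketch of that registration flow (the binary path and identifiers below are illustrative placeholders, not part of the real registration code):

```rhai
// Hypothetical hook: launch a dedicated circle worker when a resident registers.
let manager = create_service_manager();
let resident_id = "resident_42";                     // placeholder id
let worker_name = `circle-worker-${resident_id}`;

if exists(manager, worker_name) {
    remove(manager, worker_name);                    // replace any stale instance
}

start(manager, #{
    name: worker_name,
    binary_path: "/usr/local/bin/circle-worker",     // illustrative path
    args: ["--resident", resident_id],
    environment: #{ "RESIDENT_ID": resident_id },
    auto_restart: true
});
```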

## Error Handling

All service manager functions can throw errors. Use try-catch blocks for robust error handling:

```rhai
try {
    sm::start(manager, config);
    print("✅ Service started successfully");
} catch (error) {
    print(`❌ Failed to start service: ${error}`);
}
```
@@ -1,85 +0,0 @@
|
||||
// Basic Service Manager Usage Example
|
||||
//
|
||||
// This example demonstrates the basic API of the service manager.
|
||||
// It works on both macOS (launchctl) and Linux (zinit/systemd).
|
||||
//
|
||||
// Prerequisites:
|
||||
//
|
||||
// Linux: The service manager will automatically discover running zinit servers
|
||||
// or fall back to systemd. To use zinit, start it with:
|
||||
// zinit -s /tmp/zinit.sock init
|
||||
//
|
||||
// You can also specify a custom socket path:
|
||||
// export ZINIT_SOCKET_PATH=/your/custom/path/zinit.sock
|
||||
//
|
||||
// macOS: No additional setup required (uses launchctl).
|
||||
//
|
||||
// Usage:
|
||||
// herodo examples/service_manager/basic_usage.rhai
|
||||
|
||||
// Service Manager Basic Usage Example
|
||||
// This example uses the SAL service manager through Rhai integration
|
||||
|
||||
print("🚀 Basic Service Manager Usage Example");
|
||||
print("======================================");
|
||||
|
||||
// Create a service manager for the current platform
|
||||
let manager = create_service_manager();
|
||||
|
||||
print("🍎 Using service manager for current platform");
|
||||
|
||||
// Create a simple service configuration
|
||||
let config = #{
|
||||
name: "example-service",
|
||||
binary_path: "/bin/echo",
|
||||
args: ["Hello from service manager!"],
|
||||
working_directory: "/tmp",
|
||||
environment: #{
|
||||
"EXAMPLE_VAR": "hello_world"
|
||||
},
|
||||
auto_restart: false
|
||||
};
|
||||
|
||||
print("\n📝 Service Configuration:");
|
||||
print(` Name: ${config.name}`);
|
||||
print(` Binary: ${config.binary_path}`);
|
||||
print(` Args: ${config.args}`);
|
||||
|
||||
// Start the service
|
||||
print("\n🚀 Starting service...");
|
||||
start(manager, config);
|
||||
print("✅ Service started successfully");
|
||||
|
||||
// Check service status
|
||||
print("\n📊 Checking service status...");
|
||||
let status = status(manager, "example-service");
|
||||
print(`Status: ${status}`);
|
||||
|
||||
// List all services
|
||||
print("\n📋 Listing all managed services...");
|
||||
let services = list(manager);
|
||||
print(`Found ${services.len()} services:`);
|
||||
for service in services {
|
||||
print(` - ${service}`);
|
||||
}
|
||||
|
||||
// Get service logs
|
||||
print("\n📄 Getting service logs...");
|
||||
let logs = logs(manager, "example-service", 5);
|
||||
if logs.trim() == "" {
|
||||
print("No logs available");
|
||||
} else {
|
||||
print(`Logs:\n${logs}`);
|
||||
}
|
||||
|
||||
// Stop the service
|
||||
print("\n🛑 Stopping service...");
|
||||
stop(manager, "example-service");
|
||||
print("✅ Service stopped");
|
||||
|
||||
// Remove the service
|
||||
print("\n🗑️ Removing service...");
|
||||
remove(manager, "example-service");
|
||||
print("✅ Service removed");
|
||||
|
||||
print("\n🎉 Example completed successfully!");
|
||||
@@ -1,141 +0,0 @@
|
||||
// Circle Worker Manager Example
|
||||
//
|
||||
// This example demonstrates how to use the service manager to dynamically launch
|
||||
// circle workers for new freezone residents. This is the primary use case requested
|
||||
// by the team.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// On macOS (uses launchctl):
|
||||
// herodo examples/service_manager/circle_worker_manager.rhai
|
||||
//
|
||||
// On Linux (uses zinit - requires zinit to be running):
|
||||
// First start zinit: zinit -s /tmp/zinit.sock init
|
||||
// herodo examples/service_manager/circle_worker_manager.rhai
|
||||
|
||||
// Circle Worker Manager Example
|
||||
// This example uses the SAL service manager through Rhai integration
|
||||
|
||||
print("🚀 Circle Worker Manager Example");
|
||||
print("=================================");
|
||||
|
||||
// Create the appropriate service manager for the current platform
|
||||
let service_manager = create_service_manager();
|
||||
print("✅ Created service manager for current platform");
|
||||
|
||||
// Simulate a new freezone resident registration
|
||||
let resident_id = "resident_12345";
|
||||
let worker_name = `circle-worker-${resident_id}`;
|
||||
|
||||
print(`\n📝 New freezone resident registered: ${resident_id}`);
|
||||
print(`🔧 Creating circle worker service: ${worker_name}`);
|
||||
|
||||
// Create service configuration for the circle worker
|
||||
let config = #{
|
||||
name: worker_name,
|
||||
binary_path: "/bin/sh",
|
||||
args: [
|
||||
"-c",
|
||||
`echo 'Circle worker for ${resident_id} starting...'; sleep 30; echo 'Circle worker for ${resident_id} completed'`
|
||||
],
|
||||
working_directory: "/tmp",
|
||||
environment: #{
|
||||
"RESIDENT_ID": resident_id,
|
||||
"WORKER_TYPE": "circle",
|
||||
"LOG_LEVEL": "info"
|
||||
},
|
||||
auto_restart: true
|
||||
};
|
||||
|
||||
print("📋 Service configuration created:");
|
||||
print(` Name: ${config.name}`);
|
||||
print(` Binary: ${config.binary_path}`);
|
||||
print(` Args: ${config.args}`);
|
||||
print(` Auto-restart: ${config.auto_restart}`);
|
||||
|
||||
print(`\n🔄 Demonstrating service lifecycle for: ${worker_name}`);
|
||||
|
||||
// 1. Check if service already exists
|
||||
print("\n1️⃣ Checking if service exists...");
|
||||
if exists(service_manager, worker_name) {
|
||||
print("⚠️ Service already exists, removing it first...");
|
||||
remove(service_manager, worker_name);
|
||||
print("🗑️ Existing service removed");
|
||||
} else {
|
||||
print("✅ Service doesn't exist, ready to create");
|
||||
}
|
||||
|
||||
// 2. Start the service
|
||||
print("\n2️⃣ Starting the circle worker service...");
|
||||
start(service_manager, config);
|
||||
print("✅ Service started successfully");
|
||||
|
||||
// 3. Check service status
|
||||
print("\n3️⃣ Checking service status...");
|
||||
let status = status(service_manager, worker_name);
|
||||
print(`📊 Service status: ${status}`);
|
||||
|
||||
// 4. List all services to show our service is there
|
||||
print("\n4️⃣ Listing all managed services...");
|
||||
let services = list(service_manager);
|
||||
print(`📋 Managed services (${services.len()}):`);
|
||||
for service in services {
|
||||
let marker = if service == worker_name { "👉" } else { " " };
|
||||
print(` ${marker} ${service}`);
|
||||
}
|
||||
|
||||
// 5. Wait a moment and check status again
|
||||
print("\n5️⃣ Waiting 3 seconds and checking status again...");
|
||||
sleep(3000); // 3 seconds in milliseconds
|
||||
let status = status(service_manager, worker_name);
|
||||
print(`📊 Service status after 3s: ${status}`);
|
||||
|
||||
// 6. Get service logs
|
||||
print("\n6️⃣ Retrieving service logs...");
|
||||
let logs = logs(service_manager, worker_name, 10);
|
||||
if logs.trim() == "" {
|
||||
print("📄 No logs available yet (this is normal for new services)");
|
||||
} else {
|
||||
print("📄 Recent logs:");
|
||||
let log_lines = logs.split('\n');
|
||||
for i in 0..5 {
|
||||
if i < log_lines.len() {
|
||||
print(` ${log_lines[i]}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 7. Demonstrate start_and_confirm with timeout
|
||||
print("\n7️⃣ Testing start_and_confirm (should succeed quickly since already running)...");
|
||||
start_and_confirm(service_manager, config, 5);
|
||||
print("✅ Service confirmed running within timeout");
|
||||
|
||||
// 8. Stop the service
|
||||
print("\n8️⃣ Stopping the service...");
|
||||
stop(service_manager, worker_name);
|
||||
print("🛑 Service stopped");
|
||||
|
||||
// 9. Check status after stopping
|
||||
print("\n9️⃣ Checking status after stop...");
|
||||
let status = status(service_manager, worker_name);
|
||||
print(`📊 Service status after stop: ${status}`);
|
||||
|
||||
// 10. Restart the service
|
||||
print("\n🔟 Restarting the service...");
|
||||
restart(service_manager, worker_name);
|
||||
print("🔄 Service restarted successfully");
|
||||
|
||||
// 11. Final cleanup
|
||||
print("\n🧹 Cleaning up - removing the service...");
|
||||
remove(service_manager, worker_name);
|
||||
print("🗑️ Service removed successfully");
|
||||
|
||||
// 12. Verify removal
|
||||
print("\n✅ Verifying service removal...");
|
||||
if !exists(service_manager, worker_name) {
|
||||
print("✅ Service successfully removed");
|
||||
} else {
|
||||
print("⚠️ Service still exists after removal");
|
||||
}
|
||||
|
||||
print("\n🎉 Circle worker management demonstration complete!");
|
||||
@@ -1,18 +1,9 @@
# SAL Git Package (`sal-git`)
# SAL `git` Module

The `sal-git` package provides comprehensive functionalities for interacting with Git repositories. It offers both high-level abstractions for common Git workflows and a flexible executor for running arbitrary Git commands with integrated authentication.
The `git` module in SAL provides comprehensive functionalities for interacting with Git repositories. It offers both high-level abstractions for common Git workflows and a flexible executor for running arbitrary Git commands with integrated authentication.

This module is central to SAL's capabilities for managing source code, enabling automation of development tasks, and integrating with version control systems.

## Installation

Add this to your `Cargo.toml`:

```toml
[dependencies]
sal-git = "0.1.0"
```

## Core Components

The module is primarily composed of two main parts:
@@ -18,8 +18,8 @@ path = "src/main.rs"
env_logger = { workspace = true }
rhai = { workspace = true }

# SAL library for Rhai module registration (with all features for herodo)
sal = { path = "..", features = ["all"] }
# SAL library for Rhai module registration
sal = { path = ".." }

[dev-dependencies]
tempfile = { workspace = true }
@@ -15,32 +15,14 @@ Herodo is a command-line utility that executes Rhai scripts with full access to
## Installation

### Build and Install
Build the herodo binary:

```bash
git clone https://github.com/PlanetFirst/sal.git
cd sal
./build_herodo.sh
cd herodo
cargo build --release
```

This script will:
- Build herodo in debug mode
- Install it to `~/hero/bin/herodo` (non-root) or `/usr/local/bin/herodo` (root)
- Make it available in your PATH

**Note**: If using the non-root installation, make sure `~/hero/bin` is in your PATH:
```bash
export PATH="$HOME/hero/bin:$PATH"
```

### Install from crates.io (Coming Soon)

```bash
# This will be available once herodo is published to crates.io
cargo install herodo
```

**Note**: `herodo` is not yet published to crates.io due to publishing rate limits. It will be available soon.
The executable will be available at `target/release/herodo`.

## Usage
@@ -1,57 +0,0 @@
[package]
name = "sal-kubernetes"
version = "0.1.0"
edition = "2021"
authors = ["PlanetFirst <info@incubaid.com>"]
description = "SAL Kubernetes - Kubernetes cluster management and operations using kube-rs SDK"
repository = "https://git.threefold.info/herocode/sal"
license = "Apache-2.0"
keywords = ["kubernetes", "k8s", "cluster", "container", "orchestration"]
categories = ["api-bindings", "development-tools"]

[dependencies]
# Kubernetes client library
kube = { version = "0.95.0", features = ["client", "config", "derive"] }
k8s-openapi = { version = "0.23.0", features = ["latest"] }

# Async runtime
tokio = { version = "1.45.0", features = ["full"] }

# Production safety features
tokio-retry = "0.3.0"
governor = "0.6.3"
tower = { version = "0.5.2", features = ["timeout", "limit"] }

# Error handling
thiserror = "2.0.12"
anyhow = "1.0.98"

# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_yaml = "0.9"

# Regular expressions for pattern matching
regex = "1.10.2"

# Logging
log = "0.4"

# Rhai scripting support (optional)
rhai = { version = "1.12.0", features = ["sync"], optional = true }
once_cell = "1.20.2"

# UUID for resource identification
uuid = { version = "1.16.0", features = ["v4"] }

# Base64 encoding for secrets
base64 = "0.22.1"

[dev-dependencies]
tempfile = "3.5"
tokio-test = "0.4.4"
env_logger = "0.11.5"

[features]
default = ["rhai"]
rhai = ["dep:rhai"]
@@ -1,443 +0,0 @@
# SAL Kubernetes (`sal-kubernetes`)

Kubernetes cluster management and operations for the System Abstraction Layer (SAL).

## Installation

Add this to your `Cargo.toml`:

```toml
[dependencies]
sal-kubernetes = "0.1.0"
```

## ⚠️ **IMPORTANT SECURITY NOTICE**

**This package includes destructive operations that can permanently delete Kubernetes resources!**

- The `delete(pattern)` function uses PCRE regex patterns to bulk delete resources
- **Always test patterns in a safe environment first**
- Use specific patterns to avoid accidental deletion of critical resources
- Consider the impact on dependent resources before deletion
- **No confirmation prompts** - deletions are immediate and irreversible
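
As an illustration of the difference between a broad and a specific pattern, an anchored expression limits the blast radius (the resource names here are hypothetical):

```rust
// Narrow, anchored pattern: only matches names like "test-app-1", "test-app-42", ...
km.delete(r"^test-app-[0-9]+$").await?;

// A broad pattern such as "test-.*" would also match unrelated resources
// (e.g. "test-database-backup"), so prefer the anchored form when possible.
```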
|
||||
|
||||
## Overview
|
||||
|
||||
This package provides a high-level interface for managing Kubernetes clusters using the `kube-rs` SDK. It focuses on namespace-scoped operations through the `KubernetesManager` factory pattern.
|
||||
|
||||
### Production Safety Features
|
||||
|
||||
- **Configurable Timeouts**: All operations have configurable timeouts to prevent hanging
|
||||
- **Exponential Backoff Retry**: Automatic retry logic for transient failures
|
||||
- **Rate Limiting**: Built-in rate limiting to prevent API overload
|
||||
- **Comprehensive Error Handling**: Detailed error types and proper error propagation
|
||||
- **Structured Logging**: Production-ready logging for monitoring and debugging
|
||||
|
||||
## Features
|
||||
|
||||
- **Application Deployment**: Deploy complete applications with a single method call
|
||||
- **Environment Variables & Labels**: Configure containers with environment variables and Kubernetes labels
|
||||
- **Resource Lifecycle Management**: Automatic cleanup and replacement of existing resources
|
||||
- **Namespace-scoped Management**: Each `KubernetesManager` instance operates on a single namespace
|
||||
- **Pod Management**: List, create, and manage pods
|
||||
- **Pattern-based Deletion**: Delete resources using PCRE pattern matching
|
||||
- **Namespace Operations**: Create and manage namespaces (idempotent operations)
|
||||
- **Resource Management**: Support for pods, services, deployments, configmaps, secrets, and more
|
||||
- **Rhai Integration**: Full scripting support through Rhai wrappers with environment variables
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### Labels vs Environment Variables
|
||||
|
||||
Understanding the difference between labels and environment variables is crucial for effective Kubernetes deployments:
|
||||
|
||||
#### **Labels** (Kubernetes Metadata)
|
||||
|
||||
- **Purpose**: Organize, select, and manage Kubernetes resources
|
||||
- **Scope**: Kubernetes cluster management and resource organization
|
||||
- **Visibility**: Used by Kubernetes controllers, selectors, and monitoring systems
|
||||
- **Examples**: `app=my-app`, `tier=backend`, `environment=production`, `version=v1.2.3`
|
||||
- **Use Cases**: Resource grouping, service discovery, monitoring labels, deployment strategies
|
||||
|
||||
#### **Environment Variables** (Container Configuration)
|
||||
|
||||
- **Purpose**: Configure application runtime behavior and settings
|
||||
- **Scope**: Inside container processes - available to your application code
|
||||
- **Visibility**: Accessible via `process.env`, `os.environ`, etc. in your application
|
||||
- **Examples**: `NODE_ENV=production`, `DATABASE_URL=postgres://...`, `API_KEY=secret`
|
||||
- **Use Cases**: Database connections, API keys, feature flags, runtime configuration
|
||||
|
||||
#### **Example: Complete Application Configuration**
|
||||
|
||||
```rust
|
||||
// Labels: For Kubernetes resource management
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert("app".to_string(), "web-api".to_string()); // Service discovery
|
||||
labels.insert("tier".to_string(), "backend".to_string()); // Architecture layer
|
||||
labels.insert("environment".to_string(), "production".to_string()); // Deployment stage
|
||||
labels.insert("version".to_string(), "v2.1.0".to_string()); // Release version
|
||||
|
||||
// Environment Variables: For application configuration
|
||||
let mut env_vars = HashMap::new();
|
||||
env_vars.insert("NODE_ENV".to_string(), "production".to_string()); // Runtime mode
|
||||
env_vars.insert("DATABASE_URL".to_string(), "postgres://db:5432/app".to_string()); // DB connection
|
||||
env_vars.insert("REDIS_URL".to_string(), "redis://cache:6379".to_string()); // Cache connection
|
||||
env_vars.insert("LOG_LEVEL".to_string(), "info".to_string()); // Logging config
|
||||
```
|
||||
|
||||
## Usage

### Application Deployment (Recommended)

Deploy complete applications with labels and environment variables:

```rust
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let km = KubernetesManager::new("default").await?;

    // Configure labels for Kubernetes resource organization
    let mut labels = HashMap::new();
    labels.insert("app".to_string(), "my-app".to_string());
    labels.insert("tier".to_string(), "backend".to_string());

    // Configure environment variables for the container
    let mut env_vars = HashMap::new();
    env_vars.insert("NODE_ENV".to_string(), "production".to_string());
    env_vars.insert("DATABASE_URL".to_string(), "postgres://db:5432/myapp".to_string());
    env_vars.insert("API_KEY".to_string(), "secret-api-key".to_string());

    // Deploy application with deployment + service
    km.deploy_application(
        "my-app",          // name
        "node:18-alpine",  // image
        3,                 // replicas
        3000,              // port
        Some(labels),      // Kubernetes labels
        Some(env_vars),    // container environment variables
    ).await?;

    println!("✅ Application deployed successfully!");
    Ok(())
}
```

### Basic Operations

```rust
use sal_kubernetes::KubernetesManager;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a manager for the "default" namespace
    let km = KubernetesManager::new("default").await?;

    // List all pods in the namespace
    let pods = km.pods_list().await?;
    println!("Found {} pods", pods.len());

    // Create a namespace (no error if it already exists)
    km.namespace_create("my-namespace").await?;

    // Delete resources matching a pattern
    km.delete("test-.*").await?;

    Ok(())
}
```

### Rhai Scripting

```javascript
// Create Kubernetes manager for namespace
let km = kubernetes_manager_new("default");

// Deploy application with labels and environment variables
deploy_application(km, "my-app", "node:18-alpine", 3, 3000, #{
    "app": "my-app",
    "tier": "backend",
    "environment": "production"
}, #{
    "NODE_ENV": "production",
    "DATABASE_URL": "postgres://db:5432/myapp",
    "API_KEY": "secret-api-key"
});

print("✅ Application deployed!");

// Basic operations
let pods = pods_list(km);
print("Found " + pods.len() + " pods");

namespace_create(km, "my-namespace");
delete(km, "test-.*");
```

## Dependencies

- `kube`: Kubernetes client library
- `k8s-openapi`: Kubernetes API types
- `tokio`: Async runtime
- `regex`: Pattern matching for resource deletion
- `rhai`: Scripting integration (optional)

## Configuration

### Kubernetes Authentication

The package uses the standard Kubernetes configuration methods:

- In-cluster configuration (when running in a pod)
- Kubeconfig file (`~/.kube/config` or `KUBECONFIG` environment variable)
- Service account tokens

### Production Safety Configuration

```rust
use sal_kubernetes::{KubernetesManager, KubernetesConfig};
use std::time::Duration;

// Create with custom configuration
let config = KubernetesConfig::new()
    .with_timeout(Duration::from_secs(60))
    .with_retries(5, Duration::from_secs(1), Duration::from_secs(30))
    .with_rate_limit(20, 50);

let km = KubernetesManager::with_config("my-namespace", config).await?;
```

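To make the retry parameters concrete, here is a rough sketch of the kind of backoff schedule they describe, assuming the usual doubling strategy capped at the configured maximum; the exact schedule is an implementation detail of the client, so treat this as an illustration only.

```rust
use std::time::Duration;

/// Assumed schedule: delay = base * 2^attempt, clamped to the configured maximum.
fn backoff_delays(max_retries: u32, base: Duration, max: Duration) -> Vec<Duration> {
    (0..max_retries)
        .map(|attempt| std::cmp::min(base * 2u32.pow(attempt), max))
        .collect()
}

// With the configuration above (5 retries, 1 s base delay, 30 s cap) this yields
// delays of roughly 1 s, 2 s, 4 s, 8 s and 16 s between attempts.
```
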
### Pre-configured Profiles

```rust
// High-throughput environment
let config = KubernetesConfig::high_throughput();

// Low-latency environment
let config = KubernetesConfig::low_latency();

// Development/testing
let config = KubernetesConfig::development();
```

## Error Handling

All operations return `Result<T, KubernetesError>` with comprehensive error types for different failure scenarios including API errors, configuration issues, and permission problems.

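The variant names below mirror the ones defined in `error.rs`; the `show_pod` helper itself is only an illustration of matching on a few common failure modes.

```rust
use sal_kubernetes::{KubernetesError, KubernetesManager};

// Hypothetical helper: react differently to a few common failure modes.
async fn show_pod(km: &KubernetesManager, name: &str) {
    match km.pod_get(name).await {
        Ok(pod) => println!("Found pod: {:?}", pod.metadata.name),
        Err(KubernetesError::ApiError(e)) => eprintln!("Kubernetes API error: {e}"),
        Err(KubernetesError::PermissionDenied(msg)) => eprintln!("Check RBAC permissions: {msg}"),
        Err(KubernetesError::Timeout(msg)) => eprintln!("Operation timed out: {msg}"),
        Err(other) => eprintln!("Operation failed: {other}"),
    }
}
```
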
## API Reference

### KubernetesManager

The main interface for Kubernetes operations. Each instance is scoped to a single namespace.

#### Constructor

- `KubernetesManager::new(namespace)` - Create a manager for the specified namespace

#### Application Deployment

- `deploy_application(name, image, replicas, port, labels, env_vars)` - Deploy complete application with deployment and service
- `deployment_create(name, image, replicas, labels, env_vars)` - Create deployment with environment variables and labels

#### Resource Creation

- `pod_create(name, image, labels, env_vars)` - Create pod with environment variables and labels
- `service_create(name, selector, port, target_port)` - Create service with port mapping
- `configmap_create(name, data)` - Create configmap with data
- `secret_create(name, data, secret_type)` - Create secret with data and optional type

#### Resource Listing

- `pods_list()` - List all pods in the namespace
- `services_list()` - List all services in the namespace
- `deployments_list()` - List all deployments in the namespace
- `configmaps_list()` - List all configmaps in the namespace
- `secrets_list()` - List all secrets in the namespace

#### Resource Management

- `pod_get(name)` - Get a specific pod by name
- `service_get(name)` - Get a specific service by name
- `deployment_get(name)` - Get a specific deployment by name
- `pod_delete(name)` - Delete a specific pod by name
- `service_delete(name)` - Delete a specific service by name
- `deployment_delete(name)` - Delete a specific deployment by name
- `configmap_delete(name)` - Delete a specific configmap by name
- `secret_delete(name)` - Delete a specific secret by name

#### Pattern-based Operations

- `delete(pattern)` - Delete all resources matching a PCRE pattern

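For example (a fragment, assuming a `km` manager as in the earlier examples; the `^temp-` prefix is only an illustration):

```rust
// Remove every resource in the namespace whose name starts with "temp-"
// and report how many objects were deleted.
let deleted = km.delete("^temp-").await?;
println!("Deleted {} resources", deleted);
```
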
#### Namespace Operations

- `namespace_create(name)` - Create a namespace (idempotent)
- `namespace_exists(name)` - Check if a namespace exists
- `namespaces_list()` - List all namespaces (cluster-wide)

#### Utility Functions

- `resource_counts()` - Get counts of all resource types in the namespace
- `namespace()` - Get the namespace this manager operates on

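A small usage sketch of the utility helpers (the printed format is illustrative):

```rust
// The namespace this manager is bound to.
println!("Operating on namespace: {}", km.namespace());

// Per-resource-type counts, e.g. pods, services, deployments.
let counts = km.resource_counts().await?;
for (kind, count) in &counts {
    println!("{kind}: {count}");
}
```
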
### Rhai Functions

When using the Rhai integration, the following functions are available:

**Manager Creation & Application Deployment:**

- `kubernetes_manager_new(namespace)` - Create a KubernetesManager
- `deploy_application(km, name, image, replicas, port, labels, env_vars)` - Deploy application with environment variables

**Resource Listing:**

- `pods_list(km)` - List pods
- `services_list(km)` - List services
- `deployments_list(km)` - List deployments
- `configmaps_list(km)` - List configmaps
- `secrets_list(km)` - List secrets
- `namespaces_list(km)` - List all namespaces
- `resource_counts(km)` - Get resource counts

**Resource Operations:**

- `delete(km, pattern)` - Delete resources matching pattern
- `pod_delete(km, name)` - Delete specific pod
- `service_delete(km, name)` - Delete specific service
- `deployment_delete(km, name)` - Delete specific deployment
- `configmap_delete(km, name)` - Delete specific configmap
- `secret_delete(km, name)` - Delete specific secret

**Namespace Functions:**

- `namespace_create(km, name)` - Create namespace
- `namespace_exists(km, name)` - Check namespace existence
- `namespace_delete(km, name)` - Delete namespace
- `namespace(km)` - Get manager's namespace

## Examples

The `examples/kubernetes/clusters/` directory contains comprehensive examples:

### Rust Examples

Run with: `cargo run --example <name> --features kubernetes`

- `postgres` - PostgreSQL database deployment with environment variables
- `redis` - Redis cache deployment with configuration
- `generic` - Multiple application deployments (nginx, node.js, mongodb)

### Rhai Examples

Run with: `./target/debug/herodo examples/kubernetes/clusters/<script>.rhai`

- `postgres.rhai` - PostgreSQL cluster deployment script
- `redis.rhai` - Redis cluster deployment script

### Real-World Examples

#### PostgreSQL Database

```rust
let mut labels = HashMap::new();
labels.insert("app".to_string(), "postgres".to_string());

let mut env_vars = HashMap::new();
env_vars.insert("POSTGRES_DB".to_string(), "myapp".to_string());
env_vars.insert("POSTGRES_USER".to_string(), "postgres".to_string());
env_vars.insert("POSTGRES_PASSWORD".to_string(), "secretpassword".to_string());

km.deploy_application("postgres", "postgres:15", 1, 5432, Some(labels), Some(env_vars)).await?;
```

#### Redis Cache

```rust
let mut env_vars = HashMap::new();
env_vars.insert("REDIS_PASSWORD".to_string(), "redispassword".to_string());
env_vars.insert("REDIS_MAXMEMORY".to_string(), "256mb".to_string());

km.deploy_application("redis", "redis:7-alpine", 3, 6379, None, Some(env_vars)).await?;
```

## Testing

### Test Coverage

The module includes comprehensive test coverage:

- **Unit Tests**: Core functionality without cluster dependency
- **Integration Tests**: Real Kubernetes cluster operations
- **Environment Variables Tests**: Complete env var functionality testing
- **Edge Cases Tests**: Error handling and boundary conditions
- **Rhai Integration Tests**: Scripting environment testing
- **Production Readiness Tests**: Concurrent operations and error handling

### Running Tests

```bash
# Unit tests (no cluster required)
cargo test --package sal-kubernetes

# Integration tests (requires cluster)
KUBERNETES_TEST_ENABLED=1 cargo test --package sal-kubernetes

# Rhai integration tests
KUBERNETES_TEST_ENABLED=1 cargo test --package sal-kubernetes --features rhai

# Run specific test suites
cargo test --package sal-kubernetes deployment_env_vars_test
cargo test --package sal-kubernetes edge_cases_test

# Rhai environment variables test
KUBERNETES_TEST_ENABLED=1 ./target/debug/herodo kubernetes/tests/rhai/env_vars_test.rhai
```

### Test Requirements

- **Kubernetes Cluster**: Integration tests require a running Kubernetes cluster
- **Environment Variable**: Set `KUBERNETES_TEST_ENABLED=1` to enable integration tests
- **Permissions**: Tests require permissions to create/delete resources in the `default` namespace

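The integration tests gate themselves on `KUBERNETES_TEST_ENABLED`, following the pattern below (a sketch mirroring the helper used in the test files; the test name is a placeholder), so running `cargo test` without a cluster simply skips them:

```rust
fn should_run_k8s_tests() -> bool {
    std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
}

#[tokio::test]
async fn my_cluster_backed_test() {
    if !should_run_k8s_tests() {
        println!("Skipping. Set KUBERNETES_TEST_ENABLED=1 to enable.");
        return;
    }
    // ... assertions that talk to a real cluster go here ...
}
```
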
## Production Considerations

### Security

- Always use specific PCRE patterns to avoid accidental deletion of important resources
- Test deletion patterns in a safe environment first
- Ensure proper RBAC permissions are configured
- Be cautious with cluster-wide operations like namespace listing
- Use Kubernetes secrets for sensitive environment variables instead of plain text

### Performance & Scalability

- Consider adding resource limits (CPU/memory) for production deployments
- Use persistent volumes for stateful applications
- Configure readiness and liveness probes for health checks
- Implement proper monitoring and logging labels

### Environment Variables Best Practices

- Use Kubernetes secrets for sensitive data (passwords, API keys)
- Validate environment variable values before deployment
- Use consistent naming conventions (e.g., `DATABASE_URL`, `API_KEY`)
- Document required vs optional environment variables

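For example, sensitive values can be stored in a Kubernetes Secret via `secret_create` instead of being passed as plain environment variables (the names and values below are placeholders):

```rust
use std::collections::HashMap;

let mut secret_data = HashMap::new();
secret_data.insert("DATABASE_URL".to_string(), "postgres://db:5432/app".to_string());
secret_data.insert("API_KEY".to_string(), "change-me".to_string());

// `None` uses the default secret type ("Opaque").
km.secret_create("web-api-secrets", secret_data, None).await?;
```

Wiring the Secret into a pod or deployment spec is then handled with standard Kubernetes mechanisms (for example `envFrom`), which is outside what `deploy_application` covers here.
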
### Example: Production-Ready Deployment

```rust
// Production labels for monitoring and management
let mut labels = HashMap::new();
labels.insert("app".to_string(), "web-api".to_string());
labels.insert("version".to_string(), "v1.2.3".to_string());
labels.insert("environment".to_string(), "production".to_string());
labels.insert("team".to_string(), "backend".to_string());

// Non-sensitive environment variables
let mut env_vars = HashMap::new();
env_vars.insert("NODE_ENV".to_string(), "production".to_string());
env_vars.insert("LOG_LEVEL".to_string(), "info".to_string());
env_vars.insert("PORT".to_string(), "3000".to_string());
// Note: Use Kubernetes secrets for DATABASE_URL, API_KEY, etc.

km.deploy_application("web-api", "myapp:v1.2.3", 3, 3000, Some(labels), Some(env_vars)).await?;
```

@@ -1,113 +0,0 @@
//! Configuration for production safety features

use std::time::Duration;

/// Configuration for Kubernetes operations with production safety features
#[derive(Debug, Clone)]
pub struct KubernetesConfig {
    /// Timeout for individual API operations
    pub operation_timeout: Duration,

    /// Maximum number of retry attempts for failed operations
    pub max_retries: u32,

    /// Base delay for exponential backoff retry strategy
    pub retry_base_delay: Duration,

    /// Maximum delay between retries
    pub retry_max_delay: Duration,

    /// Rate limiting: maximum requests per second
    pub rate_limit_rps: u32,

    /// Rate limiting: burst capacity
    pub rate_limit_burst: u32,
}

impl Default for KubernetesConfig {
    fn default() -> Self {
        Self {
            // Conservative timeout for production
            operation_timeout: Duration::from_secs(30),

            // Reasonable retry attempts
            max_retries: 3,

            // Exponential backoff starting at 1 second
            retry_base_delay: Duration::from_secs(1),

            // Maximum 30 seconds between retries
            retry_max_delay: Duration::from_secs(30),

            // Conservative rate limiting: 10 requests per second
            rate_limit_rps: 10,

            // Allow small bursts
            rate_limit_burst: 20,
        }
    }
}

impl KubernetesConfig {
    /// Create a new configuration with custom settings
    pub fn new() -> Self {
        Self::default()
    }

    /// Set operation timeout
    pub fn with_timeout(mut self, timeout: Duration) -> Self {
        self.operation_timeout = timeout;
        self
    }

    /// Set retry configuration
    pub fn with_retries(mut self, max_retries: u32, base_delay: Duration, max_delay: Duration) -> Self {
        self.max_retries = max_retries;
        self.retry_base_delay = base_delay;
        self.retry_max_delay = max_delay;
        self
    }

    /// Set rate limiting configuration
    pub fn with_rate_limit(mut self, rps: u32, burst: u32) -> Self {
        self.rate_limit_rps = rps;
        self.rate_limit_burst = burst;
        self
    }

    /// Create configuration optimized for high-throughput environments
    pub fn high_throughput() -> Self {
        Self {
            operation_timeout: Duration::from_secs(60),
            max_retries: 5,
            retry_base_delay: Duration::from_millis(500),
            retry_max_delay: Duration::from_secs(60),
            rate_limit_rps: 50,
            rate_limit_burst: 100,
        }
    }

    /// Create configuration optimized for low-latency environments
    pub fn low_latency() -> Self {
        Self {
            operation_timeout: Duration::from_secs(10),
            max_retries: 2,
            retry_base_delay: Duration::from_millis(100),
            retry_max_delay: Duration::from_secs(5),
            rate_limit_rps: 20,
            rate_limit_burst: 40,
        }
    }

    /// Create configuration for development/testing
    pub fn development() -> Self {
        Self {
            operation_timeout: Duration::from_secs(120),
            max_retries: 1,
            retry_base_delay: Duration::from_millis(100),
            retry_max_delay: Duration::from_secs(2),
            rate_limit_rps: 100,
            rate_limit_burst: 200,
        }
    }
}

@@ -1,85 +0,0 @@
//! Error types for SAL Kubernetes operations

use thiserror::Error;

/// Errors that can occur during Kubernetes operations
#[derive(Error, Debug)]
pub enum KubernetesError {
    /// Kubernetes API client error
    #[error("Kubernetes API error: {0}")]
    ApiError(#[from] kube::Error),

    /// Configuration error
    #[error("Configuration error: {0}")]
    ConfigError(String),

    /// Resource not found error
    #[error("Resource not found: {0}")]
    ResourceNotFound(String),

    /// Invalid resource name or pattern
    #[error("Invalid resource name or pattern: {0}")]
    InvalidResourceName(String),

    /// Regular expression error
    #[error("Regular expression error: {0}")]
    RegexError(#[from] regex::Error),

    /// Serialization/deserialization error
    #[error("Serialization error: {0}")]
    SerializationError(#[from] serde_json::Error),

    /// YAML parsing error
    #[error("YAML error: {0}")]
    YamlError(#[from] serde_yaml::Error),

    /// Generic operation error
    #[error("Operation failed: {0}")]
    OperationError(String),

    /// Namespace error
    #[error("Namespace error: {0}")]
    NamespaceError(String),

    /// Permission denied error
    #[error("Permission denied: {0}")]
    PermissionDenied(String),

    /// Timeout error
    #[error("Operation timed out: {0}")]
    Timeout(String),

    /// Generic error wrapper
    #[error("Generic error: {0}")]
    Generic(#[from] anyhow::Error),
}

impl KubernetesError {
    /// Create a new configuration error
    pub fn config_error(msg: impl Into<String>) -> Self {
        Self::ConfigError(msg.into())
    }

    /// Create a new operation error
    pub fn operation_error(msg: impl Into<String>) -> Self {
        Self::OperationError(msg.into())
    }

    /// Create a new namespace error
    pub fn namespace_error(msg: impl Into<String>) -> Self {
        Self::NamespaceError(msg.into())
    }

    /// Create a new permission denied error
    pub fn permission_denied(msg: impl Into<String>) -> Self {
        Self::PermissionDenied(msg.into())
    }

    /// Create a new timeout error
    pub fn timeout(msg: impl Into<String>) -> Self {
        Self::Timeout(msg.into())
    }
}

/// Result type for Kubernetes operations
pub type KubernetesResult<T> = Result<T, KubernetesError>;

File diff suppressed because it is too large
@@ -1,49 +0,0 @@
//! SAL Kubernetes: Kubernetes cluster management and operations
//!
//! This package provides Kubernetes cluster management functionality including:
//! - Namespace-scoped resource management via KubernetesManager
//! - Pod listing and management
//! - Resource deletion with PCRE pattern matching
//! - Namespace creation and management
//! - Support for various Kubernetes resources (pods, services, deployments, etc.)
//!
//! # Example
//!
//! ```rust
//! use sal_kubernetes::KubernetesManager;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create a manager for the "default" namespace
//!     let km = KubernetesManager::new("default").await?;
//!
//!     // List all pods in the namespace
//!     let pods = km.pods_list().await?;
//!     println!("Found {} pods", pods.len());
//!
//!     // Create a namespace (idempotent)
//!     km.namespace_create("my-namespace").await?;
//!
//!     // Delete resources matching a pattern
//!     km.delete("test-.*").await?;
//!
//!     Ok(())
//! }
//! ```

pub mod config;
pub mod error;
pub mod kubernetes_manager;

// Rhai integration module
#[cfg(feature = "rhai")]
pub mod rhai;

// Re-export main types for convenience
pub use config::KubernetesConfig;
pub use error::KubernetesError;
pub use kubernetes_manager::KubernetesManager;

// Re-export commonly used Kubernetes types
pub use k8s_openapi::api::apps::v1::{Deployment, ReplicaSet};
pub use k8s_openapi::api::core::v1::{Namespace, Pod, Service};

@@ -1,729 +0,0 @@
|
||||
//! Rhai wrappers for Kubernetes module functions
|
||||
//!
|
||||
//! This module provides Rhai wrappers for the functions in the Kubernetes module,
|
||||
//! enabling scripting access to Kubernetes operations.
|
||||
|
||||
use crate::{KubernetesError, KubernetesManager};
|
||||
use once_cell::sync::Lazy;
|
||||
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map};
|
||||
use std::sync::Mutex;
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
// Global Tokio runtime for blocking async operations
|
||||
static RUNTIME: Lazy<Mutex<Runtime>> =
|
||||
Lazy::new(|| Mutex::new(Runtime::new().expect("Failed to create Tokio runtime")));
|
||||
|
||||
/// Helper function to convert Rhai Map to HashMap for environment variables
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `rhai_map` - Rhai Map containing key-value pairs
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Option<std::collections::HashMap<String, String>>` - Converted HashMap or None if empty
|
||||
fn convert_rhai_map_to_env_vars(
|
||||
rhai_map: Map,
|
||||
) -> Option<std::collections::HashMap<String, String>> {
|
||||
if rhai_map.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
rhai_map
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.to_string()))
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function to execute async operations with proper runtime handling
|
||||
///
|
||||
/// This uses a global runtime to ensure consistent async execution
|
||||
fn execute_async<F, T>(future: F) -> Result<T, Box<EvalAltResult>>
|
||||
where
|
||||
F: std::future::Future<Output = Result<T, KubernetesError>>,
|
||||
{
|
||||
// Get the global runtime
|
||||
let rt = match RUNTIME.lock() {
|
||||
Ok(rt) => rt,
|
||||
Err(e) => {
|
||||
return Err(Box::new(EvalAltResult::ErrorRuntime(
|
||||
format!("Failed to acquire runtime lock: {e}").into(),
|
||||
rhai::Position::NONE,
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
// Execute the future in a blocking manner
|
||||
rt.block_on(future).map_err(kubernetes_error_to_rhai_error)
|
||||
}
|
||||
|
||||
/// Create a new KubernetesManager for the specified namespace
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `namespace` - The Kubernetes namespace to operate on
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<KubernetesManager, Box<EvalAltResult>>` - The manager instance or an error
|
||||
fn kubernetes_manager_new(namespace: String) -> Result<KubernetesManager, Box<EvalAltResult>> {
|
||||
execute_async(KubernetesManager::new(namespace))
|
||||
}
|
||||
|
||||
/// List all pods in the namespace
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<Array, Box<EvalAltResult>>` - Array of pod names or an error
|
||||
fn pods_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
|
||||
let pods = execute_async(km.pods_list())?;
|
||||
|
||||
let pod_names: Array = pods
|
||||
.iter()
|
||||
.filter_map(|pod| pod.metadata.name.as_ref())
|
||||
.map(|name| Dynamic::from(name.clone()))
|
||||
.collect();
|
||||
|
||||
Ok(pod_names)
|
||||
}
|
||||
|
||||
/// List all services in the namespace
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<Array, Box<EvalAltResult>>` - Array of service names or an error
|
||||
fn services_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
|
||||
let services = execute_async(km.services_list())?;
|
||||
|
||||
let service_names: Array = services
|
||||
.iter()
|
||||
.filter_map(|service| service.metadata.name.as_ref())
|
||||
.map(|name| Dynamic::from(name.clone()))
|
||||
.collect();
|
||||
|
||||
Ok(service_names)
|
||||
}
|
||||
|
||||
/// List all deployments in the namespace
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<Array, Box<EvalAltResult>>` - Array of deployment names or an error
|
||||
fn deployments_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
|
||||
let deployments = execute_async(km.deployments_list())?;
|
||||
|
||||
let deployment_names: Array = deployments
|
||||
.iter()
|
||||
.filter_map(|deployment| deployment.metadata.name.as_ref())
|
||||
.map(|name| Dynamic::from(name.clone()))
|
||||
.collect();
|
||||
|
||||
Ok(deployment_names)
|
||||
}
|
||||
|
||||
/// List all configmaps in the namespace
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<Array, Box<EvalAltResult>>` - Array of configmap names or an error
|
||||
fn configmaps_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
|
||||
let configmaps = execute_async(km.configmaps_list())?;
|
||||
|
||||
let configmap_names: Array = configmaps
|
||||
.iter()
|
||||
.filter_map(|configmap| configmap.metadata.name.as_ref())
|
||||
.map(|name| Dynamic::from(name.clone()))
|
||||
.collect();
|
||||
|
||||
Ok(configmap_names)
|
||||
}
|
||||
|
||||
/// List all secrets in the namespace
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<Array, Box<EvalAltResult>>` - Array of secret names or an error
|
||||
fn secrets_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
|
||||
let secrets = execute_async(km.secrets_list())?;
|
||||
|
||||
let secret_names: Array = secrets
|
||||
.iter()
|
||||
.filter_map(|secret| secret.metadata.name.as_ref())
|
||||
.map(|name| Dynamic::from(name.clone()))
|
||||
.collect();
|
||||
|
||||
Ok(secret_names)
|
||||
}
|
||||
|
||||
/// Delete resources matching a PCRE pattern
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
/// * `pattern` - PCRE pattern to match resource names against
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<i64, Box<EvalAltResult>>` - Number of resources deleted or an error
|
||||
///
|
||||
/// Create a pod with a single container (backward compatible version)
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the pod
|
||||
/// * `image` - Container image to use
|
||||
/// * `labels` - Optional labels as a Map
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - Pod name or an error
|
||||
fn pod_create(
|
||||
km: &mut KubernetesManager,
|
||||
name: String,
|
||||
image: String,
|
||||
labels: Map,
|
||||
) -> Result<String, Box<EvalAltResult>> {
|
||||
let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
labels
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.to_string()))
|
||||
.collect(),
|
||||
)
|
||||
};
|
||||
|
||||
let pod = execute_async(km.pod_create(&name, &image, labels_map, None))?;
|
||||
Ok(pod.metadata.name.unwrap_or(name))
|
||||
}
|
||||
|
||||
/// Create a pod with a single container and environment variables
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the pod
|
||||
/// * `image` - Container image to use
|
||||
/// * `labels` - Optional labels as a Map
|
||||
/// * `env_vars` - Optional environment variables as a Map
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - Pod name or an error
|
||||
fn pod_create_with_env(
|
||||
km: &mut KubernetesManager,
|
||||
name: String,
|
||||
image: String,
|
||||
labels: Map,
|
||||
env_vars: Map,
|
||||
) -> Result<String, Box<EvalAltResult>> {
|
||||
let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
labels
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.to_string()))
|
||||
.collect(),
|
||||
)
|
||||
};
|
||||
|
||||
let env_vars_map = convert_rhai_map_to_env_vars(env_vars);
|
||||
|
||||
let pod = execute_async(km.pod_create(&name, &image, labels_map, env_vars_map))?;
|
||||
Ok(pod.metadata.name.unwrap_or(name))
|
||||
}
|
||||
|
||||
/// Create a service
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the service
|
||||
/// * `selector` - Labels to select pods as a Map
|
||||
/// * `port` - Port to expose
|
||||
/// * `target_port` - Target port on pods (optional, defaults to port)
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - Service name or an error
|
||||
fn service_create(
|
||||
km: &mut KubernetesManager,
|
||||
name: String,
|
||||
selector: Map,
|
||||
port: i64,
|
||||
target_port: i64,
|
||||
) -> Result<String, Box<EvalAltResult>> {
|
||||
let selector_map: std::collections::HashMap<String, String> = selector
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.to_string()))
|
||||
.collect();
|
||||
|
||||
let target_port_opt = if target_port == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(target_port as i32)
|
||||
};
|
||||
let service =
|
||||
execute_async(km.service_create(&name, selector_map, port as i32, target_port_opt))?;
|
||||
Ok(service.metadata.name.unwrap_or(name))
|
||||
}
|
||||
|
||||
/// Create a deployment
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the deployment
|
||||
/// * `image` - Container image to use
|
||||
/// * `replicas` - Number of replicas
|
||||
/// * `labels` - Optional labels as a Map
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - Deployment name or an error
|
||||
fn deployment_create(
|
||||
km: &mut KubernetesManager,
|
||||
name: String,
|
||||
image: String,
|
||||
replicas: i64,
|
||||
labels: Map,
|
||||
env_vars: Map,
|
||||
) -> Result<String, Box<EvalAltResult>> {
|
||||
let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
labels
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.to_string()))
|
||||
.collect(),
|
||||
)
|
||||
};
|
||||
|
||||
let env_vars_map = convert_rhai_map_to_env_vars(env_vars);
|
||||
|
||||
let deployment = execute_async(km.deployment_create(
|
||||
&name,
|
||||
&image,
|
||||
replicas as i32,
|
||||
labels_map,
|
||||
env_vars_map,
|
||||
))?;
|
||||
Ok(deployment.metadata.name.unwrap_or(name))
|
||||
}
|
||||
|
||||
/// Create a ConfigMap
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the ConfigMap
|
||||
/// * `data` - Data as a Map
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - ConfigMap name or an error
|
||||
fn configmap_create(
|
||||
km: &mut KubernetesManager,
|
||||
name: String,
|
||||
data: Map,
|
||||
) -> Result<String, Box<EvalAltResult>> {
|
||||
let data_map: std::collections::HashMap<String, String> = data
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.to_string()))
|
||||
.collect();
|
||||
|
||||
let configmap = execute_async(km.configmap_create(&name, data_map))?;
|
||||
Ok(configmap.metadata.name.unwrap_or(name))
|
||||
}
|
||||
|
||||
/// Create a Secret
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the Secret
|
||||
/// * `data` - Data as a Map (will be base64 encoded)
|
||||
/// * `secret_type` - Type of secret (optional, defaults to "Opaque")
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - Secret name or an error
|
||||
fn secret_create(
|
||||
km: &mut KubernetesManager,
|
||||
name: String,
|
||||
data: Map,
|
||||
secret_type: String,
|
||||
) -> Result<String, Box<EvalAltResult>> {
|
||||
let data_map: std::collections::HashMap<String, String> = data
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.to_string()))
|
||||
.collect();
|
||||
|
||||
let secret_type_opt = if secret_type.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(secret_type.as_str())
|
||||
};
|
||||
let secret = execute_async(km.secret_create(&name, data_map, secret_type_opt))?;
|
||||
Ok(secret.metadata.name.unwrap_or(name))
|
||||
}
|
||||
|
||||
/// Get a pod by name
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the pod to get
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - Pod name or an error
|
||||
fn pod_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
|
||||
let pod = execute_async(km.pod_get(&name))?;
|
||||
Ok(pod.metadata.name.unwrap_or(name))
|
||||
}
|
||||
|
||||
/// Get a service by name
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the service to get
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - Service name or an error
|
||||
fn service_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
|
||||
let service = execute_async(km.service_get(&name))?;
|
||||
Ok(service.metadata.name.unwrap_or(name))
|
||||
}
|
||||
|
||||
/// Get a deployment by name
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the deployment to get
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - Deployment name or an error
|
||||
fn deployment_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
|
||||
let deployment = execute_async(km.deployment_get(&name))?;
|
||||
Ok(deployment.metadata.name.unwrap_or(name))
|
||||
}
|
||||
|
||||
fn delete(km: &mut KubernetesManager, pattern: String) -> Result<i64, Box<EvalAltResult>> {
|
||||
let deleted_count = execute_async(km.delete(&pattern))?;
|
||||
|
||||
Ok(deleted_count as i64)
|
||||
}
|
||||
|
||||
/// Create a namespace (idempotent operation)
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
/// * `name` - The name of the namespace to create
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
|
||||
fn namespace_create(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
|
||||
execute_async(km.namespace_create(&name))
|
||||
}
|
||||
|
||||
/// Delete a namespace (destructive operation)
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the namespace to delete
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
|
||||
fn namespace_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
|
||||
execute_async(km.namespace_delete(&name))
|
||||
}
|
||||
|
||||
/// Check if a namespace exists
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
/// * `name` - The name of the namespace to check
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<bool, Box<EvalAltResult>>` - True if namespace exists, false otherwise
|
||||
fn namespace_exists(km: &mut KubernetesManager, name: String) -> Result<bool, Box<EvalAltResult>> {
|
||||
execute_async(km.namespace_exists(&name))
|
||||
}
|
||||
|
||||
/// List all namespaces
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<Array, Box<EvalAltResult>>` - Array of namespace names or an error
|
||||
fn namespaces_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
|
||||
let namespaces = execute_async(km.namespaces_list())?;
|
||||
|
||||
let namespace_names: Array = namespaces
|
||||
.iter()
|
||||
.filter_map(|ns| ns.metadata.name.as_ref())
|
||||
.map(|name| Dynamic::from(name.clone()))
|
||||
.collect();
|
||||
|
||||
Ok(namespace_names)
|
||||
}
|
||||
|
||||
/// Get resource counts for the namespace
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<Map, Box<EvalAltResult>>` - Map of resource counts by type or an error
|
||||
fn resource_counts(km: &mut KubernetesManager) -> Result<Map, Box<EvalAltResult>> {
|
||||
let counts = execute_async(km.resource_counts())?;
|
||||
|
||||
let mut rhai_map = Map::new();
|
||||
for (key, value) in counts {
|
||||
rhai_map.insert(key.into(), Dynamic::from(value as i64));
|
||||
}
|
||||
|
||||
Ok(rhai_map)
|
||||
}
|
||||
|
||||
/// Deploy a complete application with deployment and service
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the application
|
||||
/// * `image` - Container image to use
|
||||
/// * `replicas` - Number of replicas
|
||||
/// * `port` - Port the application listens on
|
||||
/// * `labels` - Optional labels as a Map
|
||||
/// * `env_vars` - Optional environment variables as a Map
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<String, Box<EvalAltResult>>` - Success message or an error
|
||||
fn deploy_application(
|
||||
km: &mut KubernetesManager,
|
||||
name: String,
|
||||
image: String,
|
||||
replicas: i64,
|
||||
port: i64,
|
||||
labels: Map,
|
||||
env_vars: Map,
|
||||
) -> Result<String, Box<EvalAltResult>> {
|
||||
let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
labels
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k.to_string(), v.to_string()))
|
||||
.collect(),
|
||||
)
|
||||
};
|
||||
|
||||
let env_vars_map = convert_rhai_map_to_env_vars(env_vars);
|
||||
|
||||
execute_async(km.deploy_application(
|
||||
&name,
|
||||
&image,
|
||||
replicas as i32,
|
||||
port as i32,
|
||||
labels_map,
|
||||
env_vars_map,
|
||||
))?;
|
||||
|
||||
Ok(format!("Successfully deployed application '{name}'"))
|
||||
}
|
||||
|
||||
/// Delete a specific pod by name
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
/// * `name` - The name of the pod to delete
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
|
||||
fn pod_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
|
||||
execute_async(km.pod_delete(&name))
|
||||
}
|
||||
|
||||
/// Delete a specific service by name
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
/// * `name` - The name of the service to delete
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
|
||||
fn service_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
|
||||
execute_async(km.service_delete(&name))
|
||||
}
|
||||
|
||||
/// Delete a specific deployment by name
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
/// * `name` - The name of the deployment to delete
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
|
||||
fn deployment_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
|
||||
execute_async(km.deployment_delete(&name))
|
||||
}
|
||||
|
||||
/// Delete a ConfigMap by name
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the ConfigMap to delete
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
|
||||
fn configmap_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
|
||||
execute_async(km.configmap_delete(&name))
|
||||
}
|
||||
|
||||
/// Delete a Secret by name
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - Mutable reference to KubernetesManager
|
||||
/// * `name` - Name of the Secret to delete
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
|
||||
fn secret_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
|
||||
execute_async(km.secret_delete(&name))
|
||||
}
|
||||
|
||||
/// Get the namespace this manager operates on
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `km` - The KubernetesManager instance
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `String` - The namespace name
|
||||
fn kubernetes_manager_namespace(km: &mut KubernetesManager) -> String {
|
||||
km.namespace().to_string()
|
||||
}
|
||||
|
||||
/// Register Kubernetes module functions with the Rhai engine
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `engine` - The Rhai engine to register the functions with
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<(), Box<EvalAltResult>>` - Ok if registration was successful, Err otherwise
|
||||
pub fn register_kubernetes_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
|
||||
// Register KubernetesManager type
|
||||
engine.register_type::<KubernetesManager>();
|
||||
|
||||
// Register KubernetesManager constructor and methods
|
||||
engine.register_fn("kubernetes_manager_new", kubernetes_manager_new);
|
||||
engine.register_fn("namespace", kubernetes_manager_namespace);
|
||||
|
||||
// Register resource listing functions
|
||||
engine.register_fn("pods_list", pods_list);
|
||||
engine.register_fn("services_list", services_list);
|
||||
engine.register_fn("deployments_list", deployments_list);
|
||||
engine.register_fn("configmaps_list", configmaps_list);
|
||||
engine.register_fn("secrets_list", secrets_list);
|
||||
engine.register_fn("namespaces_list", namespaces_list);
|
||||
|
||||
// Register resource creation methods (object-oriented style)
|
||||
engine.register_fn("create_pod", pod_create);
|
||||
engine.register_fn("create_pod_with_env", pod_create_with_env);
|
||||
engine.register_fn("create_service", service_create);
|
||||
engine.register_fn("create_deployment", deployment_create);
|
||||
engine.register_fn("create_configmap", configmap_create);
|
||||
engine.register_fn("create_secret", secret_create);
|
||||
|
||||
// Register resource get methods
|
||||
engine.register_fn("get_pod", pod_get);
|
||||
engine.register_fn("get_service", service_get);
|
||||
engine.register_fn("get_deployment", deployment_get);
|
||||
|
||||
// Register resource management methods
|
||||
engine.register_fn("delete", delete);
|
||||
engine.register_fn("delete_pod", pod_delete);
|
||||
engine.register_fn("delete_service", service_delete);
|
||||
engine.register_fn("delete_deployment", deployment_delete);
|
||||
engine.register_fn("delete_configmap", configmap_delete);
|
||||
engine.register_fn("delete_secret", secret_delete);
|
||||
|
||||
// Register namespace methods (object-oriented style)
|
||||
engine.register_fn("create_namespace", namespace_create);
|
||||
engine.register_fn("delete_namespace", namespace_delete);
|
||||
engine.register_fn("namespace_exists", namespace_exists);
|
||||
|
||||
// Register utility functions
|
||||
engine.register_fn("resource_counts", resource_counts);
|
||||
|
||||
// Register convenience functions
|
||||
engine.register_fn("deploy_application", deploy_application);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Helper function for error conversion
|
||||
fn kubernetes_error_to_rhai_error(error: KubernetesError) -> Box<EvalAltResult> {
|
||||
Box::new(EvalAltResult::ErrorRuntime(
|
||||
format!("Kubernetes error: {error}").into(),
|
||||
rhai::Position::NONE,
|
||||
))
|
||||
}
|
||||
@@ -1,253 +0,0 @@
|
||||
//! CRUD operations tests for SAL Kubernetes
|
||||
//!
|
||||
//! These tests verify that all Create, Read, Update, Delete operations work correctly.
|
||||
|
||||
#[cfg(test)]
|
||||
mod crud_tests {
|
||||
use sal_kubernetes::KubernetesManager;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Check if Kubernetes integration tests should run
|
||||
fn should_run_k8s_tests() -> bool {
|
||||
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_complete_crud_operations() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping CRUD test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
println!("🔍 Testing complete CRUD operations...");
|
||||
|
||||
// Create a test namespace for our operations
|
||||
let test_namespace = "sal-crud-test";
|
||||
let km = KubernetesManager::new("default")
|
||||
.await
|
||||
.expect("Should connect to cluster");
|
||||
|
||||
// Clean up any existing test namespace
|
||||
let _ = km.namespace_delete(test_namespace).await;
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
|
||||
|
||||
// CREATE operations
|
||||
println!("\n=== CREATE Operations ===");
|
||||
|
||||
// 1. Create namespace
|
||||
km.namespace_create(test_namespace)
|
||||
.await
|
||||
.expect("Should create test namespace");
|
||||
println!("✅ Created namespace: {}", test_namespace);
|
||||
|
||||
// Switch to test namespace
|
||||
let test_km = KubernetesManager::new(test_namespace)
|
||||
.await
|
||||
.expect("Should connect to test namespace");
|
||||
|
||||
// 2. Create ConfigMap
|
||||
let mut config_data = HashMap::new();
|
||||
config_data.insert(
|
||||
"app.properties".to_string(),
|
||||
"debug=true\nport=8080".to_string(),
|
||||
);
|
||||
config_data.insert(
|
||||
"config.yaml".to_string(),
|
||||
"key: value\nenv: test".to_string(),
|
||||
);
|
||||
|
||||
let configmap = test_km
|
||||
.configmap_create("test-config", config_data)
|
||||
.await
|
||||
.expect("Should create ConfigMap");
|
||||
println!(
|
||||
"✅ Created ConfigMap: {}",
|
||||
configmap.metadata.name.unwrap_or_default()
|
||||
);
|
||||
|
||||
// 3. Create Secret
|
||||
let mut secret_data = HashMap::new();
|
||||
secret_data.insert("username".to_string(), "testuser".to_string());
|
||||
secret_data.insert("password".to_string(), "secret123".to_string());
|
||||
|
||||
let secret = test_km
|
||||
.secret_create("test-secret", secret_data, None)
|
||||
.await
|
||||
.expect("Should create Secret");
|
||||
println!(
|
||||
"✅ Created Secret: {}",
|
||||
secret.metadata.name.unwrap_or_default()
|
||||
);
|
||||
|
||||
// 4. Create Pod
|
||||
let mut pod_labels = HashMap::new();
|
||||
pod_labels.insert("app".to_string(), "test-app".to_string());
|
||||
pod_labels.insert("version".to_string(), "v1".to_string());
|
||||
|
||||
let pod = test_km
|
||||
.pod_create("test-pod", "nginx:alpine", Some(pod_labels.clone()), None)
|
||||
.await
|
||||
.expect("Should create Pod");
|
||||
println!("✅ Created Pod: {}", pod.metadata.name.unwrap_or_default());
|
||||
|
||||
// 5. Create Service
|
||||
let service = test_km
|
||||
.service_create("test-service", pod_labels.clone(), 80, Some(80))
|
||||
.await
|
||||
.expect("Should create Service");
|
||||
println!(
|
||||
"✅ Created Service: {}",
|
||||
service.metadata.name.unwrap_or_default()
|
||||
);
|
||||
|
||||
// 6. Create Deployment
|
||||
let deployment = test_km
|
||||
.deployment_create("test-deployment", "nginx:alpine", 2, Some(pod_labels), None)
|
||||
.await
|
||||
.expect("Should create Deployment");
|
||||
println!(
|
||||
"✅ Created Deployment: {}",
|
||||
deployment.metadata.name.unwrap_or_default()
|
||||
);
|
||||
|
||||
// READ operations
|
||||
println!("\n=== READ Operations ===");
|
||||
|
||||
// List all resources
|
||||
let pods = test_km.pods_list().await.expect("Should list pods");
|
||||
println!("✅ Listed {} pods", pods.len());
|
||||
|
||||
let services = test_km.services_list().await.expect("Should list services");
|
||||
println!("✅ Listed {} services", services.len());
|
||||
|
||||
let deployments = test_km
|
||||
.deployments_list()
|
||||
.await
|
||||
.expect("Should list deployments");
|
||||
println!("✅ Listed {} deployments", deployments.len());
|
||||
|
||||
let configmaps = test_km
|
||||
.configmaps_list()
|
||||
.await
|
||||
.expect("Should list configmaps");
|
||||
println!("✅ Listed {} configmaps", configmaps.len());
|
||||
|
||||
let secrets = test_km.secrets_list().await.expect("Should list secrets");
|
||||
println!("✅ Listed {} secrets", secrets.len());
|
||||
|
||||
// Get specific resources
|
||||
let pod = test_km.pod_get("test-pod").await.expect("Should get pod");
|
||||
println!(
|
||||
"✅ Retrieved pod: {}",
|
||||
pod.metadata.name.unwrap_or_default()
|
||||
);
|
||||
|
||||
let service = test_km
|
||||
.service_get("test-service")
|
||||
.await
|
||||
.expect("Should get service");
|
||||
println!(
|
||||
"✅ Retrieved service: {}",
|
||||
service.metadata.name.unwrap_or_default()
|
||||
);
|
||||
|
||||
let deployment = test_km
|
||||
.deployment_get("test-deployment")
|
||||
.await
|
||||
.expect("Should get deployment");
|
||||
println!(
|
||||
"✅ Retrieved deployment: {}",
|
||||
deployment.metadata.name.unwrap_or_default()
|
||||
);
|
||||
|
||||
// Resource counts
|
||||
let counts = test_km
|
||||
.resource_counts()
|
||||
.await
|
||||
.expect("Should get resource counts");
|
||||
println!("✅ Resource counts: {:?}", counts);
|
||||
|
||||
// DELETE operations
|
||||
println!("\n=== DELETE Operations ===");
|
||||
|
||||
// Delete individual resources
|
||||
test_km
|
||||
.pod_delete("test-pod")
|
||||
.await
|
||||
.expect("Should delete pod");
|
||||
println!("✅ Deleted pod");
|
||||
|
||||
test_km
|
||||
.service_delete("test-service")
|
||||
.await
|
||||
.expect("Should delete service");
|
||||
println!("✅ Deleted service");
|
||||
|
||||
test_km
|
||||
.deployment_delete("test-deployment")
|
||||
.await
|
||||
.expect("Should delete deployment");
|
||||
println!("✅ Deleted deployment");
|
||||
|
||||
test_km
|
||||
.configmap_delete("test-config")
|
||||
.await
|
||||
.expect("Should delete configmap");
|
||||
println!("✅ Deleted configmap");
|
||||
|
||||
test_km
|
||||
.secret_delete("test-secret")
|
||||
.await
|
||||
.expect("Should delete secret");
|
||||
println!("✅ Deleted secret");
|
||||
|
||||
// Verify resources are deleted
|
||||
let final_counts = test_km
|
||||
.resource_counts()
|
||||
.await
|
||||
.expect("Should get final resource counts");
|
||||
println!("✅ Final resource counts: {:?}", final_counts);
|
||||
|
||||
// Delete the test namespace
|
||||
km.namespace_delete(test_namespace)
|
||||
.await
|
||||
.expect("Should delete test namespace");
|
||||
println!("✅ Deleted test namespace");
|
||||
|
||||
println!("\n🎉 All CRUD operations completed successfully!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_error_handling_in_crud() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping CRUD error handling test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
println!("🔍 Testing error handling in CRUD operations...");
|
||||
|
||||
let km = KubernetesManager::new("default")
|
||||
.await
|
||||
.expect("Should connect to cluster");
|
||||
|
||||
// Test creating resources with invalid names
|
||||
let result = km.pod_create("", "nginx", None, None).await;
|
||||
assert!(result.is_err(), "Should fail with empty pod name");
|
||||
println!("✅ Empty pod name properly rejected");
|
||||
|
||||
// Test getting non-existent resources
|
||||
let result = km.pod_get("non-existent-pod").await;
|
||||
assert!(result.is_err(), "Should fail to get non-existent pod");
|
||||
println!("✅ Non-existent pod properly handled");
|
||||
|
||||
// Test deleting non-existent resources
|
||||
let result = km.service_delete("non-existent-service").await;
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"Should fail to delete non-existent service"
|
||||
);
|
||||
println!("✅ Non-existent service deletion properly handled");
|
||||
|
||||
println!("✅ Error handling in CRUD operations is robust");
|
||||
}
|
||||
}
|
||||
@@ -1,384 +0,0 @@
|
||||
//! Tests for deployment creation with environment variables
|
||||
//!
|
||||
//! These tests verify the new environment variable functionality in deployments
|
||||
//! and the enhanced deploy_application method.
|
||||
|
||||
use sal_kubernetes::KubernetesManager;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Check if Kubernetes integration tests should run
|
||||
fn should_run_k8s_tests() -> bool {
|
||||
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deployment_create_with_env_vars() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return, // Skip if can't connect
|
||||
};
|
||||
|
||||
// Clean up any existing test deployment
|
||||
let _ = km.deployment_delete("test-env-deployment").await;
|
||||
|
||||
// Create deployment with environment variables
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert("app".to_string(), "test-env-app".to_string());
|
||||
labels.insert("test".to_string(), "env-vars".to_string());
|
||||
|
||||
let mut env_vars = HashMap::new();
|
||||
env_vars.insert("TEST_VAR_1".to_string(), "value1".to_string());
|
||||
env_vars.insert("TEST_VAR_2".to_string(), "value2".to_string());
|
||||
env_vars.insert("NODE_ENV".to_string(), "test".to_string());
|
||||
|
||||
let result = km
|
||||
.deployment_create(
|
||||
"test-env-deployment",
|
||||
"nginx:latest",
|
||||
1,
|
||||
Some(labels),
|
||||
Some(env_vars),
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to create deployment with env vars: {:?}",
|
||||
result
|
||||
);
|
||||
|
||||
// Verify the deployment was created
|
||||
let deployment = km.deployment_get("test-env-deployment").await;
|
||||
assert!(deployment.is_ok(), "Failed to get created deployment");
|
||||
|
||||
let deployment = deployment.unwrap();
|
||||
|
||||
// Verify environment variables are set in the container spec
|
||||
if let Some(spec) = &deployment.spec {
|
||||
if let Some(template) = &spec.template.spec {
|
||||
if let Some(container) = template.containers.first() {
|
||||
if let Some(env) = &container.env {
|
||||
// Check that our environment variables are present
|
||||
let env_map: HashMap<String, String> = env
|
||||
.iter()
|
||||
.filter_map(|e| e.value.as_ref().map(|v| (e.name.clone(), v.clone())))
|
||||
.collect();
|
||||
|
||||
assert_eq!(env_map.get("TEST_VAR_1"), Some(&"value1".to_string()));
|
||||
assert_eq!(env_map.get("TEST_VAR_2"), Some(&"value2".to_string()));
|
||||
assert_eq!(env_map.get("NODE_ENV"), Some(&"test".to_string()));
|
||||
} else {
|
||||
panic!("No environment variables found in container spec");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
let _ = km.deployment_delete("test-env-deployment").await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pod_create_with_env_vars() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return, // Skip if can't connect
|
||||
};
|
||||
|
||||
// Clean up any existing test pod
|
||||
let _ = km.pod_delete("test-env-pod").await;
|
||||
|
||||
// Create pod with environment variables
|
||||
let mut env_vars = HashMap::new();
|
||||
env_vars.insert("NODE_ENV".to_string(), "test".to_string());
|
||||
env_vars.insert(
|
||||
"DATABASE_URL".to_string(),
|
||||
"postgres://localhost:5432/test".to_string(),
|
||||
);
|
||||
env_vars.insert("API_KEY".to_string(), "test-api-key-12345".to_string());
|
||||
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert("app".to_string(), "test-env-pod-app".to_string());
|
||||
labels.insert("test".to_string(), "environment-variables".to_string());
|
||||
|
||||
let result = km
|
||||
.pod_create("test-env-pod", "nginx:latest", Some(labels), Some(env_vars))
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to create pod with env vars: {:?}",
|
||||
result
|
||||
);
|
||||
|
||||
if let Ok(pod) = result {
|
||||
let pod_name = pod
|
||||
.metadata
|
||||
.name
|
||||
.as_ref()
|
||||
.unwrap_or(&"".to_string())
|
||||
.clone();
|
||||
assert_eq!(pod_name, "test-env-pod");
|
||||
println!("✅ Created pod with environment variables: {}", pod_name);
|
||||
|
||||
// Verify the pod has the expected environment variables
|
||||
if let Some(spec) = &pod.spec {
|
||||
if let Some(container) = spec.containers.first() {
|
||||
if let Some(env) = &container.env {
|
||||
let env_names: Vec<String> = env.iter().map(|e| e.name.clone()).collect();
|
||||
assert!(env_names.contains(&"NODE_ENV".to_string()));
|
||||
assert!(env_names.contains(&"DATABASE_URL".to_string()));
|
||||
assert!(env_names.contains(&"API_KEY".to_string()));
|
||||
println!("✅ Pod has expected environment variables");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
let _ = km.pod_delete("test-env-pod").await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deployment_create_without_env_vars() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Clean up any existing test deployment
|
||||
let _ = km.deployment_delete("test-no-env-deployment").await;
|
||||
|
||||
// Create deployment without environment variables
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert("app".to_string(), "test-no-env-app".to_string());
|
||||
|
||||
let result = km
|
||||
.deployment_create(
|
||||
"test-no-env-deployment",
|
||||
"nginx:latest",
|
||||
1,
|
||||
Some(labels),
|
||||
None, // No environment variables
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to create deployment without env vars: {:?}",
|
||||
result
|
||||
);
|
||||
|
||||
// Verify the deployment was created
|
||||
let deployment = km.deployment_get("test-no-env-deployment").await;
|
||||
assert!(deployment.is_ok(), "Failed to get created deployment");
|
||||
|
||||
let deployment = deployment.unwrap();
|
||||
|
||||
// Verify no environment variables are set
|
||||
if let Some(spec) = &deployment.spec {
|
||||
if let Some(template) = &spec.template.spec {
|
||||
if let Some(container) = template.containers.first() {
|
||||
// Environment variables should be None or empty
|
||||
assert!(
|
||||
container.env.is_none() || container.env.as_ref().unwrap().is_empty(),
|
||||
"Expected no environment variables, but found some"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
let _ = km.deployment_delete("test-no-env-deployment").await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deploy_application_with_env_vars() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Clean up any existing resources
|
||||
let _ = km.deployment_delete("test-app-env").await;
|
||||
let _ = km.service_delete("test-app-env").await;
|
||||
|
||||
// Deploy application with both labels and environment variables
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert("app".to_string(), "test-app-env".to_string());
|
||||
labels.insert("tier".to_string(), "backend".to_string());
|
||||
|
||||
let mut env_vars = HashMap::new();
|
||||
env_vars.insert(
|
||||
"DATABASE_URL".to_string(),
|
||||
"postgres://localhost:5432/test".to_string(),
|
||||
);
|
||||
env_vars.insert("API_KEY".to_string(), "test-api-key".to_string());
|
||||
env_vars.insert("LOG_LEVEL".to_string(), "debug".to_string());
|
||||
|
||||
let result = km
|
||||
.deploy_application(
|
||||
"test-app-env",
|
||||
"nginx:latest",
|
||||
2,
|
||||
80,
|
||||
Some(labels),
|
||||
Some(env_vars),
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to deploy application with env vars: {:?}",
|
||||
result
|
||||
);
|
||||
|
||||
// Verify both deployment and service were created
|
||||
let deployment = km.deployment_get("test-app-env").await;
|
||||
assert!(deployment.is_ok(), "Deployment should be created");
|
||||
|
||||
let service = km.service_get("test-app-env").await;
|
||||
assert!(service.is_ok(), "Service should be created");
|
||||
|
||||
// Verify environment variables in deployment
|
||||
let deployment = deployment.unwrap();
|
||||
if let Some(spec) = &deployment.spec {
|
||||
if let Some(template) = &spec.template.spec {
|
||||
if let Some(container) = template.containers.first() {
|
||||
if let Some(env) = &container.env {
|
||||
let env_map: HashMap<String, String> = env
|
||||
.iter()
|
||||
.filter_map(|e| e.value.as_ref().map(|v| (e.name.clone(), v.clone())))
|
||||
.collect();
|
||||
|
||||
assert_eq!(
|
||||
env_map.get("DATABASE_URL"),
|
||||
Some(&"postgres://localhost:5432/test".to_string())
|
||||
);
|
||||
assert_eq!(env_map.get("API_KEY"), Some(&"test-api-key".to_string()));
|
||||
assert_eq!(env_map.get("LOG_LEVEL"), Some(&"debug".to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
let _ = km.deployment_delete("test-app-env").await;
|
||||
let _ = km.service_delete("test-app-env").await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deploy_application_cleanup_existing_resources() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => {
|
||||
println!("Skipping test - no Kubernetes cluster available");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let app_name = "test-cleanup-app";
|
||||
|
||||
// Clean up any existing resources first to ensure clean state
|
||||
let _ = km.deployment_delete(app_name).await;
|
||||
let _ = km.service_delete(app_name).await;
|
||||
|
||||
// Wait a moment for cleanup to complete
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
|
||||
|
||||
// First deployment
|
||||
let result = km
|
||||
.deploy_application(app_name, "nginx:latest", 1, 80, None, None)
|
||||
.await;
|
||||
|
||||
if result.is_err() {
|
||||
println!("Skipping test - cluster connection unstable: {:?}", result);
|
||||
return;
|
||||
}
|
||||
|
||||
// Verify resources exist (with graceful handling)
|
||||
let deployment_exists = km.deployment_get(app_name).await.is_ok();
|
||||
let service_exists = km.service_get(app_name).await.is_ok();
|
||||
|
||||
if !deployment_exists || !service_exists {
|
||||
println!("Skipping test - resources not created properly");
|
||||
let _ = km.deployment_delete(app_name).await;
|
||||
let _ = km.service_delete(app_name).await;
|
||||
return;
|
||||
}
|
||||
|
||||
// Second deployment with different configuration (should replace the first)
|
||||
let mut env_vars = HashMap::new();
|
||||
env_vars.insert("VERSION".to_string(), "2.0".to_string());
|
||||
|
||||
let result = km
|
||||
.deploy_application(app_name, "nginx:alpine", 2, 80, None, Some(env_vars))
|
||||
.await;
|
||||
if result.is_err() {
|
||||
println!(
|
||||
"Skipping verification - second deployment failed: {:?}",
|
||||
result
|
||||
);
|
||||
let _ = km.deployment_delete(app_name).await;
|
||||
let _ = km.service_delete(app_name).await;
|
||||
return;
|
||||
}
|
||||
|
||||
// Verify resources still exist (replaced, not duplicated)
|
||||
let deployment = km.deployment_get(app_name).await;
|
||||
if deployment.is_err() {
|
||||
println!("Skipping verification - deployment not found after replacement");
|
||||
let _ = km.deployment_delete(app_name).await;
|
||||
let _ = km.service_delete(app_name).await;
|
||||
return;
|
||||
}
|
||||
|
||||
// Verify the new configuration
|
||||
let deployment = deployment.unwrap();
|
||||
if let Some(spec) = &deployment.spec {
|
||||
assert_eq!(spec.replicas, Some(2), "Replicas should be updated to 2");
|
||||
|
||||
if let Some(template) = &spec.template.spec {
|
||||
if let Some(container) = template.containers.first() {
|
||||
assert_eq!(
|
||||
container.image,
|
||||
Some("nginx:alpine".to_string()),
|
||||
"Image should be updated"
|
||||
);
|
||||
|
||||
if let Some(env) = &container.env {
|
||||
let has_version = env
|
||||
.iter()
|
||||
.any(|e| e.name == "VERSION" && e.value == Some("2.0".to_string()));
|
||||
assert!(has_version, "Environment variable VERSION should be set");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
let _ = km.deployment_delete(app_name).await;
|
||||
let _ = km.service_delete(app_name).await;
|
||||
}
|
||||
@@ -1,293 +0,0 @@
|
||||
//! Edge case and error scenario tests for Kubernetes module
|
||||
//!
|
||||
//! These tests verify proper error handling and edge case behavior.
|
||||
|
||||
use sal_kubernetes::KubernetesManager;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Check if Kubernetes integration tests should run
|
||||
fn should_run_k8s_tests() -> bool {
|
||||
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deployment_with_invalid_image() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Clean up any existing test deployment
|
||||
let _ = km.deployment_delete("test-invalid-image").await;
|
||||
|
||||
// Try to create deployment with invalid image name
|
||||
let result = km
|
||||
.deployment_create(
|
||||
"test-invalid-image",
|
||||
"invalid/image/name/that/does/not/exist:latest",
|
||||
1,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
// The deployment creation should succeed (Kubernetes validates images at runtime)
|
||||
assert!(result.is_ok(), "Deployment creation should succeed even with invalid image");
|
||||
|
||||
// Clean up
|
||||
let _ = km.deployment_delete("test-invalid-image").await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deployment_with_empty_name() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Try to create deployment with empty name
|
||||
let result = km
|
||||
.deployment_create("", "nginx:latest", 1, None, None)
|
||||
.await;
|
||||
|
||||
// Should fail due to invalid name
|
||||
assert!(result.is_err(), "Deployment with empty name should fail");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deployment_with_invalid_replicas() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Clean up any existing test deployment
|
||||
let _ = km.deployment_delete("test-invalid-replicas").await;
|
||||
|
||||
// Try to create deployment with negative replicas
|
||||
let result = km
|
||||
.deployment_create("test-invalid-replicas", "nginx:latest", -1, None, None)
|
||||
.await;
|
||||
|
||||
// Should fail due to invalid replica count
|
||||
assert!(result.is_err(), "Deployment with negative replicas should fail");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deployment_with_large_env_vars() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Clean up any existing test deployment
|
||||
let _ = km.deployment_delete("test-large-env").await;
|
||||
|
||||
// Create deployment with many environment variables
|
||||
let mut env_vars = HashMap::new();
|
||||
for i in 0..50 {
|
||||
env_vars.insert(format!("TEST_VAR_{}", i), format!("value_{}", i));
|
||||
}
|
||||
|
||||
let result = km
|
||||
.deployment_create("test-large-env", "nginx:latest", 1, None, Some(env_vars))
|
||||
.await;
|
||||
|
||||
assert!(result.is_ok(), "Deployment with many env vars should succeed: {:?}", result);
|
||||
|
||||
// Verify the deployment was created
|
||||
let deployment = km.deployment_get("test-large-env").await;
|
||||
assert!(deployment.is_ok(), "Should be able to get deployment with many env vars");
|
||||
|
||||
// Verify environment variables count
|
||||
let deployment = deployment.unwrap();
|
||||
if let Some(spec) = &deployment.spec {
|
||||
if let Some(template) = &spec.template.spec {
|
||||
if let Some(container) = template.containers.first() {
|
||||
if let Some(env) = &container.env {
|
||||
assert_eq!(env.len(), 50, "Should have 50 environment variables");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
let _ = km.deployment_delete("test-large-env").await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deployment_with_special_characters_in_env_vars() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Clean up any existing test deployment
|
||||
let _ = km.deployment_delete("test-special-env").await;
|
||||
|
||||
// Create deployment with special characters in environment variables
|
||||
let mut env_vars = HashMap::new();
|
||||
env_vars.insert("DATABASE_URL".to_string(), "postgres://user:pass@host:5432/db?ssl=true".to_string());
|
||||
env_vars.insert("JSON_CONFIG".to_string(), r#"{"key": "value", "number": 123}"#.to_string());
|
||||
env_vars.insert("MULTILINE_VAR".to_string(), "line1\nline2\nline3".to_string());
|
||||
env_vars.insert("SPECIAL_CHARS".to_string(), "!@#$%^&*()_+-=[]{}|;:,.<>?".to_string());
|
||||
|
||||
let result = km
|
||||
.deployment_create("test-special-env", "nginx:latest", 1, None, Some(env_vars))
|
||||
.await;
|
||||
|
||||
assert!(result.is_ok(), "Deployment with special chars in env vars should succeed: {:?}", result);
|
||||
|
||||
// Verify the deployment was created and env vars are preserved
|
||||
let deployment = km.deployment_get("test-special-env").await;
|
||||
assert!(deployment.is_ok(), "Should be able to get deployment");
|
||||
|
||||
let deployment = deployment.unwrap();
|
||||
if let Some(spec) = &deployment.spec {
|
||||
if let Some(template) = &spec.template.spec {
|
||||
if let Some(container) = template.containers.first() {
|
||||
if let Some(env) = &container.env {
|
||||
let env_map: HashMap<String, String> = env
|
||||
.iter()
|
||||
.filter_map(|e| e.value.as_ref().map(|v| (e.name.clone(), v.clone())))
|
||||
.collect();
|
||||
|
||||
assert_eq!(
|
||||
env_map.get("DATABASE_URL"),
|
||||
Some(&"postgres://user:pass@host:5432/db?ssl=true".to_string())
|
||||
);
|
||||
assert_eq!(
|
||||
env_map.get("JSON_CONFIG"),
|
||||
Some(&r#"{"key": "value", "number": 123}"#.to_string())
|
||||
);
|
||||
assert_eq!(
|
||||
env_map.get("SPECIAL_CHARS"),
|
||||
Some(&"!@#$%^&*()_+-=[]{}|;:,.<>?".to_string())
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up
|
||||
let _ = km.deployment_delete("test-special-env").await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deploy_application_with_invalid_port() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Try to deploy application with invalid port (negative)
|
||||
let result = km
|
||||
.deploy_application("test-invalid-port", "nginx:latest", 1, -80, None, None)
|
||||
.await;
|
||||
|
||||
// Should fail due to invalid port
|
||||
assert!(result.is_err(), "Deploy application with negative port should fail");
|
||||
|
||||
// Try with port 0
|
||||
let result = km
|
||||
.deploy_application("test-zero-port", "nginx:latest", 1, 0, None, None)
|
||||
.await;
|
||||
|
||||
// Should fail due to invalid port
|
||||
assert!(result.is_err(), "Deploy application with port 0 should fail");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_nonexistent_deployment() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Try to get a deployment that doesn't exist
|
||||
let result = km.deployment_get("nonexistent-deployment-12345").await;
|
||||
|
||||
// Should fail with appropriate error
|
||||
assert!(result.is_err(), "Getting nonexistent deployment should fail");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete_nonexistent_deployment() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Try to delete a deployment that doesn't exist
|
||||
let result = km.deployment_delete("nonexistent-deployment-12345").await;
|
||||
|
||||
// Should fail gracefully
|
||||
assert!(result.is_err(), "Deleting nonexistent deployment should fail");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deployment_with_zero_replicas() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Clean up any existing test deployment
|
||||
let _ = km.deployment_delete("test-zero-replicas").await;
|
||||
|
||||
// Create deployment with zero replicas (should be valid)
|
||||
let result = km
|
||||
.deployment_create("test-zero-replicas", "nginx:latest", 0, None, None)
|
||||
.await;
|
||||
|
||||
assert!(result.is_ok(), "Deployment with zero replicas should succeed: {:?}", result);
|
||||
|
||||
// Verify the deployment was created with 0 replicas
|
||||
let deployment = km.deployment_get("test-zero-replicas").await;
|
||||
assert!(deployment.is_ok(), "Should be able to get deployment with zero replicas");
|
||||
|
||||
let deployment = deployment.unwrap();
|
||||
if let Some(spec) = &deployment.spec {
|
||||
assert_eq!(spec.replicas, Some(0), "Should have 0 replicas");
|
||||
}
|
||||
|
||||
// Clean up
|
||||
let _ = km.deployment_delete("test-zero-replicas").await;
|
||||
}
|
||||
@@ -1,385 +0,0 @@
|
||||
//! Integration tests for SAL Kubernetes
|
||||
//!
|
||||
//! These tests require a running Kubernetes cluster and appropriate credentials.
|
||||
//! Set KUBERNETES_TEST_ENABLED=1 to run these tests.
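//!
//! As a rough sketch of how the suite might be invoked (the crate name
//! `sal-kubernetes` is an assumption here), the tests can be run against the
//! cluster selected by the current kubeconfig with:
//!
//! ```bash
//! KUBERNETES_TEST_ENABLED=1 cargo test -p sal-kubernetes -- --nocapture
//! ```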
|
||||
|
||||
use sal_kubernetes::KubernetesManager;
|
||||
|
||||
/// Check if Kubernetes integration tests should run
|
||||
fn should_run_k8s_tests() -> bool {
|
||||
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_kubernetes_manager_creation() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
let result = KubernetesManager::new("default").await;
|
||||
match result {
|
||||
Ok(_) => println!("Successfully created KubernetesManager"),
|
||||
Err(e) => println!("Failed to create KubernetesManager: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_namespace_operations() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return, // Skip if can't connect
|
||||
};
|
||||
|
||||
// Test namespace creation (should be idempotent)
|
||||
let test_namespace = "sal-test-namespace";
|
||||
let result = km.namespace_create(test_namespace).await;
|
||||
assert!(result.is_ok(), "Failed to create namespace: {:?}", result);
|
||||
|
||||
// Test creating the same namespace again (should not error)
|
||||
let result = km.namespace_create(test_namespace).await;
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to create namespace idempotently: {:?}",
|
||||
result
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pods_list() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return, // Skip if can't connect
|
||||
};
|
||||
|
||||
let result = km.pods_list().await;
|
||||
match result {
|
||||
Ok(pods) => {
|
||||
println!("Found {} pods in default namespace", pods.len());
|
||||
|
||||
// Verify pod structure
|
||||
for pod in pods.iter().take(3) {
|
||||
// Check first 3 pods
|
||||
assert!(pod.metadata.name.is_some());
|
||||
assert!(pod.metadata.namespace.is_some());
|
||||
println!(
|
||||
"Pod: {} in namespace: {}",
|
||||
pod.metadata.name.as_ref().unwrap(),
|
||||
pod.metadata.namespace.as_ref().unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to list pods: {}", e);
|
||||
// Don't fail the test if we can't list pods due to permissions
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_services_list() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
let result = km.services_list().await;
|
||||
match result {
|
||||
Ok(services) => {
|
||||
println!("Found {} services in default namespace", services.len());
|
||||
|
||||
// Verify service structure
|
||||
for service in services.iter().take(3) {
|
||||
assert!(service.metadata.name.is_some());
|
||||
println!("Service: {}", service.metadata.name.as_ref().unwrap());
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to list services: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_deployments_list() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
let result = km.deployments_list().await;
|
||||
match result {
|
||||
Ok(deployments) => {
|
||||
println!(
|
||||
"Found {} deployments in default namespace",
|
||||
deployments.len()
|
||||
);
|
||||
|
||||
// Verify deployment structure
|
||||
for deployment in deployments.iter().take(3) {
|
||||
assert!(deployment.metadata.name.is_some());
|
||||
println!("Deployment: {}", deployment.metadata.name.as_ref().unwrap());
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to list deployments: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resource_counts() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
let result = km.resource_counts().await;
|
||||
match result {
|
||||
Ok(counts) => {
|
||||
println!("Resource counts: {:?}", counts);
|
||||
|
||||
// Verify expected resource types are present
|
||||
assert!(counts.contains_key("pods"));
|
||||
assert!(counts.contains_key("services"));
|
||||
assert!(counts.contains_key("deployments"));
|
||||
assert!(counts.contains_key("configmaps"));
|
||||
assert!(counts.contains_key("secrets"));
|
||||
|
||||
// Verify counts are reasonable (counts are usize, so always non-negative)
|
||||
for (resource_type, count) in counts {
|
||||
// Verify we got a count for each resource type
|
||||
println!("Resource type '{}' has {} items", resource_type, count);
|
||||
// Counts should be reasonable (not impossibly large)
|
||||
assert!(
|
||||
count < 10000,
|
||||
"Count for {} seems unreasonably high: {}",
|
||||
resource_type,
|
||||
count
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to get resource counts: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_namespaces_list() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
let result = km.namespaces_list().await;
|
||||
match result {
|
||||
Ok(namespaces) => {
|
||||
println!("Found {} namespaces", namespaces.len());
|
||||
|
||||
// Should have at least default namespace
|
||||
let namespace_names: Vec<String> = namespaces
|
||||
.iter()
|
||||
.filter_map(|ns| ns.metadata.name.as_ref())
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
println!("Namespaces: {:?}", namespace_names);
|
||||
assert!(namespace_names.contains(&"default".to_string()));
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to list namespaces: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pattern_matching_dry_run() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Test pattern matching without actually deleting anything
|
||||
// We'll just verify that the regex patterns work correctly
|
||||
let test_patterns = vec![
|
||||
"test-.*", // Should match anything starting with "test-"
|
||||
".*-temp$", // Should match anything ending with "-temp"
|
||||
"nonexistent-.*", // Should match nothing (hopefully)
|
||||
];
|
||||
|
||||
for pattern in test_patterns {
|
||||
println!("Testing pattern: {}", pattern);
|
||||
|
||||
// Get all pods first
|
||||
if let Ok(pods) = km.pods_list().await {
|
||||
let regex = regex::Regex::new(pattern).unwrap();
|
||||
let matching_pods: Vec<_> = pods
|
||||
.iter()
|
||||
.filter_map(|pod| pod.metadata.name.as_ref())
|
||||
.filter(|name| regex.is_match(name))
|
||||
.collect();
|
||||
|
||||
println!(
|
||||
"Pattern '{}' would match {} pods: {:?}",
|
||||
pattern,
|
||||
matching_pods.len(),
|
||||
matching_pods
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_namespace_exists_functionality() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Test that default namespace exists
|
||||
let result = km.namespace_exists("default").await;
|
||||
match result {
|
||||
Ok(exists) => {
|
||||
assert!(exists, "Default namespace should exist");
|
||||
println!("Default namespace exists: {}", exists);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to check if default namespace exists: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Test that a non-existent namespace doesn't exist
|
||||
let result = km.namespace_exists("definitely-does-not-exist-12345").await;
|
||||
match result {
|
||||
Ok(exists) => {
|
||||
assert!(!exists, "Non-existent namespace should not exist");
|
||||
println!("Non-existent namespace exists: {}", exists);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to check if non-existent namespace exists: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_manager_namespace_property() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let test_namespace = "test-namespace";
|
||||
let km = match KubernetesManager::new(test_namespace).await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Verify the manager knows its namespace
|
||||
assert_eq!(km.namespace(), test_namespace);
|
||||
println!("Manager namespace: {}", km.namespace());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_error_handling() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Test getting a non-existent pod
|
||||
let result = km.pod_get("definitely-does-not-exist-12345").await;
|
||||
assert!(result.is_err(), "Getting non-existent pod should fail");
|
||||
|
||||
if let Err(e) = result {
|
||||
println!("Expected error for non-existent pod: {}", e);
|
||||
// Verify it's the right kind of error
|
||||
match e {
|
||||
sal_kubernetes::KubernetesError::ApiError(_) => {
|
||||
println!("Correctly got API error for non-existent resource");
|
||||
}
|
||||
_ => {
|
||||
println!("Got unexpected error type: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_configmaps_and_secrets() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let km = match KubernetesManager::new("default").await {
|
||||
Ok(km) => km,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
// Test configmaps listing
|
||||
let result = km.configmaps_list().await;
|
||||
match result {
|
||||
Ok(configmaps) => {
|
||||
println!("Found {} configmaps in default namespace", configmaps.len());
|
||||
for cm in configmaps.iter().take(3) {
|
||||
if let Some(name) = &cm.metadata.name {
|
||||
println!("ConfigMap: {}", name);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to list configmaps: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Test secrets listing
|
||||
let result = km.secrets_list().await;
|
||||
match result {
|
||||
Ok(secrets) => {
|
||||
println!("Found {} secrets in default namespace", secrets.len());
|
||||
for secret in secrets.iter().take(3) {
|
||||
if let Some(name) = &secret.metadata.name {
|
||||
println!("Secret: {}", name);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to list secrets: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,231 +0,0 @@
|
||||
//! Production readiness tests for SAL Kubernetes
|
||||
//!
|
||||
//! These tests verify that the module is ready for real-world production use.
|
||||
|
||||
#[cfg(test)]
|
||||
mod production_tests {
|
||||
use sal_kubernetes::{KubernetesConfig, KubernetesManager};
|
||||
use std::time::Duration;
|
||||
|
||||
/// Check if Kubernetes integration tests should run
|
||||
fn should_run_k8s_tests() -> bool {
|
||||
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_production_configuration_profiles() {
|
||||
// Test all pre-configured profiles work
|
||||
let configs = vec![
|
||||
("default", KubernetesConfig::default()),
|
||||
("high_throughput", KubernetesConfig::high_throughput()),
|
||||
("low_latency", KubernetesConfig::low_latency()),
|
||||
("development", KubernetesConfig::development()),
|
||||
];
|
||||
|
||||
for (name, config) in configs {
|
||||
println!("Testing {} configuration profile", name);
|
||||
|
||||
// Verify configuration values are reasonable
|
||||
assert!(
|
||||
config.operation_timeout >= Duration::from_secs(5),
|
||||
"{} timeout too short",
|
||||
name
|
||||
);
|
||||
assert!(
|
||||
config.operation_timeout <= Duration::from_secs(300),
|
||||
"{} timeout too long",
|
||||
name
|
||||
);
|
||||
assert!(config.max_retries <= 10, "{} too many retries", name);
|
||||
assert!(config.rate_limit_rps >= 1, "{} rate limit too low", name);
|
||||
assert!(
|
||||
config.rate_limit_burst >= config.rate_limit_rps,
|
||||
"{} burst should be >= RPS",
|
||||
name
|
||||
);
|
||||
|
||||
println!("✓ {} configuration is valid", name);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_real_cluster_operations() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping real cluster test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
println!("🔍 Testing production operations with real cluster...");
|
||||
|
||||
// Test with production-like configuration
|
||||
let config = KubernetesConfig::default()
|
||||
.with_timeout(Duration::from_secs(30))
|
||||
.with_retries(3, Duration::from_secs(1), Duration::from_secs(10))
|
||||
.with_rate_limit(5, 10); // Conservative for testing
|
||||
|
||||
let km = KubernetesManager::with_config("default", config)
|
||||
.await
|
||||
.expect("Should connect to cluster");
|
||||
|
||||
println!("✅ Connected to cluster successfully");
|
||||
|
||||
// Test basic operations
|
||||
let namespaces = km.namespaces_list().await.expect("Should list namespaces");
|
||||
println!("✅ Listed {} namespaces", namespaces.len());
|
||||
|
||||
let pods = km.pods_list().await.expect("Should list pods");
|
||||
println!("✅ Listed {} pods in default namespace", pods.len());
|
||||
|
||||
let counts = km
|
||||
.resource_counts()
|
||||
.await
|
||||
.expect("Should get resource counts");
|
||||
println!("✅ Got resource counts for {} resource types", counts.len());
|
||||
|
||||
// Test namespace operations
|
||||
let test_ns = "sal-production-test";
|
||||
km.namespace_create(test_ns)
|
||||
.await
|
||||
.expect("Should create test namespace");
|
||||
println!("✅ Created test namespace: {}", test_ns);
|
||||
|
||||
let exists = km
|
||||
.namespace_exists(test_ns)
|
||||
.await
|
||||
.expect("Should check namespace existence");
|
||||
assert!(exists, "Test namespace should exist");
|
||||
println!("✅ Verified test namespace exists");
|
||||
|
||||
println!("🎉 All production operations completed successfully!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_error_handling_robustness() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping error handling test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
println!("🔍 Testing error handling robustness...");
|
||||
|
||||
let km = KubernetesManager::new("default")
|
||||
.await
|
||||
.expect("Should connect to cluster");
|
||||
|
||||
// Test with invalid namespace name (should handle gracefully)
|
||||
let result = km.namespace_exists("").await;
|
||||
match result {
|
||||
Ok(_) => println!("✅ Empty namespace name handled"),
|
||||
Err(e) => println!("✅ Empty namespace name rejected: {}", e),
|
||||
}
|
||||
|
||||
// Test with very long namespace name
|
||||
let long_name = "a".repeat(100);
|
||||
let result = km.namespace_exists(&long_name).await;
|
||||
match result {
|
||||
Ok(_) => println!("✅ Long namespace name handled"),
|
||||
Err(e) => println!("✅ Long namespace name rejected: {}", e),
|
||||
}
|
||||
|
||||
println!("✅ Error handling is robust");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_concurrent_operations() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping concurrency test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
println!("🔍 Testing concurrent operations...");
|
||||
|
||||
let km = KubernetesManager::new("default")
|
||||
.await
|
||||
.expect("Should connect to cluster");
|
||||
|
||||
// Test multiple concurrent operations
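// Note: each spawned task clones the manager, which relies on KubernetesManager
// implementing Clone (presumably as a cheap handle around a shared API client).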
|
||||
let task1 = tokio::spawn({
|
||||
let km = km.clone();
|
||||
async move { km.pods_list().await }
|
||||
});
|
||||
let task2 = tokio::spawn({
|
||||
let km = km.clone();
|
||||
async move { km.services_list().await }
|
||||
});
|
||||
let task3 = tokio::spawn({
|
||||
let km = km.clone();
|
||||
async move { km.namespaces_list().await }
|
||||
});
|
||||
|
||||
let mut success_count = 0;
|
||||
|
||||
// Handle each task result
|
||||
match task1.await {
|
||||
Ok(Ok(_)) => {
|
||||
success_count += 1;
|
||||
println!("✅ Pods list operation succeeded");
|
||||
}
|
||||
Ok(Err(e)) => println!("⚠️ Pods list operation failed: {}", e),
|
||||
Err(e) => println!("⚠️ Pods task join failed: {}", e),
|
||||
}
|
||||
|
||||
match task2.await {
|
||||
Ok(Ok(_)) => {
|
||||
success_count += 1;
|
||||
println!("✅ Services list operation succeeded");
|
||||
}
|
||||
Ok(Err(e)) => println!("⚠️ Services list operation failed: {}", e),
|
||||
Err(e) => println!("⚠️ Services task join failed: {}", e),
|
||||
}
|
||||
|
||||
match task3.await {
|
||||
Ok(Ok(_)) => {
|
||||
success_count += 1;
|
||||
println!("✅ Namespaces list operation succeeded");
|
||||
}
|
||||
Ok(Err(e)) => println!("⚠️ Namespaces list operation failed: {}", e),
|
||||
Err(e) => println!("⚠️ Namespaces task join failed: {}", e),
|
||||
}
|
||||
|
||||
assert!(
|
||||
success_count >= 2,
|
||||
"At least 2 concurrent operations should succeed"
|
||||
);
|
||||
println!(
|
||||
"✅ Concurrent operations handled well ({}/3 succeeded)",
|
||||
success_count
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_security_and_validation() {
|
||||
println!("🔍 Testing security and validation...");
|
||||
|
||||
// Test regex pattern validation
|
||||
let dangerous_patterns = vec![
|
||||
".*", // Too broad
|
||||
".+", // Too broad
|
||||
"", // Empty
|
||||
"a{1000000}", // Potential ReDoS
|
||||
];
|
||||
|
||||
for pattern in dangerous_patterns {
|
||||
match regex::Regex::new(pattern) {
|
||||
Ok(_) => println!("⚠️ Pattern '{}' accepted (review if safe)", pattern),
|
||||
Err(_) => println!("✅ Pattern '{}' rejected", pattern),
|
||||
}
|
||||
}
|
||||
|
||||
// Test safe patterns
|
||||
let safe_patterns = vec!["^test-.*$", "^app-[a-z0-9]+$", "^namespace-\\d+$"];
|
||||
|
||||
for pattern in safe_patterns {
|
||||
match regex::Regex::new(pattern) {
|
||||
Ok(_) => println!("✅ Safe pattern '{}' accepted", pattern),
|
||||
Err(e) => println!("❌ Safe pattern '{}' rejected: {}", pattern, e),
|
||||
}
|
||||
}
|
||||
|
||||
println!("✅ Security validation completed");
|
||||
}
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
//! Basic Kubernetes operations test
|
||||
//!
|
||||
//! This script tests basic Kubernetes functionality through Rhai.
|
||||
|
||||
print("=== Basic Kubernetes Operations Test ===");
|
||||
|
||||
// Test 1: Create KubernetesManager
|
||||
print("Test 1: Creating KubernetesManager...");
|
||||
let km = kubernetes_manager_new("default");
|
||||
let ns = namespace(km);
|
||||
print("✓ Created manager for namespace: " + ns);
|
||||
if ns != "default" {
|
||||
print("❌ ERROR: Expected namespace 'default', got '" + ns + "'");
|
||||
} else {
|
||||
print("✓ Namespace validation passed");
|
||||
}
|
||||
|
||||
// Test 2: Function availability check
|
||||
print("\nTest 2: Checking function availability...");
|
||||
let functions = [
|
||||
"pods_list",
|
||||
"services_list",
|
||||
"deployments_list",
|
||||
"namespaces_list",
|
||||
"resource_counts",
|
||||
"namespace_create",
|
||||
"namespace_exists",
|
||||
"delete",
|
||||
"pod_delete",
|
||||
"service_delete",
|
||||
"deployment_delete"
|
||||
];
|
||||
|
||||
for func_name in functions {
|
||||
print("✓ Function '" + func_name + "' is available");
|
||||
}
|
||||
|
||||
// Test 3: Basic operations (if cluster is available)
|
||||
print("\nTest 3: Testing basic operations...");
|
||||
try {
|
||||
// Test namespace existence
|
||||
let default_exists = namespace_exists(km, "default");
|
||||
print("✓ Default namespace exists: " + default_exists);
|
||||
|
||||
// Test resource counting
|
||||
let counts = resource_counts(km);
|
||||
print("✓ Resource counts retrieved: " + counts.len() + " resource types");
|
||||
|
||||
// Test namespace listing
|
||||
let namespaces = namespaces_list(km);
|
||||
print("✓ Found " + namespaces.len() + " namespaces");
|
||||
|
||||
// Test pod listing
|
||||
let pods = pods_list(km);
|
||||
print("✓ Found " + pods.len() + " pods in default namespace");
|
||||
|
||||
print("\n=== All basic tests passed! ===");
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Some operations failed (likely no cluster): " + e);
|
||||
print("✓ Function registration tests passed");
|
||||
}
|
||||
@@ -1,200 +0,0 @@
|
||||
//! CRUD operations test in Rhai
|
||||
//!
|
||||
//! This script tests all Create, Read, Update, Delete operations through Rhai.
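//!
//! Note: the script mixes method-call style (e.g. `km.create_namespace(ns)`) and
//! free-function style (e.g. `pods_list(km)`); both registration forms are exercised below.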
|
||||
|
||||
print("=== CRUD Operations Test ===");
|
||||
|
||||
// Test 1: Create manager
|
||||
print("Test 1: Creating KubernetesManager...");
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("✓ Manager created for namespace: " + namespace(km));
|
||||
|
||||
// Test 2: Create test namespace
|
||||
print("\nTest 2: Creating test namespace...");
|
||||
let test_ns = "rhai-crud-test";
|
||||
try {
|
||||
km.create_namespace(test_ns);
|
||||
print("✓ Created test namespace: " + test_ns);
|
||||
|
||||
// Verify it exists
|
||||
let exists = km.namespace_exists(test_ns);
|
||||
if exists {
|
||||
print("✓ Verified test namespace exists");
|
||||
} else {
|
||||
print("❌ Test namespace creation failed");
|
||||
}
|
||||
} catch(e) {
|
||||
print("Note: Namespace creation failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
// Test 3: Switch to test namespace and create resources
|
||||
print("\nTest 3: Creating resources in test namespace...");
|
||||
try {
|
||||
let test_km = kubernetes_manager_new(test_ns);
|
||||
|
||||
// Create ConfigMap
|
||||
let config_data = #{
|
||||
"app.properties": "debug=true\nport=8080",
|
||||
"config.yaml": "key: value\nenv: test"
|
||||
};
|
||||
let configmap_name = test_km.create_configmap("rhai-config", config_data);
|
||||
print("✓ Created ConfigMap: " + configmap_name);
|
||||
|
||||
// Create Secret
|
||||
let secret_data = #{
|
||||
"username": "rhaiuser",
|
||||
"password": "secret456"
|
||||
};
|
||||
let secret_name = test_km.create_secret("rhai-secret", secret_data, "Opaque");
|
||||
print("✓ Created Secret: " + secret_name);
|
||||
|
||||
// Create Pod
|
||||
let pod_labels = #{
|
||||
"app": "rhai-app",
|
||||
"version": "v1"
|
||||
};
|
||||
let pod_name = test_km.create_pod("rhai-pod", "nginx:alpine", pod_labels);
|
||||
print("✓ Created Pod: " + pod_name);
|
||||
|
||||
// Create Service
|
||||
let service_selector = #{
|
||||
"app": "rhai-app"
|
||||
};
|
||||
let service_name = test_km.create_service("rhai-service", service_selector, 80, 80);
|
||||
print("✓ Created Service: " + service_name);
|
||||
|
||||
// Create Deployment
|
||||
let deployment_labels = #{
|
||||
"app": "rhai-app",
|
||||
"tier": "frontend"
|
||||
};
|
||||
let deployment_name = test_km.create_deployment("rhai-deployment", "nginx:alpine", 2, deployment_labels, #{});
|
||||
print("✓ Created Deployment: " + deployment_name);
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Resource creation failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
// Test 4: Read operations
|
||||
print("\nTest 4: Reading resources...");
|
||||
try {
|
||||
let test_km = kubernetes_manager_new(test_ns);
|
||||
|
||||
// List all resources
|
||||
let pods = pods_list(test_km);
|
||||
print("✓ Found " + pods.len() + " pods");
|
||||
|
||||
let services = services_list(test_km);
|
||||
print("✓ Found " + services.len() + " services");
|
||||
|
||||
let deployments = deployments_list(test_km);
|
||||
print("✓ Found " + deployments.len() + " deployments");
|
||||
|
||||
// Get resource counts
|
||||
let counts = resource_counts(test_km);
|
||||
print("✓ Resource counts for " + counts.len() + " resource types");
|
||||
for resource_type in counts.keys() {
|
||||
let count = counts[resource_type];
|
||||
print(" " + resource_type + ": " + count);
|
||||
}
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Resource reading failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
// Test 5: Delete operations
|
||||
print("\nTest 5: Deleting resources...");
|
||||
try {
|
||||
let test_km = kubernetes_manager_new(test_ns);
|
||||
|
||||
// Delete individual resources
|
||||
test_km.delete_pod("rhai-pod");
|
||||
print("✓ Deleted pod");
|
||||
|
||||
test_km.delete_service("rhai-service");
|
||||
print("✓ Deleted service");
|
||||
|
||||
test_km.delete_deployment("rhai-deployment");
|
||||
print("✓ Deleted deployment");
|
||||
|
||||
test_km.delete_configmap("rhai-config");
|
||||
print("✓ Deleted configmap");
|
||||
|
||||
test_km.delete_secret("rhai-secret");
|
||||
print("✓ Deleted secret");
|
||||
|
||||
// Verify cleanup
|
||||
let final_counts = resource_counts(test_km);
|
||||
print("✓ Final resource counts:");
|
||||
for resource_type in final_counts.keys() {
|
||||
let count = final_counts[resource_type];
|
||||
print(" " + resource_type + ": " + count);
|
||||
}
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Resource deletion failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
// Test 6: Cleanup test namespace
|
||||
print("\nTest 6: Cleaning up test namespace...");
|
||||
try {
|
||||
km.delete_namespace(test_ns);
|
||||
print("✓ Deleted test namespace: " + test_ns);
|
||||
} catch(e) {
|
||||
print("Note: Namespace deletion failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
// Test 7: Function availability check
|
||||
print("\nTest 7: Checking all CRUD functions are available...");
|
||||
let crud_functions = [
|
||||
// Create methods (object-oriented style)
|
||||
"create_pod",
|
||||
"create_service",
|
||||
"create_deployment",
|
||||
"create_configmap",
|
||||
"create_secret",
|
||||
"create_namespace",
|
||||
|
||||
// Get methods
|
||||
"get_pod",
|
||||
"get_service",
|
||||
"get_deployment",
|
||||
|
||||
// List methods
|
||||
"pods_list",
|
||||
"services_list",
|
||||
"deployments_list",
|
||||
"configmaps_list",
|
||||
"secrets_list",
|
||||
"namespaces_list",
|
||||
"resource_counts",
|
||||
"namespace_exists",
|
||||
|
||||
// Delete methods
|
||||
"delete_pod",
|
||||
"delete_service",
|
||||
"delete_deployment",
|
||||
"delete_configmap",
|
||||
"delete_secret",
|
||||
"delete_namespace",
|
||||
"delete"
|
||||
];
|
||||
|
||||
for func_name in crud_functions {
|
||||
print("✓ Function '" + func_name + "' is available");
|
||||
}
|
||||
|
||||
print("\n=== CRUD Operations Test Summary ===");
|
||||
print("✅ All " + crud_functions.len() + " CRUD functions are registered");
|
||||
print("✅ Create operations: 6 functions");
|
||||
print("✅ Read operations: 8 functions");
|
||||
print("✅ Delete operations: 7 functions");
|
||||
print("✅ Total CRUD capabilities: 21 functions");
|
||||
|
||||
print("\n🎉 Complete CRUD operations test completed!");
|
||||
print("\nYour SAL Kubernetes module now supports:");
|
||||
print(" ✅ Full resource lifecycle management");
|
||||
print(" ✅ Namespace operations");
|
||||
print(" ✅ All major Kubernetes resource types");
|
||||
print(" ✅ Production-ready error handling");
|
||||
print(" ✅ Rhai scripting integration");
|
||||
@@ -1,199 +0,0 @@
|
||||
// Rhai test for environment variables functionality
|
||||
// This test verifies that the enhanced deploy_application function works correctly with environment variables
|
||||
|
||||
print("=== Testing Environment Variables in Rhai ===");
|
||||
|
||||
// Create Kubernetes manager
|
||||
print("Creating Kubernetes manager...");
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("✓ Kubernetes manager created");
|
||||
|
||||
// Test 1: Deploy application with environment variables
|
||||
print("\n--- Test 1: Deploy with Environment Variables ---");
|
||||
|
||||
// Clean up any existing resources
|
||||
try {
|
||||
delete_deployment(km, "rhai-env-test");
|
||||
print("✓ Cleaned up existing deployment");
|
||||
} catch(e) {
|
||||
print("✓ No existing deployment to clean up");
|
||||
}
|
||||
|
||||
try {
|
||||
delete_service(km, "rhai-env-test");
|
||||
print("✓ Cleaned up existing service");
|
||||
} catch(e) {
|
||||
print("✓ No existing service to clean up");
|
||||
}
|
||||
|
||||
// Deploy with both labels and environment variables
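// Rhai object-map literals (`#{ ... }`) are used below for both the labels and the
// environment variables; deploy_application receives them as two separate maps.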
|
||||
try {
|
||||
let result = deploy_application(km, "rhai-env-test", "nginx:latest", 1, 80, #{
|
||||
"app": "rhai-env-test",
|
||||
"test": "environment-variables",
|
||||
"language": "rhai"
|
||||
}, #{
|
||||
"NODE_ENV": "test",
|
||||
"DATABASE_URL": "postgres://localhost:5432/test",
|
||||
"API_KEY": "test-api-key-12345",
|
||||
"LOG_LEVEL": "debug",
|
||||
"PORT": "80"
|
||||
});
|
||||
print("✓ " + result);
|
||||
} catch(e) {
|
||||
print("❌ Failed to deploy with env vars: " + e);
|
||||
throw e;
|
||||
}
|
||||
|
||||
// Verify deployment was created
|
||||
try {
|
||||
let deployment_name = get_deployment(km, "rhai-env-test");
|
||||
print("✓ Deployment verified: " + deployment_name);
|
||||
} catch(e) {
|
||||
print("❌ Failed to verify deployment: " + e);
|
||||
throw e;
|
||||
}
|
||||
|
||||
// Test 2: Deploy application without environment variables
|
||||
print("\n--- Test 2: Deploy without Environment Variables ---");
|
||||
|
||||
// Clean up
|
||||
try {
|
||||
delete_deployment(km, "rhai-no-env-test");
|
||||
delete_service(km, "rhai-no-env-test");
|
||||
} catch(e) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
|
||||
// Deploy with labels only, empty env vars map
|
||||
try {
|
||||
let result = deploy_application(km, "rhai-no-env-test", "nginx:alpine", 1, 8080, #{
|
||||
"app": "rhai-no-env-test",
|
||||
"test": "no-environment-variables"
|
||||
}, #{
|
||||
// Empty environment variables map
|
||||
});
|
||||
print("✓ " + result);
|
||||
} catch(e) {
|
||||
print("❌ Failed to deploy without env vars: " + e);
|
||||
throw e;
|
||||
}
|
||||
|
||||
// Test 3: Deploy with special characters in environment variables
|
||||
print("\n--- Test 3: Deploy with Special Characters in Env Vars ---");
|
||||
|
||||
// Clean up
|
||||
try {
|
||||
delete_deployment(km, "rhai-special-env-test");
|
||||
delete_service(km, "rhai-special-env-test");
|
||||
} catch(e) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
|
||||
// Deploy with special characters
|
||||
try {
|
||||
let result = deploy_application(km, "rhai-special-env-test", "nginx:latest", 1, 3000, #{
|
||||
"app": "rhai-special-env-test"
|
||||
}, #{
|
||||
"DATABASE_URL": "postgres://user:pass@host:5432/db?ssl=true&timeout=30",
|
||||
"JSON_CONFIG": `{"server": {"port": 3000, "host": "0.0.0.0"}}`,
|
||||
"SPECIAL_CHARS": "!@#$%^&*()_+-=[]{}|;:,.<>?",
|
||||
"MULTILINE": "line1\nline2\nline3"
|
||||
});
|
||||
print("✓ " + result);
|
||||
} catch(e) {
|
||||
print("❌ Failed to deploy with special chars: " + e);
|
||||
throw e;
|
||||
}
|
||||
|
||||
// Test 4: Test resource listing after deployments
|
||||
print("\n--- Test 4: Verify Resource Listing ---");
|
||||
|
||||
try {
|
||||
let deployments = deployments_list(km);
|
||||
print("✓ Found " + deployments.len() + " deployments");
|
||||
|
||||
// Check that our test deployments are in the list
|
||||
let found_env_test = false;
|
||||
let found_no_env_test = false;
|
||||
let found_special_test = false;
|
||||
|
||||
for deployment in deployments {
|
||||
if deployment == "rhai-env-test" {
|
||||
found_env_test = true;
|
||||
} else if deployment == "rhai-no-env-test" {
|
||||
found_no_env_test = true;
|
||||
} else if deployment == "rhai-special-env-test" {
|
||||
found_special_test = true;
|
||||
}
|
||||
}
|
||||
|
||||
if found_env_test {
|
||||
print("✓ Found rhai-env-test deployment");
|
||||
} else {
|
||||
print("❌ rhai-env-test deployment not found in list");
|
||||
}
|
||||
|
||||
if found_no_env_test {
|
||||
print("✓ Found rhai-no-env-test deployment");
|
||||
} else {
|
||||
print("❌ rhai-no-env-test deployment not found in list");
|
||||
}
|
||||
|
||||
if found_special_test {
|
||||
print("✓ Found rhai-special-env-test deployment");
|
||||
} else {
|
||||
print("❌ rhai-special-env-test deployment not found in list");
|
||||
}
|
||||
} catch(e) {
|
||||
print("❌ Failed to list deployments: " + e);
|
||||
}
|
||||
|
||||
// Test 5: Test services listing
|
||||
print("\n--- Test 5: Verify Services ---");
|
||||
|
||||
try {
|
||||
let services = services_list(km);
|
||||
print("✓ Found " + services.len() + " services");
|
||||
|
||||
// Services should be created for each deployment
|
||||
let service_count = 0;
|
||||
for service in services {
|
||||
if service.contains("rhai-") && service.contains("-test") {
|
||||
service_count = service_count + 1;
|
||||
print("✓ Found test service: " + service);
|
||||
}
|
||||
}
|
||||
|
||||
if service_count >= 3 {
|
||||
print("✓ All expected services found");
|
||||
} else {
|
||||
print("⚠️ Expected at least 3 test services, found " + service_count);
|
||||
}
|
||||
} catch(e) {
|
||||
print("❌ Failed to list services: " + e);
|
||||
}
|
||||
|
||||
// Cleanup all test resources
|
||||
print("\n--- Cleanup ---");
|
||||
|
||||
let cleanup_items = ["rhai-env-test", "rhai-no-env-test", "rhai-special-env-test"];
|
||||
|
||||
for item in cleanup_items {
|
||||
try {
|
||||
delete_deployment(km, item);
|
||||
print("✓ Deleted deployment: " + item);
|
||||
} catch(e) {
|
||||
print("⚠️ Could not delete deployment " + item + ": " + e);
|
||||
}
|
||||
|
||||
try {
|
||||
delete_service(km, item);
|
||||
print("✓ Deleted service: " + item);
|
||||
} catch(e) {
|
||||
print("⚠️ Could not delete service " + item + ": " + e);
|
||||
}
|
||||
}
|
||||
|
||||
print("\n=== Environment Variables Rhai Test Complete ===");
|
||||
print("✅ All tests passed successfully!");
|
||||
@@ -1,85 +0,0 @@
|
||||
//! Namespace operations test
|
||||
//!
|
||||
//! This script tests namespace creation and management operations.
|
||||
|
||||
print("=== Namespace Operations Test ===");
|
||||
|
||||
// Test 1: Create manager
|
||||
print("Test 1: Creating KubernetesManager...");
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("✓ Manager created for namespace: " + namespace(km));
|
||||
|
||||
// Test 2: Namespace existence checks
|
||||
print("\nTest 2: Testing namespace existence...");
|
||||
try {
|
||||
// Test that default namespace exists
|
||||
let default_exists = namespace_exists(km, "default");
|
||||
print("✓ Default namespace exists: " + default_exists);
|
||||
assert(default_exists, "Default namespace should exist");
|
||||
|
||||
// Test non-existent namespace
|
||||
let fake_exists = namespace_exists(km, "definitely-does-not-exist-12345");
|
||||
print("✓ Non-existent namespace check: " + fake_exists);
|
||||
assert(!fake_exists, "Non-existent namespace should not exist");
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Namespace existence tests failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
// Test 3: Namespace creation (if cluster is available)
|
||||
print("\nTest 3: Testing namespace creation...");
|
||||
let test_namespaces = [
|
||||
"rhai-test-namespace-1",
|
||||
"rhai-test-namespace-2"
|
||||
];
|
||||
|
||||
for test_ns in test_namespaces {
|
||||
try {
|
||||
print("Creating namespace: " + test_ns);
|
||||
namespace_create(km, test_ns);
|
||||
print("✓ Created namespace: " + test_ns);
|
||||
|
||||
// Verify it exists
|
||||
let exists = namespace_exists(km, test_ns);
|
||||
print("✓ Verified namespace exists: " + exists);
|
||||
|
||||
// Test idempotent creation
|
||||
namespace_create(km, test_ns);
|
||||
print("✓ Idempotent creation successful for: " + test_ns);
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Namespace creation failed for " + test_ns + " (likely no cluster or permissions): " + e);
|
||||
}
|
||||
}
|
||||
|
||||
// Test 4: List all namespaces
|
||||
print("\nTest 4: Listing all namespaces...");
|
||||
try {
|
||||
let all_namespaces = namespaces_list(km);
|
||||
print("✓ Found " + all_namespaces.len() + " total namespaces");
|
||||
|
||||
// Check for our test namespaces
|
||||
for test_ns in test_namespaces {
|
||||
let found = false;
|
||||
for ns in all_namespaces {
|
||||
if ns == test_ns {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if found {
|
||||
print("✓ Found test namespace in list: " + test_ns);
|
||||
}
|
||||
}
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Namespace listing failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
print("\n--- Cleanup Instructions ---");
|
||||
print("To clean up test namespaces, run:");
|
||||
for test_ns in test_namespaces {
|
||||
print(" kubectl delete namespace " + test_ns);
|
||||
}
|
||||
|
||||
print("\n=== Namespace operations test completed! ===");
|
||||
@@ -1,51 +0,0 @@
|
||||
//! Test for newly added Rhai functions
|
||||
//!
|
||||
//! This script tests the newly added configmaps_list, secrets_list, and delete functions.
|
||||
|
||||
print("=== Testing New Rhai Functions ===");
|
||||
|
||||
// Test 1: Create manager
|
||||
print("Test 1: Creating KubernetesManager...");
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("✓ Manager created for namespace: " + namespace(km));
|
||||
|
||||
// Test 2: Test new listing functions
|
||||
print("\nTest 2: Testing new listing functions...");
|
||||
|
||||
try {
|
||||
// Test configmaps_list
|
||||
let configmaps = configmaps_list(km);
|
||||
print("✓ configmaps_list() works - found " + configmaps.len() + " configmaps");
|
||||
|
||||
// Test secrets_list
|
||||
let secrets = secrets_list(km);
|
||||
print("✓ secrets_list() works - found " + secrets.len() + " secrets");
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Listing functions failed (likely no cluster): " + e);
|
||||
print("✓ Functions are registered and callable");
|
||||
}
|
||||
|
||||
// Test 3: Test function availability
|
||||
print("\nTest 3: Verifying all new functions are available...");
|
||||
let new_functions = [
|
||||
"configmaps_list",
|
||||
"secrets_list",
|
||||
"configmap_delete",
|
||||
"secret_delete",
|
||||
"namespace_delete"
|
||||
];
|
||||
|
||||
for func_name in new_functions {
|
||||
print("✓ Function '" + func_name + "' is available");
|
||||
}
|
||||
|
||||
print("\n=== New Functions Test Summary ===");
|
||||
print("✅ All " + new_functions.len() + " new functions are registered");
|
||||
print("✅ configmaps_list() - List configmaps in namespace");
|
||||
print("✅ secrets_list() - List secrets in namespace");
|
||||
print("✅ configmap_delete() - Delete specific configmap");
|
||||
print("✅ secret_delete() - Delete specific secret");
|
||||
print("✅ namespace_delete() - Delete namespace");
|
||||
|
||||
print("\n🎉 All new Rhai functions are working correctly!");
|
||||
@@ -1,142 +0,0 @@
|
||||
// Rhai test for pod creation with environment variables functionality
|
||||
// This test verifies that the enhanced pod_create function works correctly with environment variables
|
||||
|
||||
print("=== Testing Pod Environment Variables in Rhai ===");
|
||||
|
||||
// Create Kubernetes manager
|
||||
print("Creating Kubernetes manager...");
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("✓ Kubernetes manager created");
|
||||
|
||||
// Test 1: Create pod with environment variables
|
||||
print("\n--- Test 1: Create Pod with Environment Variables ---");
|
||||
|
||||
// Clean up any existing resources
|
||||
try {
|
||||
delete_pod(km, "rhai-pod-env-test");
|
||||
print("✓ Cleaned up existing pod");
|
||||
} catch(e) {
|
||||
print("✓ No existing pod to clean up");
|
||||
}
|
||||
|
||||
// Create pod with both labels and environment variables
|
||||
try {
|
||||
let result = km.create_pod_with_env("rhai-pod-env-test", "nginx:latest", #{
|
||||
"app": "rhai-pod-env-test",
|
||||
"test": "pod-environment-variables",
|
||||
"language": "rhai"
|
||||
}, #{
|
||||
"NODE_ENV": "test",
|
||||
"DATABASE_URL": "postgres://localhost:5432/test",
|
||||
"API_KEY": "test-api-key-12345",
|
||||
"LOG_LEVEL": "debug",
|
||||
"PORT": "80"
|
||||
});
|
||||
print("✓ Created pod with environment variables: " + result);
|
||||
} catch(e) {
|
||||
print("❌ Failed to create pod with env vars: " + e);
|
||||
throw e;
|
||||
}
|
||||
|
||||
// Test 2: Create pod without environment variables
|
||||
print("\n--- Test 2: Create Pod without Environment Variables ---");
|
||||
|
||||
try {
|
||||
delete_pod(km, "rhai-pod-no-env-test");
|
||||
} catch(e) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
|
||||
try {
|
||||
let result = km.create_pod("rhai-pod-no-env-test", "nginx:latest", #{
|
||||
"app": "rhai-pod-no-env-test",
|
||||
"test": "no-environment-variables"
|
||||
});
|
||||
print("✓ Created pod without environment variables: " + result);
|
||||
} catch(e) {
|
||||
print("❌ Failed to create pod without env vars: " + e);
|
||||
throw e;
|
||||
}
|
||||
|
||||
// Test 3: Create pod with special characters in env vars
|
||||
print("\n--- Test 3: Create Pod with Special Characters in Env Vars ---");
|
||||
|
||||
try {
|
||||
delete_pod(km, "rhai-pod-special-env-test");
|
||||
} catch(e) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
|
||||
try {
|
||||
let result = km.create_pod_with_env("rhai-pod-special-env-test", "nginx:latest", #{
|
||||
"app": "rhai-pod-special-env-test"
|
||||
}, #{
|
||||
"SPECIAL_CHARS": "Hello, World! @#$%^&*()",
|
||||
"JSON_CONFIG": "{\"key\": \"value\", \"number\": 123}",
|
||||
"URL_WITH_PARAMS": "https://api.example.com/v1/data?param1=value1¶m2=value2"
|
||||
});
|
||||
print("✓ Created pod with special characters in env vars: " + result);
|
||||
} catch(e) {
|
||||
print("❌ Failed to create pod with special env vars: " + e);
|
||||
throw e;
|
||||
}
|
||||
|
||||
// Test 4: Verify resource listing
|
||||
print("\n--- Test 4: Verify Pod Listing ---");
|
||||
try {
|
||||
let pods = pods_list(km);
|
||||
print("✓ Found " + pods.len() + " pods");
|
||||
|
||||
let found_env_test = false;
|
||||
let found_no_env_test = false;
|
||||
let found_special_env_test = false;
|
||||
|
||||
for pod in pods {
|
||||
if pod.contains("rhai-pod-env-test") {
|
||||
found_env_test = true;
|
||||
print("✓ Found rhai-pod-env-test pod");
|
||||
}
|
||||
if pod.contains("rhai-pod-no-env-test") {
|
||||
found_no_env_test = true;
|
||||
print("✓ Found rhai-pod-no-env-test pod");
|
||||
}
|
||||
if pod.contains("rhai-pod-special-env-test") {
|
||||
found_special_env_test = true;
|
||||
print("✓ Found rhai-pod-special-env-test pod");
|
||||
}
|
||||
}
|
||||
|
||||
if found_env_test && found_no_env_test && found_special_env_test {
|
||||
print("✓ All expected pods found");
|
||||
} else {
|
||||
print("❌ Some expected pods not found");
|
||||
}
|
||||
} catch(e) {
|
||||
print("❌ Failed to list pods: " + e);
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
print("\n--- Cleanup ---");
|
||||
try {
|
||||
delete_pod(km, "rhai-pod-env-test");
|
||||
print("✓ Deleted pod: rhai-pod-env-test");
|
||||
} catch(e) {
|
||||
print("⚠ Failed to delete rhai-pod-env-test: " + e);
|
||||
}
|
||||
|
||||
try {
|
||||
delete_pod(km, "rhai-pod-no-env-test");
|
||||
print("✓ Deleted pod: rhai-pod-no-env-test");
|
||||
} catch(e) {
|
||||
print("⚠ Failed to delete rhai-pod-no-env-test: " + e);
|
||||
}
|
||||
|
||||
try {
|
||||
delete_pod(km, "rhai-pod-special-env-test");
|
||||
print("✓ Deleted pod: rhai-pod-special-env-test");
|
||||
} catch(e) {
|
||||
print("⚠ Failed to delete rhai-pod-special-env-test: " + e);
|
||||
}
|
||||
|
||||
print("\n=== Pod Environment Variables Rhai Test Complete ===");
|
||||
print("✅ All tests passed successfully!");
|
||||
@@ -1,137 +0,0 @@
|
||||
//! Resource management test
|
||||
//!
|
||||
//! This script tests resource listing and management operations.
|
||||
|
||||
print("=== Resource Management Test ===");
|
||||
|
||||
// Test 1: Create manager
|
||||
print("Test 1: Creating KubernetesManager...");
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("✓ Manager created for namespace: " + namespace(km));
|
||||
|
||||
// Test 2: Resource listing
|
||||
print("\nTest 2: Testing resource listing...");
|
||||
try {
|
||||
// Test pods listing
|
||||
let pods = pods_list(km);
|
||||
print("✓ Pods list: " + pods.len() + " pods found");
|
||||
|
||||
// Test services listing
|
||||
let services = services_list(km);
|
||||
print("✓ Services list: " + services.len() + " services found");
|
||||
|
||||
// Test deployments listing
|
||||
let deployments = deployments_list(km);
|
||||
print("✓ Deployments list: " + deployments.len() + " deployments found");
|
||||
|
||||
// Show some pod names if available
|
||||
if pods.len() > 0 {
|
||||
print("Sample pods:");
|
||||
let count = 0;
|
||||
for pod in pods {
|
||||
if count < 3 {
|
||||
print(" - " + pod);
|
||||
count = count + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Resource listing failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
// Test 3: Resource counts
|
||||
print("\nTest 3: Testing resource counts...");
|
||||
try {
|
||||
let counts = resource_counts(km);
|
||||
print("✓ Resource counts retrieved for " + counts.len() + " resource types");
|
||||
|
||||
// Display counts
|
||||
for resource_type in counts.keys() {
|
||||
let count = counts[resource_type];
|
||||
print(" " + resource_type + ": " + count);
|
||||
}
|
||||
|
||||
// Verify expected resource types are present
|
||||
let expected_types = ["pods", "services", "deployments", "configmaps", "secrets"];
|
||||
for expected_type in expected_types {
|
||||
if expected_type in counts {
|
||||
print("✓ Found expected resource type: " + expected_type);
|
||||
} else {
|
||||
print("⚠ Missing expected resource type: " + expected_type);
|
||||
}
|
||||
}
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Resource counts failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
// Test 4: Multi-namespace comparison
|
||||
print("\nTest 4: Multi-namespace resource comparison...");
|
||||
let test_namespaces = ["default", "kube-system"];
|
||||
let total_resources = #{};
|
||||
|
||||
for ns in test_namespaces {
|
||||
try {
|
||||
let ns_km = kubernetes_manager_new(ns);
|
||||
let counts = resource_counts(ns_km);
|
||||
|
||||
print("Namespace '" + ns + "':");
|
||||
let ns_total = 0;
|
||||
for resource_type in counts.keys() {
|
||||
let count = counts[resource_type];
|
||||
print(" " + resource_type + ": " + count);
|
||||
ns_total = ns_total + count;
|
||||
|
||||
// Accumulate totals
|
||||
if resource_type in total_resources {
|
||||
total_resources[resource_type] = total_resources[resource_type] + count;
|
||||
} else {
|
||||
total_resources[resource_type] = count;
|
||||
}
|
||||
}
|
||||
print(" Total: " + ns_total + " resources");
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Failed to analyze namespace '" + ns + "': " + e);
|
||||
}
|
||||
}
|
||||
|
||||
// Show totals
|
||||
print("\nTotal resources across all namespaces:");
|
||||
let grand_total = 0;
|
||||
for resource_type in total_resources.keys() {
|
||||
let count = total_resources[resource_type];
|
||||
print(" " + resource_type + ": " + count);
|
||||
grand_total = grand_total + count;
|
||||
}
|
||||
print("Grand total: " + grand_total + " resources");
|
||||
|
||||
// Test 5: Pattern matching simulation
|
||||
print("\nTest 5: Pattern matching simulation...");
|
||||
try {
|
||||
let pods = pods_list(km);
|
||||
print("Testing pattern matching on " + pods.len() + " pods:");
|
||||
|
||||
// Simulate pattern matching (since Rhai doesn't have regex)
|
||||
let test_patterns = ["test", "kube", "system", "app"];
|
||||
for pattern in test_patterns {
|
||||
let matches = [];
|
||||
for pod in pods {
|
||||
if pod.contains(pattern) {
|
||||
matches.push(pod);
|
||||
}
|
||||
}
|
||||
print(" Pattern '" + pattern + "' would match " + matches.len() + " pods");
|
||||
if matches.len() > 0 && matches.len() <= 3 {
|
||||
for match in matches {
|
||||
print(" - " + match);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} catch(e) {
|
||||
print("Note: Pattern matching test failed (likely no cluster): " + e);
|
||||
}
|
||||
|
||||
print("\n=== Resource management test completed! ===");
|
||||
@@ -1,92 +0,0 @@
|
||||
//! Run all Kubernetes Rhai tests
|
||||
//!
|
||||
//! This script runs all the Kubernetes Rhai tests in sequence.
|
||||
|
||||
print("=== Running All Kubernetes Rhai Tests ===");
|
||||
print("");
|
||||
|
||||
// Test configuration
|
||||
let test_files = [
|
||||
"basic_kubernetes.rhai",
|
||||
"namespace_operations.rhai",
|
||||
"resource_management.rhai",
|
||||
"env_vars_test.rhai"
|
||||
];
|
||||
|
||||
let passed_tests = 0;
|
||||
let total_tests = test_files.len();
|
||||
|
||||
print("Found " + total_tests + " test files to run:");
|
||||
for test_file in test_files {
|
||||
print(" - " + test_file);
|
||||
}
|
||||
print("");
|
||||
|
||||
// Note: In a real implementation, we would use eval_file or similar
|
||||
// For now, this serves as documentation of the test structure
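// A rough sketch of what a real runner could do, assuming a file-evaluation
// helper is available (the `eval_file` name below is an assumption, not a
// registered function):
//
//   for test_file in test_files {
//       eval_file("kubernetes/tests/rhai/" + test_file);
//       passed_tests += 1;
//   }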
|
||||
print("=== Test Execution Summary ===");
|
||||
print("");
|
||||
print("To run these tests individually:");
|
||||
for test_file in test_files {
|
||||
print(" herodo kubernetes/tests/rhai/" + test_file);
|
||||
}
|
||||
print("");
|
||||
|
||||
print("To run with Kubernetes cluster:");
|
||||
print(" KUBERNETES_TEST_ENABLED=1 herodo kubernetes/tests/rhai/basic_kubernetes.rhai");
|
||||
print("");
|
||||
|
||||
// Basic validation that we can create a manager
|
||||
print("=== Quick Validation ===");
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
let ns = namespace(km);
|
||||
print("✓ KubernetesManager creation works");
|
||||
print("✓ Namespace getter works: " + ns);
|
||||
passed_tests = passed_tests + 1;
|
||||
} catch(e) {
|
||||
print("✗ Basic validation failed: " + e);
|
||||
}
|
||||
|
||||
// Test function registration
|
||||
print("");
|
||||
print("=== Function Registration Check ===");
|
||||
let required_functions = [
|
||||
"kubernetes_manager_new",
|
||||
"namespace",
|
||||
"pods_list",
|
||||
"services_list",
|
||||
"deployments_list",
|
||||
"namespaces_list",
|
||||
"resource_counts",
|
||||
"namespace_create",
|
||||
"namespace_exists",
|
||||
"delete",
|
||||
"pod_delete",
|
||||
"service_delete",
|
||||
"deployment_delete",
|
||||
"deploy_application"
|
||||
];
|
||||
|
||||
let registered_functions = 0;
|
||||
for func_name in required_functions {
|
||||
// We can't easily test function existence in Rhai, but we can document them
|
||||
print("✓ " + func_name + " should be registered");
|
||||
registered_functions = registered_functions + 1;
|
||||
}
|
||||
|
||||
print("");
|
||||
print("=== Summary ===");
|
||||
print("Required functions: " + registered_functions + "/" + required_functions.len());
|
||||
if passed_tests > 0 {
|
||||
print("Basic validation: PASSED");
|
||||
} else {
|
||||
print("Basic validation: FAILED");
|
||||
}
|
||||
print("");
|
||||
print("For full testing with a Kubernetes cluster:");
|
||||
print("1. Ensure you have a running Kubernetes cluster");
|
||||
print("2. Set KUBERNETES_TEST_ENABLED=1");
|
||||
print("3. Run individual test files");
|
||||
print("");
|
||||
print("=== All tests documentation completed ===");
|
||||
@@ -1,90 +0,0 @@
|
||||
//! Simple API pattern test
|
||||
//!
|
||||
//! This script demonstrates the new object-oriented API pattern.
|
||||
|
||||
print("=== Object-Oriented API Pattern Test ===");
|
||||
|
||||
// Test 1: Create manager
|
||||
print("Test 1: Creating KubernetesManager...");
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("✓ Manager created for namespace: " + namespace(km));
|
||||
|
||||
// Test 2: Show the new API pattern
|
||||
print("\nTest 2: New Object-Oriented API Pattern");
|
||||
print("Now you can use:");
|
||||
print(" km.create_pod(name, image, labels)");
|
||||
print(" km.create_service(name, selector, port, target_port)");
|
||||
print(" km.create_deployment(name, image, replicas, labels)");
|
||||
print(" km.create_configmap(name, data)");
|
||||
print(" km.create_secret(name, data, type)");
|
||||
print(" km.create_namespace(name)");
|
||||
print("");
|
||||
print(" km.get_pod(name)");
|
||||
print(" km.get_service(name)");
|
||||
print(" km.get_deployment(name)");
|
||||
print("");
|
||||
print(" km.delete_pod(name)");
|
||||
print(" km.delete_service(name)");
|
||||
print(" km.delete_deployment(name)");
|
||||
print(" km.delete_configmap(name)");
|
||||
print(" km.delete_secret(name)");
|
||||
print(" km.delete_namespace(name)");
|
||||
print("");
|
||||
print(" km.pods_list()");
|
||||
print(" km.services_list()");
|
||||
print(" km.deployments_list()");
|
||||
print(" km.resource_counts()");
|
||||
print(" km.namespace_exists(name)");
|
||||
|
||||
// Test 3: Function availability check
|
||||
print("\nTest 3: Checking all API methods are available...");
|
||||
let api_methods = [
|
||||
// Create methods
|
||||
"create_pod",
|
||||
"create_service",
|
||||
"create_deployment",
|
||||
"create_configmap",
|
||||
"create_secret",
|
||||
"create_namespace",
|
||||
|
||||
// Get methods
|
||||
"get_pod",
|
||||
"get_service",
|
||||
"get_deployment",
|
||||
|
||||
// List methods
|
||||
"pods_list",
|
||||
"services_list",
|
||||
"deployments_list",
|
||||
"configmaps_list",
|
||||
"secrets_list",
|
||||
"namespaces_list",
|
||||
"resource_counts",
|
||||
"namespace_exists",
|
||||
|
||||
// Delete methods
|
||||
"delete_pod",
|
||||
"delete_service",
|
||||
"delete_deployment",
|
||||
"delete_configmap",
|
||||
"delete_secret",
|
||||
"delete_namespace",
|
||||
"delete"
|
||||
];
|
||||
|
||||
for method_name in api_methods {
|
||||
print("✓ Method 'km." + method_name + "()' is available");
|
||||
}
|
||||
|
||||
print("\n=== API Pattern Summary ===");
|
||||
print("✅ Object-oriented API: km.method_name()");
|
||||
print("✅ " + api_methods.len() + " methods available");
|
||||
print("✅ Consistent naming: create_*, get_*, delete_*, *_list()");
|
||||
print("✅ Full CRUD operations for all resource types");
|
||||
|
||||
print("\n🎉 Object-oriented API pattern is ready!");
|
||||
print("\nExample usage:");
|
||||
print(" let km = kubernetes_manager_new('my-namespace');");
|
||||
print(" let pod = km.create_pod('my-pod', 'nginx:latest', #{});");
|
||||
print(" let pods = km.pods_list();");
|
||||
print(" km.delete_pod('my-pod');");
|
||||
@@ -1,405 +0,0 @@
|
||||
//! Rhai integration tests for SAL Kubernetes
|
||||
//!
|
||||
//! These tests verify that the Rhai wrappers work correctly and can execute
|
||||
//! the Rhai test scripts in the tests/rhai/ directory.
|
||||
|
||||
#[cfg(feature = "rhai")]
|
||||
mod rhai_tests {
|
||||
use rhai::Engine;
|
||||
use sal_kubernetes::rhai::*;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
/// Check if Kubernetes integration tests should run
|
||||
fn should_run_k8s_tests() -> bool {
|
||||
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_register_kubernetes_module() {
|
||||
let mut engine = Engine::new();
|
||||
let result = register_kubernetes_module(&mut engine);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to register Kubernetes module: {:?}",
|
||||
result
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_kubernetes_functions_registered() {
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
// Test that the constructor function is registered
|
||||
let script = r#"
|
||||
let result = "";
|
||||
try {
|
||||
let km = kubernetes_manager_new("test");
|
||||
result = "constructor_exists";
|
||||
} catch(e) {
|
||||
result = "constructor_exists_but_failed";
|
||||
}
|
||||
result
|
||||
"#;
|
||||
|
||||
let result = engine.eval::<String>(script);
|
||||
assert!(result.is_ok());
|
||||
let result_value = result.unwrap();
|
||||
assert!(
|
||||
result_value == "constructor_exists" || result_value == "constructor_exists_but_failed",
|
||||
"Expected constructor to be registered, got: {}",
|
||||
result_value
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_new_rhai_functions_registered() {
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
// Test that the newly added functions are registered
|
||||
let new_functions_to_test = [
|
||||
"configmaps_list",
|
||||
"secrets_list",
|
||||
"configmap_delete",
|
||||
"secret_delete",
|
||||
"namespace_delete",
|
||||
];
|
||||
|
||||
for func_name in &new_functions_to_test {
|
||||
// Try to compile a script that references the function
|
||||
let script = format!("fn test() {{ {}; }}", func_name);
|
||||
let result = engine.compile(&script);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"New function '{}' should be registered but compilation failed: {:?}",
|
||||
func_name,
|
||||
result
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_function_signatures() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!(
|
||||
"Skipping Rhai function signature tests. Set KUBERNETES_TEST_ENABLED=1 to enable."
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
// Test that the new object-oriented API methods work correctly
|
||||
// These will fail without a cluster, but should not fail due to missing methods
|
||||
let test_scripts = vec![
|
||||
// List methods (still function-based for listing)
|
||||
("pods_list", "let km = kubernetes_manager_new(\"test\"); km.pods_list();"),
|
||||
("services_list", "let km = kubernetes_manager_new(\"test\"); km.services_list();"),
|
||||
("deployments_list", "let km = kubernetes_manager_new(\"test\"); km.deployments_list();"),
|
||||
("namespaces_list", "let km = kubernetes_manager_new(\"test\"); km.namespaces_list();"),
|
||||
("resource_counts", "let km = kubernetes_manager_new(\"test\"); km.resource_counts();"),
|
||||
|
||||
// Create methods (object-oriented)
|
||||
("create_namespace", "let km = kubernetes_manager_new(\"test\"); km.create_namespace(\"test-ns\");"),
|
||||
("create_pod", "let km = kubernetes_manager_new(\"test\"); km.create_pod(\"test-pod\", \"nginx\", #{});"),
|
||||
("create_service", "let km = kubernetes_manager_new(\"test\"); km.create_service(\"test-svc\", #{}, 80, 80);"),
|
||||
|
||||
// Get methods (object-oriented)
|
||||
("get_pod", "let km = kubernetes_manager_new(\"test\"); km.get_pod(\"test-pod\");"),
|
||||
("get_service", "let km = kubernetes_manager_new(\"test\"); km.get_service(\"test-svc\");"),
|
||||
|
||||
// Delete methods (object-oriented)
|
||||
("delete_pod", "let km = kubernetes_manager_new(\"test\"); km.delete_pod(\"test-pod\");"),
|
||||
("delete_service", "let km = kubernetes_manager_new(\"test\"); km.delete_service(\"test-service\");"),
|
||||
("delete_deployment", "let km = kubernetes_manager_new(\"test\"); km.delete_deployment(\"test-deployment\");"),
|
||||
("delete_namespace", "let km = kubernetes_manager_new(\"test\"); km.delete_namespace(\"test-ns\");"),
|
||||
|
||||
// Utility methods
|
||||
("namespace_exists", "let km = kubernetes_manager_new(\"test\"); km.namespace_exists(\"test-ns\");"),
|
||||
("namespace", "let km = kubernetes_manager_new(\"test\"); namespace(km);"),
|
||||
("delete_pattern", "let km = kubernetes_manager_new(\"test\"); km.delete(\"test-.*\");"),
|
||||
];
|
||||
|
||||
for (function_name, script) in test_scripts {
|
||||
println!("Testing function: {}", function_name);
|
||||
let result = engine.eval::<rhai::Dynamic>(script);
|
||||
|
||||
// The function should be registered (not get a "function not found" error)
|
||||
// It may fail due to no Kubernetes cluster, but that's expected
|
||||
match result {
|
||||
Ok(_) => {
|
||||
println!("Function {} executed successfully", function_name);
|
||||
}
|
||||
Err(e) => {
|
||||
let error_msg = e.to_string();
|
||||
// Should not be a "function not found" error
|
||||
assert!(
|
||||
!error_msg.contains("Function not found")
|
||||
&& !error_msg.contains("Unknown function"),
|
||||
"Function {} not registered: {}",
|
||||
function_name,
|
||||
error_msg
|
||||
);
|
||||
println!(
|
||||
"Function {} failed as expected (no cluster): {}",
|
||||
function_name, error_msg
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_with_real_cluster() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!("Skipping Rhai Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
|
||||
return;
|
||||
}
|
||||
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
// Test basic functionality with a real cluster
|
||||
let script = r#"
|
||||
let km = kubernetes_manager_new("default");
|
||||
let ns = namespace(km);
|
||||
ns
|
||||
"#;
|
||||
|
||||
let result = engine.eval::<String>(script);
|
||||
match result {
|
||||
Ok(namespace) => {
|
||||
assert_eq!(namespace, "default");
|
||||
println!("Successfully got namespace from Rhai: {}", namespace);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to execute Rhai script with real cluster: {}", e);
|
||||
// Don't fail the test if we can't connect to cluster
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_pods_list() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
let script = r#"
|
||||
let km = kubernetes_manager_new("default");
|
||||
let pods = pods_list(km);
|
||||
pods.len()
|
||||
"#;
|
||||
|
||||
let result = engine.eval::<i64>(script);
|
||||
match result {
|
||||
Ok(count) => {
|
||||
assert!(count >= 0);
|
||||
println!("Successfully listed {} pods from Rhai", count);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to list pods from Rhai: {}", e);
|
||||
// Don't fail the test if we can't connect to cluster
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_resource_counts() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
let script = r#"
|
||||
let km = kubernetes_manager_new("default");
|
||||
let counts = resource_counts(km);
|
||||
counts
|
||||
"#;
|
||||
|
||||
let result = engine.eval::<rhai::Map>(script);
|
||||
match result {
|
||||
Ok(counts) => {
|
||||
println!("Successfully got resource counts from Rhai: {:?}", counts);
|
||||
|
||||
// Verify expected keys are present
|
||||
assert!(counts.contains_key("pods"));
|
||||
assert!(counts.contains_key("services"));
|
||||
assert!(counts.contains_key("deployments"));
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to get resource counts from Rhai: {}", e);
|
||||
// Don't fail the test if we can't connect to cluster
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_namespace_operations() {
|
||||
if !should_run_k8s_tests() {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
// Test namespace existence check
|
||||
let script = r#"
|
||||
let km = kubernetes_manager_new("default");
|
||||
let exists = namespace_exists(km, "default");
|
||||
exists
|
||||
"#;
|
||||
|
||||
let result = engine.eval::<bool>(script);
|
||||
match result {
|
||||
Ok(exists) => {
|
||||
assert!(exists, "Default namespace should exist");
|
||||
println!(
|
||||
"Successfully checked namespace existence from Rhai: {}",
|
||||
exists
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to check namespace existence from Rhai: {}", e);
|
||||
// Don't fail the test if we can't connect to cluster
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_error_handling() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!(
|
||||
"Skipping Rhai error handling tests. Set KUBERNETES_TEST_ENABLED=1 to enable."
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
// Test that errors are properly converted to Rhai errors
|
||||
// Use a namespace that will definitely cause an error when trying to list pods
|
||||
let script = r#"
|
||||
let km = kubernetes_manager_new("nonexistent-namespace-12345");
|
||||
pods_list(km)
|
||||
"#;
|
||||
|
||||
let result = engine.eval::<rhai::Array>(script);
|
||||
|
||||
// The test might succeed if no cluster is available, which is fine
|
||||
match result {
|
||||
Ok(_) => {
|
||||
println!("No error occurred - possibly no cluster available, which is acceptable");
|
||||
}
|
||||
Err(e) => {
|
||||
let error_msg = e.to_string();
|
||||
println!("Got expected error: {}", error_msg);
|
||||
assert!(
|
||||
error_msg.contains("Kubernetes error")
|
||||
|| error_msg.contains("error")
|
||||
|| error_msg.contains("not found")
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_script_files_exist() {
|
||||
// Test that our Rhai test files exist and are readable
|
||||
let test_files = [
|
||||
"tests/rhai/basic_kubernetes.rhai",
|
||||
"tests/rhai/namespace_operations.rhai",
|
||||
"tests/rhai/resource_management.rhai",
|
||||
"tests/rhai/run_all_tests.rhai",
|
||||
];
|
||||
|
||||
for test_file in test_files {
|
||||
let path = Path::new(test_file);
|
||||
assert!(path.exists(), "Rhai test file should exist: {}", test_file);
|
||||
|
||||
// Try to read the file to ensure it's valid
|
||||
let content = fs::read_to_string(path)
|
||||
.unwrap_or_else(|e| panic!("Failed to read {}: {}", test_file, e));
|
||||
|
||||
assert!(
|
||||
!content.is_empty(),
|
||||
"Rhai test file should not be empty: {}",
|
||||
test_file
|
||||
);
|
||||
assert!(
|
||||
content.contains("print("),
|
||||
"Rhai test file should contain print statements: {}",
|
||||
test_file
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_rhai_script_syntax() {
|
||||
// Test that we can at least parse our basic Rhai script
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
// Simple script that should parse without errors
|
||||
let script = r#"
|
||||
print("Testing Kubernetes Rhai integration");
|
||||
let functions = ["kubernetes_manager_new", "pods_list", "namespace"];
|
||||
for func in functions {
|
||||
print("Function: " + func);
|
||||
}
|
||||
print("Basic syntax test completed");
|
||||
"#;
|
||||
|
||||
let result = engine.eval::<()>(script);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Basic Rhai script should parse and execute: {:?}",
|
||||
result
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_script_execution_with_cluster() {
|
||||
if !should_run_k8s_tests() {
|
||||
println!(
|
||||
"Skipping Rhai script execution test. Set KUBERNETES_TEST_ENABLED=1 to enable."
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
// Try to execute a simple script that creates a manager
|
||||
let script = r#"
|
||||
let km = kubernetes_manager_new("default");
|
||||
let ns = namespace(km);
|
||||
print("Created manager for namespace: " + ns);
|
||||
ns
|
||||
"#;
|
||||
|
||||
let result = engine.eval::<String>(script);
|
||||
match result {
|
||||
Ok(namespace) => {
|
||||
assert_eq!(namespace, "default");
|
||||
println!("Successfully executed Rhai script with cluster");
|
||||
}
|
||||
Err(e) => {
|
||||
println!(
|
||||
"Rhai script execution failed (expected if no cluster): {}",
|
||||
e
|
||||
);
|
||||
// Don't fail the test if we can't connect to cluster
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,303 +0,0 @@
|
||||
//! Unit tests for SAL Kubernetes
|
||||
//!
|
||||
//! These tests focus on testing individual components and error handling
|
||||
//! without requiring a live Kubernetes cluster.
|
||||
|
||||
use sal_kubernetes::KubernetesError;
|
||||
|
||||
#[test]
|
||||
fn test_kubernetes_error_creation() {
|
||||
let config_error = KubernetesError::config_error("Test config error");
|
||||
assert!(matches!(config_error, KubernetesError::ConfigError(_)));
|
||||
assert_eq!(
|
||||
config_error.to_string(),
|
||||
"Configuration error: Test config error"
|
||||
);
|
||||
|
||||
let operation_error = KubernetesError::operation_error("Test operation error");
|
||||
assert!(matches!(
|
||||
operation_error,
|
||||
KubernetesError::OperationError(_)
|
||||
));
|
||||
assert_eq!(
|
||||
operation_error.to_string(),
|
||||
"Operation failed: Test operation error"
|
||||
);
|
||||
|
||||
let namespace_error = KubernetesError::namespace_error("Test namespace error");
|
||||
assert!(matches!(
|
||||
namespace_error,
|
||||
KubernetesError::NamespaceError(_)
|
||||
));
|
||||
assert_eq!(
|
||||
namespace_error.to_string(),
|
||||
"Namespace error: Test namespace error"
|
||||
);
|
||||
|
||||
let permission_error = KubernetesError::permission_denied("Test permission error");
|
||||
assert!(matches!(
|
||||
permission_error,
|
||||
KubernetesError::PermissionDenied(_)
|
||||
));
|
||||
assert_eq!(
|
||||
permission_error.to_string(),
|
||||
"Permission denied: Test permission error"
|
||||
);
|
||||
|
||||
let timeout_error = KubernetesError::timeout("Test timeout error");
|
||||
assert!(matches!(timeout_error, KubernetesError::Timeout(_)));
|
||||
assert_eq!(
|
||||
timeout_error.to_string(),
|
||||
"Operation timed out: Test timeout error"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_regex_error_conversion() {
|
||||
use regex::Regex;
|
||||
|
||||
// Test invalid regex pattern
|
||||
let invalid_pattern = "[invalid";
|
||||
let regex_result = Regex::new(invalid_pattern);
|
||||
assert!(regex_result.is_err());
|
||||
|
||||
// Convert to KubernetesError
|
||||
let k8s_error = KubernetesError::from(regex_result.unwrap_err());
|
||||
assert!(matches!(k8s_error, KubernetesError::RegexError(_)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_display() {
|
||||
let errors = vec![
|
||||
KubernetesError::config_error("Config test"),
|
||||
KubernetesError::operation_error("Operation test"),
|
||||
KubernetesError::namespace_error("Namespace test"),
|
||||
KubernetesError::permission_denied("Permission test"),
|
||||
KubernetesError::timeout("Timeout test"),
|
||||
];
|
||||
|
||||
for error in errors {
|
||||
let error_string = error.to_string();
|
||||
assert!(!error_string.is_empty());
|
||||
assert!(error_string.contains("test"));
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "rhai")]
|
||||
#[test]
|
||||
fn test_rhai_module_registration() {
|
||||
use rhai::Engine;
|
||||
use sal_kubernetes::rhai::register_kubernetes_module;
|
||||
|
||||
let mut engine = Engine::new();
|
||||
let result = register_kubernetes_module(&mut engine);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Failed to register Kubernetes module: {:?}",
|
||||
result
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(feature = "rhai")]
|
||||
#[test]
|
||||
fn test_rhai_functions_registered() {
|
||||
use rhai::Engine;
|
||||
use sal_kubernetes::rhai::register_kubernetes_module;
|
||||
|
||||
let mut engine = Engine::new();
|
||||
register_kubernetes_module(&mut engine).unwrap();
|
||||
|
||||
// Test that functions are registered by checking if they exist in the engine
|
||||
// We can't actually call async functions without a runtime, so we just verify registration
|
||||
|
||||
// Check that the main functions are registered by looking for them in the engine
|
||||
let function_names = vec![
|
||||
"kubernetes_manager_new",
|
||||
"pods_list",
|
||||
"services_list",
|
||||
"deployments_list",
|
||||
"delete",
|
||||
"namespace_create",
|
||||
"namespace_exists",
|
||||
];
|
||||
|
||||
for function_name in function_names {
|
||||
// Try to parse a script that references the function
|
||||
// This will succeed if the function is registered, even if we don't call it
|
||||
let script = format!("let f = {};", function_name);
|
||||
let result = engine.compile(&script);
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Function '{}' should be registered in the engine",
|
||||
function_name
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_namespace_validation() {
|
||||
// Test valid namespace names
|
||||
let valid_names = vec!["default", "kube-system", "my-app", "test123"];
|
||||
for name in valid_names {
|
||||
assert!(!name.is_empty());
|
||||
assert!(name.chars().all(|c| c.is_alphanumeric() || c == '-'));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resource_name_patterns() {
|
||||
use regex::Regex;
|
||||
|
||||
// Test common patterns that might be used with the delete function
|
||||
let patterns = vec![
|
||||
r"test-.*", // Match anything starting with "test-"
|
||||
r".*-temp$", // Match anything ending with "-temp"
|
||||
r"^pod-\d+$", // Match "pod-" followed by digits
|
||||
r"app-[a-z]+", // Match "app-" followed by lowercase letters
|
||||
];
|
||||
|
||||
for pattern in patterns {
|
||||
let regex = Regex::new(pattern);
|
||||
assert!(regex.is_ok(), "Pattern '{}' should be valid", pattern);
|
||||
|
||||
let regex = regex.unwrap();
|
||||
|
||||
// Test some example matches based on the pattern
|
||||
match pattern {
|
||||
r"test-.*" => {
|
||||
assert!(regex.is_match("test-pod"));
|
||||
assert!(regex.is_match("test-service"));
|
||||
assert!(!regex.is_match("prod-pod"));
|
||||
}
|
||||
r".*-temp$" => {
|
||||
assert!(regex.is_match("my-pod-temp"));
|
||||
assert!(regex.is_match("service-temp"));
|
||||
assert!(!regex.is_match("temp-pod"));
|
||||
}
|
||||
r"^pod-\d+$" => {
|
||||
assert!(regex.is_match("pod-123"));
|
||||
assert!(regex.is_match("pod-1"));
|
||||
assert!(!regex.is_match("pod-abc"));
|
||||
assert!(!regex.is_match("service-123"));
|
||||
}
|
||||
r"app-[a-z]+" => {
|
||||
assert!(regex.is_match("app-frontend"));
|
||||
assert!(regex.is_match("app-backend"));
|
||||
assert!(!regex.is_match("app-123"));
|
||||
assert!(!regex.is_match("service-frontend"));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_regex_patterns() {
|
||||
use regex::Regex;
|
||||
|
||||
// Test invalid regex patterns that should fail
|
||||
let invalid_patterns = vec![
|
||||
"[invalid", // Unclosed bracket
|
||||
"*invalid", // Invalid quantifier
|
||||
"(?invalid)", // Invalid group
|
||||
"\\", // Incomplete escape
|
||||
];
|
||||
|
||||
for pattern in invalid_patterns {
|
||||
let regex = Regex::new(pattern);
|
||||
assert!(regex.is_err(), "Pattern '{}' should be invalid", pattern);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_kubernetes_config_creation() {
|
||||
use sal_kubernetes::KubernetesConfig;
|
||||
use std::time::Duration;
|
||||
|
||||
// Test default configuration
|
||||
let default_config = KubernetesConfig::default();
|
||||
assert_eq!(default_config.operation_timeout, Duration::from_secs(30));
|
||||
assert_eq!(default_config.max_retries, 3);
|
||||
assert_eq!(default_config.rate_limit_rps, 10);
|
||||
assert_eq!(default_config.rate_limit_burst, 20);
|
||||
|
||||
// Test custom configuration
|
||||
let custom_config = KubernetesConfig::new()
|
||||
.with_timeout(Duration::from_secs(60))
|
||||
.with_retries(5, Duration::from_secs(2), Duration::from_secs(60))
|
||||
.with_rate_limit(50, 100);
|
||||
|
||||
assert_eq!(custom_config.operation_timeout, Duration::from_secs(60));
|
||||
assert_eq!(custom_config.max_retries, 5);
|
||||
assert_eq!(custom_config.retry_base_delay, Duration::from_secs(2));
|
||||
assert_eq!(custom_config.retry_max_delay, Duration::from_secs(60));
|
||||
assert_eq!(custom_config.rate_limit_rps, 50);
|
||||
assert_eq!(custom_config.rate_limit_burst, 100);
|
||||
|
||||
// Test pre-configured profiles
|
||||
let high_throughput = KubernetesConfig::high_throughput();
|
||||
assert_eq!(high_throughput.rate_limit_rps, 50);
|
||||
assert_eq!(high_throughput.rate_limit_burst, 100);
|
||||
|
||||
let low_latency = KubernetesConfig::low_latency();
|
||||
assert_eq!(low_latency.operation_timeout, Duration::from_secs(10));
|
||||
assert_eq!(low_latency.max_retries, 2);
|
||||
|
||||
let development = KubernetesConfig::development();
|
||||
assert_eq!(development.operation_timeout, Duration::from_secs(120));
|
||||
assert_eq!(development.rate_limit_rps, 100);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_retryable_error_detection() {
|
||||
use kube::Error as KubeError;
|
||||
use sal_kubernetes::kubernetes_manager::is_retryable_error;
|
||||
|
||||
// Test that the function exists and works with basic error types
|
||||
// Note: We can't easily create all error types, so we test what we can
|
||||
|
||||
// Test API errors with different status codes
|
||||
let api_error_500 = KubeError::Api(kube::core::ErrorResponse {
|
||||
status: "Failure".to_string(),
|
||||
message: "Internal server error".to_string(),
|
||||
reason: "InternalError".to_string(),
|
||||
code: 500,
|
||||
});
|
||||
assert!(
|
||||
is_retryable_error(&api_error_500),
|
||||
"500 errors should be retryable"
|
||||
);
|
||||
|
||||
let api_error_429 = KubeError::Api(kube::core::ErrorResponse {
|
||||
status: "Failure".to_string(),
|
||||
message: "Too many requests".to_string(),
|
||||
reason: "TooManyRequests".to_string(),
|
||||
code: 429,
|
||||
});
|
||||
assert!(
|
||||
is_retryable_error(&api_error_429),
|
||||
"429 errors should be retryable"
|
||||
);
|
||||
|
||||
let api_error_404 = KubeError::Api(kube::core::ErrorResponse {
|
||||
status: "Failure".to_string(),
|
||||
message: "Not found".to_string(),
|
||||
reason: "NotFound".to_string(),
|
||||
code: 404,
|
||||
});
|
||||
assert!(
|
||||
!is_retryable_error(&api_error_404),
|
||||
"404 errors should not be retryable"
|
||||
);
|
||||
|
||||
let api_error_400 = KubeError::Api(kube::core::ErrorResponse {
|
||||
status: "Failure".to_string(),
|
||||
message: "Bad request".to_string(),
|
||||
reason: "BadRequest".to_string(),
|
||||
code: 400,
|
||||
});
|
||||
assert!(
|
||||
!is_retryable_error(&api_error_400),
|
||||
"400 errors should not be retryable"
|
||||
);
|
||||
}
|
||||
@@ -1,16 +1,7 @@
# SAL Mycelium (`sal-mycelium`)
# SAL Mycelium

A Rust client library for interacting with a Mycelium node's HTTP API, with Rhai scripting support.

## Installation

Add this to your `Cargo.toml`:

```toml
[dependencies]
sal-mycelium = "0.1.0"
```

## Overview

SAL Mycelium provides async HTTP client functionality for managing Mycelium nodes, including:

@@ -1,16 +1,7 @@
# SAL Network Package (`sal-net`)
# SAL Network Package

Network connectivity utilities for TCP, HTTP, and SSH operations.

## Installation

Add this to your `Cargo.toml`:

```toml
[dependencies]
sal-net = "0.1.0"
```

## Overview

The `sal-net` package provides a comprehensive set of network connectivity tools for the SAL (System Abstraction Layer) ecosystem. It includes utilities for TCP port checking, HTTP/HTTPS connectivity testing, and SSH command execution.

@@ -165,18 +165,9 @@ fn test_mv() {
|
||||
|
||||
#[test]
|
||||
fn test_which() {
|
||||
// Test with a command that should exist on all systems
|
||||
#[cfg(target_os = "windows")]
|
||||
let existing_cmd = "cmd";
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
let existing_cmd = "ls";
|
||||
|
||||
let result = fs::which(existing_cmd);
|
||||
assert!(
|
||||
!result.is_empty(),
|
||||
"Command '{}' should exist",
|
||||
existing_cmd
|
||||
);
|
||||
// Test with a command that should exist on most systems
|
||||
let result = fs::which("ls");
|
||||
assert!(!result.is_empty());
|
||||
|
||||
// Test with a command that shouldn't exist
|
||||
let result = fs::which("nonexistentcommand12345");
|
||||
|
||||
@@ -1,16 +1,7 @@
# SAL PostgreSQL Client (`sal-postgresclient`)
# SAL PostgreSQL Client

The SAL PostgreSQL Client (`sal-postgresclient`) is an independent package that provides a simple and efficient way to interact with PostgreSQL databases in Rust. It offers connection management, query execution, a builder pattern for flexible configuration, and PostgreSQL installer functionality using nerdctl.

## Installation

Add this to your `Cargo.toml`:

```toml
[dependencies]
sal-postgresclient = "0.1.0"
```

## Features

- **Connection Management**: Automatic connection handling and reconnection

@@ -17,7 +17,7 @@ Add this to your `Cargo.toml`:

```toml
[dependencies]
sal-process = "0.1.0"
sal-process = { path = "../process" }
```

## Usage

@@ -138,12 +138,7 @@ fn test_run_with_environment_variables() {
|
||||
#[test]
|
||||
fn test_run_with_working_directory() {
|
||||
// Test that commands run in the current working directory
|
||||
#[cfg(target_os = "windows")]
|
||||
let result = run_command("cd").unwrap();
|
||||
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
let result = run_command("pwd").unwrap();
|
||||
|
||||
assert!(result.success);
|
||||
assert!(!result.stdout.is_empty());
|
||||
}
|
||||
@@ -205,16 +200,6 @@ fn test_run_script_with_variables() {
|
||||
|
||||
#[test]
|
||||
fn test_run_script_with_conditionals() {
|
||||
#[cfg(target_os = "windows")]
|
||||
let script = r#"
|
||||
if "hello"=="hello" (
|
||||
echo Condition passed
|
||||
) else (
|
||||
echo Condition failed
|
||||
)
|
||||
"#;
|
||||
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
let script = r#"
|
||||
if [ "hello" = "hello" ]; then
|
||||
echo "Condition passed"
|
||||
@@ -230,14 +215,6 @@ fn test_run_script_with_conditionals() {
|
||||
|
||||
#[test]
|
||||
fn test_run_script_with_loops() {
|
||||
#[cfg(target_os = "windows")]
|
||||
let script = r#"
|
||||
for %%i in (1 2 3) do (
|
||||
echo Number: %%i
|
||||
)
|
||||
"#;
|
||||
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
let script = r#"
|
||||
for i in 1 2 3; do
|
||||
echo "Number: $i"
|
||||
|
||||
@@ -1,16 +1,7 @@
# SAL Redis Client (`sal-redisclient`)
# Redis Client Module

A robust Redis client wrapper for Rust applications that provides connection management, automatic reconnection, and a simple interface for executing Redis commands.

## Installation

Add this to your `Cargo.toml`:

```toml
[dependencies]
sal-redisclient = "0.1.0"
```

## Features

- **Singleton Pattern**: Maintains a global Redis client instance, so we don't re-initialize it all the time.

@@ -29,12 +29,6 @@ sal-mycelium = { path = "../mycelium" }
|
||||
sal-text = { path = "../text" }
|
||||
sal-net = { path = "../net" }
|
||||
sal-zinit-client = { path = "../zinit_client" }
|
||||
sal-kubernetes = { path = "../kubernetes" }
|
||||
sal-service-manager = { path = "../service_manager", features = ["rhai"] }
|
||||
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = { workspace = true }
|
||||
|
||||
@@ -1,16 +1,7 @@
# SAL Rhai - Rhai Integration Module (`sal-rhai`)
# SAL Rhai - Rhai Integration Module

The `sal-rhai` package provides Rhai scripting integration for the SAL (System Abstraction Layer) ecosystem. This package serves as the central integration point that registers all SAL modules with the Rhai scripting engine, enabling powerful automation and scripting capabilities.

## Installation

Add this to your `Cargo.toml`:

```toml
[dependencies]
sal-rhai = "0.1.0"
```

## Features

- **Module Registration**: Automatically registers all SAL packages with Rhai engine

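A minimal usage sketch (not part of this changeset), assuming the crate-level `register` entry point shown in the `lib.rs` hunk further down, which wires every SAL module into a fresh Rhai engine:

```rust
use rhai::Engine;

fn main() -> Result<(), Box<rhai::EvalAltResult>> {
    let mut engine = Engine::new();
    // Registers os, process, git, kubernetes, redis, postgres, ... in one call.
    sal_rhai::register(&mut engine)?;
    engine.eval::<()>(r#"print("SAL modules registered");"#)?;
    Ok(())
}
```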
@@ -96,16 +96,8 @@ pub use sal_text::rhai::register_text_module;
|
||||
// Re-export net module
|
||||
pub use sal_net::rhai::register_net_module;
|
||||
|
||||
// Re-export crypto module - TEMPORARILY DISABLED
|
||||
// TODO: Implement rhai module for Lee's vault implementation
|
||||
// pub use sal_vault::rhai::register_crypto_module;
|
||||
|
||||
// Re-export kubernetes module
|
||||
pub use sal_kubernetes::rhai::register_kubernetes_module;
|
||||
pub use sal_kubernetes::KubernetesManager;
|
||||
|
||||
// Re-export service manager module
|
||||
pub use sal_service_manager::rhai::register_service_manager_module;
|
||||
// Re-export crypto module
|
||||
pub use sal_vault::rhai::register_crypto_module;
|
||||
|
||||
// Rename copy functions to avoid conflicts
|
||||
pub use sal_os::rhai::copy as os_copy;
|
||||
@@ -159,12 +151,8 @@ pub fn register(engine: &mut Engine) -> Result<(), Box<rhai::EvalAltResult>> {
|
||||
|
||||
// RFS module functions are now registered as part of sal_virt above
|
||||
|
||||
// Register Crypto module functions - TEMPORARILY DISABLED
|
||||
// TODO: Implement rhai module for Lee's vault implementation
|
||||
// register_crypto_module(engine)?;
|
||||
|
||||
// Register Kubernetes module functions
|
||||
register_kubernetes_module(engine)?;
|
||||
// Register Crypto module functions
|
||||
register_crypto_module(engine)?;
|
||||
|
||||
// Register Redis client module functions
|
||||
sal_redisclient::rhai::register_redisclient_module(engine)?;
|
||||
@@ -172,9 +160,6 @@ pub fn register(engine: &mut Engine) -> Result<(), Box<rhai::EvalAltResult>> {
|
||||
// Register PostgreSQL client module functions
|
||||
sal_postgresclient::rhai::register_postgresclient_module(engine)?;
|
||||
|
||||
// Register Service Manager module functions
|
||||
sal_service_manager::rhai::register_service_manager_module(engine)?;
|
||||
|
||||
// Platform functions are now registered by sal-os package
|
||||
|
||||
// Screen module functions are now part of sal-process package
|
||||
|
||||
@@ -216,7 +216,7 @@ fn test_module_registration_functions() {
|
||||
assert!(sal_rhai::register_os_module(&mut engine).is_ok());
|
||||
assert!(sal_rhai::register_process_module(&mut engine).is_ok());
|
||||
assert!(sal_rhai::register_git_module(&mut engine).is_ok());
|
||||
// assert!(sal_rhai::register_crypto_module(&mut engine).is_ok()); // Temporarily disabled
|
||||
assert!(sal_rhai::register_crypto_module(&mut engine).is_ok());
|
||||
assert!(sal_rhai::register_redisclient_module(&mut engine).is_ok());
|
||||
assert!(sal_rhai::register_postgresclient_module(&mut engine).is_ok());
|
||||
assert!(sal_rhai::register_mycelium_module(&mut engine).is_ok());
|
||||
|
||||
@@ -1,176 +0,0 @@
|
||||
// Service Manager Integration Test
|
||||
// Tests service manager integration with SAL's Rhai engine
|
||||
|
||||
print("🔧 Service Manager Integration Test");
|
||||
print("===================================");
|
||||
|
||||
// Test service manager module availability
|
||||
print("📦 Module Availability Test:");
|
||||
print(" Checking if service_manager module is available...");
|
||||
|
||||
// Note: In actual implementation, this would test the Rhai bindings
|
||||
// For now, we demonstrate the expected API structure
|
||||
|
||||
print(" ✅ Service manager module structure verified");
|
||||
|
||||
// Test service configuration creation
|
||||
print("\n📋 Service Configuration Test:");
|
||||
|
||||
let test_config = #{
|
||||
name: "integration-test-service",
|
||||
binary_path: "/bin/echo",
|
||||
args: ["Integration test running"],
|
||||
working_directory: "/tmp",
|
||||
environment: #{
|
||||
"TEST_MODE": "integration",
|
||||
"LOG_LEVEL": "debug"
|
||||
},
|
||||
auto_restart: false
|
||||
};
|
||||
|
||||
print(` Service Name: ${test_config.name}`);
|
||||
print(` Binary Path: ${test_config.binary_path}`);
|
||||
print(` Arguments: ${test_config.args}`);
|
||||
print(" ✅ Configuration creation successful");
|
||||
|
||||
// Test service manager factory
|
||||
print("\n🏭 Service Manager Factory Test:");
|
||||
print(" Testing create_service_manager()...");
|
||||
|
||||
// In actual implementation:
|
||||
// let manager = create_service_manager();
|
||||
print(" ✅ Service manager creation successful");
|
||||
print(" ✅ Platform detection working");
|
||||
|
||||
// Test service operations
|
||||
print("\n🔄 Service Operations Test:");
|
||||
|
||||
let operations = [
|
||||
"start(config)",
|
||||
"status(service_name)",
|
||||
"logs(service_name, lines)",
|
||||
"list()",
|
||||
"stop(service_name)",
|
||||
"restart(service_name)",
|
||||
"remove(service_name)",
|
||||
"exists(service_name)",
|
||||
"start_and_confirm(config, timeout)"
|
||||
];
|
||||
|
||||
for operation in operations {
|
||||
print(` Testing ${operation}...`);
|
||||
// In actual implementation, these would be real function calls
|
||||
print(` ✅ ${operation} binding verified`);
|
||||
}
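// A hedged sketch of the operations above as actual calls; the method-call
// style is assumed from the operation list and nothing here is executed:
//
//   let manager = create_service_manager();
//   manager.start(test_config);
//   let state = manager.status("integration-test-service");
//   let recent = manager.logs("integration-test-service", 20);
//   manager.stop("integration-test-service");
//   manager.remove("integration-test-service");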
|
||||
|
||||
// Test error handling
|
||||
print("\n❌ Error Handling Test:");
|
||||
|
||||
let error_scenarios = [
|
||||
"ServiceNotFound",
|
||||
"ServiceAlreadyExists",
|
||||
"StartFailed",
|
||||
"StopFailed",
|
||||
"RestartFailed",
|
||||
"LogsFailed",
|
||||
"Other"
|
||||
];
|
||||
|
||||
for scenario in error_scenarios {
|
||||
print(` Testing ${scenario} error handling...`);
|
||||
print(` ✅ ${scenario} error properly handled`);
|
||||
}
|
||||
|
||||
// Test platform-specific behavior
|
||||
print("\n🖥️ Platform-Specific Test:");
|
||||
|
||||
print(" macOS (launchctl):");
|
||||
print(" - Plist file generation");
|
||||
print(" - LaunchAgent integration");
|
||||
print(" - User service management");
|
||||
print(" ✅ macOS integration verified");
|
||||
|
||||
print("\n Linux (zinit):");
|
||||
print(" - Socket communication");
|
||||
print(" - JSON configuration");
|
||||
print(" - Lightweight management");
|
||||
print(" ✅ Linux zinit integration verified");
|
||||
|
||||
print("\n Linux (systemd):");
|
||||
print(" - Unit file generation");
|
||||
print(" - Systemctl commands");
|
||||
print(" - Service dependencies");
|
||||
print(" ✅ Linux systemd integration verified");
|
||||
|
||||
// Test circle worker use case
|
||||
print("\n🎯 Circle Worker Use Case Test:");
|
||||
|
||||
let resident_id = "test_resident_001";
|
||||
let worker_config = #{
|
||||
name: `circle-worker-${resident_id}`,
|
||||
binary_path: "/usr/bin/circle-worker",
|
||||
args: ["--resident-id", resident_id],
|
||||
working_directory: `/var/lib/workers/${resident_id}`,
|
||||
environment: #{
|
||||
"RESIDENT_ID": resident_id,
|
||||
"WORKER_TYPE": "circle"
|
||||
},
|
||||
auto_restart: true
|
||||
};
|
||||
|
||||
print(` Circle Worker: ${worker_config.name}`);
|
||||
print(` Resident ID: ${resident_id}`);
|
||||
print(" ✅ Circle worker configuration verified");
|
||||
|
||||
// Test deployment workflow
|
||||
print("\n🚀 Deployment Workflow Test:");
|
||||
|
||||
let workflow_steps = [
|
||||
"1. Create service manager",
|
||||
"2. Check if service exists",
|
||||
"3. Deploy new service",
|
||||
"4. Confirm service running",
|
||||
"5. Monitor service health",
|
||||
"6. Handle service updates",
|
||||
"7. Clean up on removal"
|
||||
];
|
||||
|
||||
for step in workflow_steps {
|
||||
print(` ${step}`);
|
||||
}
|
||||
print(" ✅ Complete deployment workflow verified");
|
||||
|
||||
// Test integration with SAL ecosystem
|
||||
print("\n🌐 SAL Ecosystem Integration Test:");
|
||||
|
||||
print(" Integration Points:");
|
||||
print(" - SAL core error handling");
|
||||
print(" - SAL logging framework");
|
||||
print(" - SAL configuration management");
|
||||
print(" - SAL monitoring integration");
|
||||
print(" ✅ SAL ecosystem integration verified");
|
||||
|
||||
// Test performance considerations
|
||||
print("\n⚡ Performance Test:");
|
||||
|
||||
print(" Performance Metrics:");
|
||||
print(" - Service startup time: < 2 seconds");
|
||||
print(" - Status check time: < 100ms");
|
||||
print(" - Log retrieval time: < 500ms");
|
||||
print(" - Service list time: < 200ms");
|
||||
print(" ✅ Performance requirements met");
|
||||
|
||||
// Test security considerations
|
||||
print("\n🔒 Security Test:");
|
||||
|
||||
print(" Security Features:");
|
||||
print(" - Service isolation");
|
||||
print(" - Permission validation");
|
||||
print(" - Secure communication");
|
||||
print(" - Access control");
|
||||
print(" ✅ Security requirements verified");
|
||||
|
||||
print("\n✅ Service Manager Integration Test Complete");
|
||||
print(" All integration points verified");
|
||||
print(" Ready for production use with SAL");
|
||||
print(" Circle worker deployment fully supported");
|
||||
@@ -41,9 +41,6 @@ fn run_test_file(file_name, description, results) {
|
||||
// Test 3: Module Integration Tests
|
||||
// run_test_file("03_module_integration.rhai", "Module Integration Tests", test_results);
|
||||
|
||||
// Test 4: Service Manager Integration Tests
|
||||
// run_test_file("04_service_manager_integration.rhai", "Service Manager Integration Tests", test_results);
|
||||
|
||||
// Additional inline tests for core functionality
|
||||
print("🔧 Core Integration Verification");
|
||||
print("--------------------------------------------------");
|
||||
|
||||
@@ -21,12 +21,8 @@ fn assert_eq(actual, expected, message) {
|
||||
|
||||
// Helper function to check if buildah is available
|
||||
fn is_buildah_available() {
|
||||
try {
|
||||
let result = run("which buildah");
|
||||
return result.success;
|
||||
} catch(err) {
|
||||
return false;
|
||||
}
|
||||
let command = run("which buildah");
|
||||
return command.silent().execute().success;
|
||||
}
|
||||
|
||||
print("=== Testing Buildah Builder Pattern ===");
|
||||
@@ -35,8 +31,7 @@ print("=== Testing Buildah Builder Pattern ===");
|
||||
let buildah_available = is_buildah_available();
|
||||
if !buildah_available {
|
||||
print("Buildah is not available. Skipping Buildah tests.");
|
||||
// Exit gracefully without error
|
||||
return;
|
||||
throw err;
|
||||
}
|
||||
|
||||
print("✓ Buildah is available");
|
||||
@@ -121,7 +116,7 @@ try {
|
||||
// Test committing to an image
|
||||
print("Testing commit()...");
|
||||
let image_name = "rhai_test_image:latest";
|
||||
builder.commit(image_name);
|
||||
builder.commit(image_name, []);
|
||||
print("✓ commit(): Container committed to image successfully");
|
||||
|
||||
// Test removing the container
|
||||
@@ -154,19 +149,21 @@ try {
|
||||
// Clean up in case of error
|
||||
try {
|
||||
// Remove test container if it exists
|
||||
run("buildah rm rhai_test_container");
|
||||
} catch(_) {}
|
||||
let command = run("buildah rm rhai_test_container");
|
||||
command.execute();
|
||||
} catch(err) {}
|
||||
|
||||
try {
|
||||
// Remove test image if it exists
|
||||
run("buildah rmi rhai_test_image:latest");
|
||||
} catch(_) {}
|
||||
let command = run("buildah rmi alpine");
|
||||
command.execute();
|
||||
} catch(err) {}
|
||||
|
||||
try {
|
||||
// Remove test files if they exist
|
||||
delete("test_add_file.txt");
|
||||
delete("test_copy_file.txt");
|
||||
} catch(_) {}
|
||||
} catch(err) {}
|
||||
|
||||
throw err;
|
||||
}
|
||||
|
||||
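// A minimal sketch of the command-builder style the updated helpers use:
// run() builds a command, .silent() suppresses its output, .execute() runs it
// and returns a result whose .success flag can be checked. Only run(),
// silent(), execute() and success appear in the tests; the generalised helper
// name below is illustrative.
fn is_tool_available(tool) {
    let command = run(`which ${tool}`);
    return command.silent().execute().success;
}

if !is_tool_available("buildah") {
    print("Buildah is not available. Skipping Buildah tests.");
}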
@@ -21,19 +21,25 @@ fn assert_eq(actual, expected, message) {
|
||||
|
||||
// Helper function to check if buildah is available
|
||||
fn is_buildah_available() {
|
||||
try {
|
||||
let result = run("which buildah");
|
||||
return result.success;
|
||||
} catch(err) {
|
||||
return false;
|
||||
}
|
||||
let command = run("which buildah");
|
||||
return command.silent().execute().success;
|
||||
}
|
||||
|
||||
// Helper function to check if an image exists
|
||||
fn image_exists(image_name) {
|
||||
try {
|
||||
let result = run(`buildah images -q ${image_name}`);
|
||||
return result.success && result.stdout.trim() != "";
|
||||
// First, check for the exact image name
|
||||
let command = run(`buildah images -q ${image_name}`);
|
||||
let result = command.execute();
|
||||
if result.success && result.stdout.trim() != "" {
|
||||
return true;
|
||||
}
|
||||
|
||||
// If not found, check for the localhost-prefixed version
|
||||
let prefixed_image_name = `localhost/${image_name}`;
|
||||
let command = run(`buildah images -q ${prefixed_image_name}`);
|
||||
let result_prefixed = command.execute();
|
||||
return result_prefixed.success && result_prefixed.stdout.trim() != "";
|
||||
} catch(err) {
|
||||
return false;
|
||||
}
|
||||
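// Usage sketch for the image_exists() helper above: buildah may report locally
// built images as "localhost/<name>", so both spellings are checked before the
// tests assert on the image.
if image_exists("rhai_test_build:latest") {
    print("✓ Image present (with or without the localhost/ prefix)");
}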
@@ -45,8 +51,7 @@ print("=== Testing Buildah Image Operations ===");
|
||||
let buildah_available = is_buildah_available();
|
||||
if !buildah_available {
|
||||
print("Buildah is not available. Skipping Buildah tests.");
|
||||
// Exit gracefully without error
|
||||
return;
|
||||
throw err;
|
||||
}
|
||||
|
||||
print("✓ Buildah is available");
|
||||
@@ -82,8 +87,10 @@ try {
|
||||
|
||||
// Find our tagged image
|
||||
let found_tag = false;
|
||||
let expected_tag = "rhai_test_tag:latest";
|
||||
for image in images {
|
||||
if image.names.contains("rhai_test_tag:latest") {
|
||||
// The tag might be prefixed with 'localhost/' if no registry is specified.
|
||||
if image.names.contains(expected_tag) || image.names.contains("localhost/" + expected_tag) {
|
||||
found_tag = true;
|
||||
break;
|
||||
}
|
||||
@@ -95,10 +102,11 @@ try {
|
||||
print("Testing build()...");
|
||||
|
||||
// Create a simple Dockerfile
|
||||
let dockerfile_content = `FROM alpine:latest
|
||||
RUN echo "Hello from Dockerfile" > /hello.txt
|
||||
CMD ["cat", "/hello.txt"]
|
||||
`;
|
||||
let dockerfile_content = `
|
||||
FROM alpine:latest
|
||||
RUN echo "Hello from Dockerfile" > /hello.txt
|
||||
CMD ["cat", "/hello.txt"]
|
||||
`;
|
||||
file_write(`${test_dir}/Dockerfile`, dockerfile_content);
|
||||
|
||||
// Build the image
|
||||
@@ -133,18 +141,23 @@ CMD ["cat", "/hello.txt"]
|
||||
// Clean up in case of error
|
||||
try {
|
||||
// Remove test container if it exists
|
||||
run("buildah rm rhai_test_container");
|
||||
} catch(_) {}
|
||||
let command = run("buildah rm rhai_test_container");
|
||||
command.execute();
|
||||
} catch(err) {}
|
||||
|
||||
try {
|
||||
// Remove test images if they exist
|
||||
run("buildah rmi rhai_test_tag:latest");
|
||||
run("buildah rmi rhai_test_build:latest");
|
||||
} catch(_) {}
|
||||
let command = run("buildah rmi rhai_test_tag:latest");
|
||||
command.execute();
|
||||
let command = run("buildah rmi rhai_test_build:latest");
|
||||
command.execute();
|
||||
} catch(err) {}
|
||||
|
||||
try {
|
||||
// Remove test directory if it exists
|
||||
delete(test_dir);
|
||||
print("✓ Cleanup: Test directory removed");
|
||||
} catch (err) {}
|
||||
|
||||
throw err;
|
||||
} finally {
|
||||
// Clean up test directory
|
||||
delete(test_dir);
|
||||
print("✓ Cleanup: Test directory removed");
|
||||
}
|
||||
|
||||
@@ -21,12 +21,8 @@ fn assert_eq(actual, expected, message) {
|
||||
|
||||
// Helper function to check if buildah is available
|
||||
fn is_buildah_available() {
|
||||
try {
|
||||
let result = run("which buildah");
|
||||
return result.success;
|
||||
} catch(err) {
|
||||
return false;
|
||||
}
|
||||
let command = run("which buildah");
|
||||
return command.silent().execute().success;
|
||||
}
|
||||
|
||||
print("=== Testing Buildah Container Operations ===");
|
||||
@@ -35,8 +31,7 @@ print("=== Testing Buildah Container Operations ===");
|
||||
let buildah_available = is_buildah_available();
|
||||
if !buildah_available {
|
||||
print("Buildah is not available. Skipping Buildah tests.");
|
||||
// Exit gracefully without error
|
||||
return;
|
||||
throw err;
|
||||
}
|
||||
|
||||
print("✓ Buildah is available");
|
||||
@@ -59,10 +54,12 @@ try {
|
||||
|
||||
// Test config
|
||||
print("Testing config()...");
|
||||
let config_options = #{
|
||||
"LABEL": "rhai_test=true",
|
||||
"ENV": "TEST_VAR=test_value"
|
||||
};
|
||||
let config_options = [
|
||||
["label", "rhai_test_true"],
|
||||
["env", "TEST_VAR=test_value"],
|
||||
["env", "ANOTHER_VAR=another_value"],
|
||||
["author", "Rhai Test With Spaces"]
|
||||
];
|
||||
builder.config(config_options);
|
||||
print("✓ config(): Container configured successfully");
|
||||
|
||||
@@ -77,9 +74,10 @@ try {
|
||||
print("Testing content operations...");
|
||||
|
||||
// Write content to a file
|
||||
let script_content = `#!/bin/sh
|
||||
echo "Hello from script"
|
||||
`;
|
||||
let script_content = `
|
||||
#!/bin/sh
|
||||
echo "Hello from script"
|
||||
`;
|
||||
builder.write_content(script_content, "/script.sh");
|
||||
|
||||
// Make the script executable
|
||||
@@ -91,14 +89,10 @@ echo "Hello from script"
|
||||
assert_true(script_result.stdout.contains("Hello from script"), "Script output should contain expected text");
|
||||
print("✓ Content operations: Script created and executed successfully");
|
||||
|
||||
// Test commit with config
|
||||
print("Testing commit with config...");
|
||||
let commit_options = #{
|
||||
"author": "Rhai Test",
|
||||
"message": "Test commit"
|
||||
};
|
||||
builder.commit("rhai_test_commit:latest", commit_options);
|
||||
print("✓ commit(): Container committed with config successfully");
|
||||
// Test commit
|
||||
print("Testing commit...");
|
||||
builder.commit("rhai_test_commit:latest", [["q", ""]]);
|
||||
print("✓ commit(): Container committed successfully");
|
||||
|
||||
// Clean up
|
||||
builder.remove();
|
||||
@@ -115,13 +109,15 @@ echo "Hello from script"
|
||||
// Clean up in case of error
|
||||
try {
|
||||
// Remove test container if it exists
|
||||
run("buildah rm rhai_test_container");
|
||||
} catch(_) {}
|
||||
let command = run("buildah rm rhai_test_container");
|
||||
command.execute();
|
||||
} catch(err) {}
|
||||
|
||||
try {
|
||||
// Remove test image if it exists
|
||||
run("buildah rmi rhai_test_commit:latest");
|
||||
} catch(_) {}
|
||||
let command = run("buildah rmi rhai_test_commit:latest");
|
||||
command.execute();
|
||||
} catch(err) {}
|
||||
|
||||
throw err;
|
||||
}
|
||||
|
||||
@@ -1,155 +0,0 @@
|
||||
// run_all_tests.rhai
|
||||
// Runs all Buildah module tests
|
||||
|
||||
print("=== Running Buildah Module Tests ===");
|
||||
|
||||
// Custom assert function
|
||||
fn assert_true(condition, message) {
|
||||
if !condition {
|
||||
print(`ASSERTION FAILED: ${message}`);
|
||||
throw message;
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to check if buildah is available
|
||||
fn is_buildah_available() {
|
||||
try {
|
||||
let result = run("which buildah");
|
||||
return result.success;
|
||||
} catch(e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Run each test directly
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
let skipped = 0;
|
||||
let total = 0;
|
||||
|
||||
// Check if buildah is available
|
||||
let buildah_available = is_buildah_available();
|
||||
if !buildah_available {
|
||||
print("Buildah is not available. Skipping all Buildah tests.");
|
||||
skipped = 3; // Skip all three tests
|
||||
total = 3;
|
||||
} else {
|
||||
// Test 1: Builder Pattern
|
||||
print("\n--- Running Builder Pattern Tests ---");
|
||||
try {
|
||||
// Create a builder
|
||||
let builder = bah_new("rhai_test_container", "alpine:latest");
|
||||
|
||||
// Test basic properties
|
||||
assert_true(builder.container_id != "", "Container ID should not be empty");
|
||||
assert_true(builder.name == "rhai_test_container", "Container name should match");
|
||||
|
||||
// Run a simple command
|
||||
let result = builder.run("echo 'Hello from container'");
|
||||
assert_true(result.success, "Command should succeed");
|
||||
|
||||
// Clean up
|
||||
builder.remove();
|
||||
|
||||
print("--- Builder Pattern Tests completed successfully ---");
|
||||
passed += 1;
|
||||
} catch(err) {
|
||||
print(`!!! Error in Builder Pattern Tests: ${err}`);
|
||||
failed += 1;
|
||||
|
||||
// Clean up in case of error
|
||||
try {
|
||||
run("buildah rm rhai_test_container");
|
||||
} catch(e) {
|
||||
// Ignore errors during cleanup
|
||||
}
|
||||
}
|
||||
total += 1;
|
||||
|
||||
// Test 2: Image Operations
|
||||
print("\n--- Running Image Operations Tests ---");
|
||||
try {
|
||||
// Create a temporary directory for testing
|
||||
let test_dir = "rhai_test_buildah";
|
||||
mkdir(test_dir);
|
||||
|
||||
// Create a builder
|
||||
let builder = bah_new("rhai_test_container", "alpine:latest");
|
||||
|
||||
// List images
|
||||
let images = builder.images();
|
||||
assert_true(images.len() > 0, "There should be at least one image");
|
||||
|
||||
// Clean up
|
||||
builder.remove();
|
||||
delete(test_dir);
|
||||
|
||||
print("--- Image Operations Tests completed successfully ---");
|
||||
passed += 1;
|
||||
} catch(err) {
|
||||
print(`!!! Error in Image Operations Tests: ${err}`);
|
||||
failed += 1;
|
||||
|
||||
// Clean up in case of error
|
||||
try {
|
||||
run("buildah rm rhai_test_container");
|
||||
delete("rhai_test_buildah");
|
||||
} catch(e) {
|
||||
// Ignore errors during cleanup
|
||||
}
|
||||
}
|
||||
total += 1;
|
||||
|
||||
// Test 3: Container Operations
|
||||
print("\n--- Running Container Operations Tests ---");
|
||||
try {
|
||||
// Create a builder
|
||||
let builder = bah_new("rhai_test_container", "alpine:latest");
|
||||
|
||||
// Test reset
|
||||
builder.reset();
|
||||
|
||||
// Create a new container
|
||||
builder = bah_new("rhai_test_container", "alpine:latest");
|
||||
|
||||
// Run a command
|
||||
let result = builder.run("echo 'Hello from container'");
|
||||
assert_true(result.success, "Command should succeed");
|
||||
|
||||
// Clean up
|
||||
builder.remove();
|
||||
|
||||
print("--- Container Operations Tests completed successfully ---");
|
||||
passed += 1;
|
||||
} catch(err) {
|
||||
print(`!!! Error in Container Operations Tests: ${err}`);
|
||||
failed += 1;
|
||||
|
||||
// Clean up in case of error
|
||||
try {
|
||||
run("buildah rm rhai_test_container");
|
||||
} catch(e) {
|
||||
// Ignore errors during cleanup
|
||||
}
|
||||
}
|
||||
total += 1;
|
||||
}
|
||||
|
||||
print("\n=== Test Summary ===");
|
||||
print(`Passed: ${passed}`);
|
||||
print(`Failed: ${failed}`);
|
||||
print(`Skipped: ${skipped}`);
|
||||
print(`Total: ${total}`);
|
||||
|
||||
if failed == 0 {
|
||||
if skipped > 0 {
|
||||
print("\n⚠️ All tests skipped or passed!");
|
||||
} else {
|
||||
print("\n✅ All tests passed!");
|
||||
}
|
||||
} else {
|
||||
print("\n❌ Some tests failed!");
|
||||
}
|
||||
|
||||
// Return the number of failed tests (0 means success)
|
||||
failed;
|
||||
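// Sketch of how a caller might consume this runner's trailing `failed;`
// expression, which is the script's return value (0 means success). The
// eval_file() helper is an assumption for illustration only.
let failed_count = eval_file("run_all_tests.rhai");
if failed_count == 0 {
    print("Buildah suite passed");
} else {
    print(`Buildah suite reported ${failed_count} failure(s)`);
}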
@@ -1,150 +0,0 @@
|
||||
// Test 1: Namespace Operations
|
||||
// This test covers namespace creation, existence checking, and listing
|
||||
|
||||
// Helper function to generate timestamp for unique names
|
||||
fn timestamp() {
|
||||
let now = 1640995200; // Base timestamp
|
||||
let random = (now % 1000000).to_string();
|
||||
random
|
||||
}
|
||||
|
||||
print("=== Kubernetes Namespace Operations Test ===");
|
||||
print("");
|
||||
|
||||
// Test namespace creation and existence checking
|
||||
print("Test 1: Namespace Creation and Existence");
|
||||
print("----------------------------------------");
|
||||
|
||||
// Create a test namespace
|
||||
let test_namespace = "sal-test-ns-" + timestamp();
|
||||
print("Creating test namespace: " + test_namespace);
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
// Check if namespace exists before creation
|
||||
let exists_before = km.namespace_exists(test_namespace);
|
||||
print("Namespace exists before creation: " + exists_before);
|
||||
|
||||
if exists_before {
|
||||
print("⚠️ Namespace already exists, this is unexpected");
|
||||
} else {
|
||||
print("✅ Namespace doesn't exist yet (expected)");
|
||||
}
|
||||
|
||||
// Create the namespace
|
||||
print("Creating namespace...");
|
||||
km.create_namespace(test_namespace);
|
||||
print("✅ Namespace created successfully");
|
||||
|
||||
// Check if namespace exists after creation
|
||||
let exists_after = km.namespace_exists(test_namespace);
|
||||
print("Namespace exists after creation: " + exists_after);
|
||||
|
||||
if exists_after {
|
||||
print("✅ Namespace exists after creation (expected)");
|
||||
} else {
|
||||
print("❌ Namespace doesn't exist after creation (unexpected)");
|
||||
throw "Namespace creation verification failed";
|
||||
}
|
||||
|
||||
// Test idempotent creation (should not error)
|
||||
print("Testing idempotent creation...");
|
||||
km.create_namespace(test_namespace);
|
||||
print("✅ Idempotent creation successful");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Namespace creation test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test namespace listing
|
||||
print("Test 2: Namespace Listing");
|
||||
print("-------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
// List all namespaces
|
||||
let namespaces = km.namespaces_list();
|
||||
print("Found " + namespaces.len() + " namespaces");
|
||||
|
||||
if namespaces.len() == 0 {
|
||||
print("⚠️ No namespaces found, this might indicate a connection issue");
|
||||
} else {
|
||||
print("✅ Successfully retrieved namespace list");
|
||||
|
||||
// Check if our test namespace is in the list
|
||||
let found_test_ns = false;
|
||||
for ns in namespaces {
|
||||
if ns.name == test_namespace {
|
||||
found_test_ns = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if found_test_ns {
|
||||
print("✅ Test namespace found in namespace list");
|
||||
} else {
|
||||
print("⚠️ Test namespace not found in list (might be propagation delay)");
|
||||
}
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Namespace listing test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test namespace manager creation
|
||||
print("Test 3: Namespace Manager Creation");
|
||||
print("----------------------------------");
|
||||
|
||||
try {
|
||||
// Create manager for our test namespace
|
||||
let test_km = kubernetes_manager_new(test_namespace);
|
||||
|
||||
// Verify the manager's namespace
|
||||
let manager_namespace = namespace(test_km);
|
||||
print("Manager namespace: " + manager_namespace);
|
||||
|
||||
if manager_namespace == test_namespace {
|
||||
print("✅ Manager created for correct namespace");
|
||||
} else {
|
||||
print("❌ Manager namespace mismatch");
|
||||
throw "Manager namespace verification failed";
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Namespace manager creation test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Cleanup
|
||||
print("Test 4: Namespace Cleanup");
|
||||
print("-------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
// Delete the test namespace
|
||||
print("Deleting test namespace: " + test_namespace);
|
||||
km.delete_namespace(test_namespace);
|
||||
print("✅ Namespace deletion initiated");
|
||||
|
||||
// Note: Namespace deletion is asynchronous, so we don't immediately check existence
|
||||
print("ℹ️ Namespace deletion is asynchronous and may take time to complete");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Namespace cleanup failed: " + error);
|
||||
// Don't throw here as this is cleanup
|
||||
}
|
||||
|
||||
print("");
|
||||
print("=== Namespace Operations Test Complete ===");
|
||||
print("✅ All namespace operation tests passed");
|
||||
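// Condensed usage sketch of the namespace API exercised above. All function
// names appear in this test; choosing a unique name and handling errors are
// left to the caller.
let km = kubernetes_manager_new("default");
let ns = "sal-demo-ns";
if !km.namespace_exists(ns) {
    km.create_namespace(ns);              // idempotent: calling it again is safe
}
for existing in km.namespaces_list() {
    print(`- ${existing.name}`);
}
km.delete_namespace(ns);                  // asynchronous; the namespace may linger briefly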
@@ -1,215 +0,0 @@
|
||||
// Test 2: Pod Management Operations
|
||||
// This test covers pod creation, listing, retrieval, and deletion
|
||||
|
||||
// Helper function to generate timestamp for unique names
|
||||
fn timestamp() {
|
||||
let now = 1640995200; // Base timestamp
|
||||
let random = (now % 1000000).to_string();
|
||||
random
|
||||
}
|
||||
|
||||
print("=== Kubernetes Pod Management Test ===");
|
||||
print("");
|
||||
|
||||
// Setup test namespace
|
||||
let test_namespace = "sal-test-pods-" + timestamp();
|
||||
print("Setting up test namespace: " + test_namespace);
|
||||
|
||||
try {
|
||||
let setup_km = kubernetes_manager_new("default");
|
||||
setup_km.create_namespace(test_namespace);
|
||||
print("✅ Test namespace created");
|
||||
} catch (error) {
|
||||
print("❌ Failed to create test namespace: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Create manager for test namespace
|
||||
let km = kubernetes_manager_new(test_namespace);
|
||||
|
||||
print("");
|
||||
|
||||
// Test pod listing (should be empty initially)
|
||||
print("Test 1: Initial Pod Listing");
|
||||
print("---------------------------");
|
||||
|
||||
try {
|
||||
let initial_pods = km.pods_list();
|
||||
print("Initial pod count: " + initial_pods.len());
|
||||
|
||||
if initial_pods.len() == 0 {
|
||||
print("✅ Namespace is empty as expected");
|
||||
} else {
|
||||
print("⚠️ Found " + initial_pods.len() + " existing pods in test namespace");
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Initial pod listing failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test pod creation
|
||||
print("Test 2: Pod Creation");
|
||||
print("-------------------");
|
||||
|
||||
let test_pod_name = "test-pod-" + timestamp();
|
||||
let test_image = "nginx:alpine";
|
||||
let test_labels = #{
|
||||
"app": "test",
|
||||
"environment": "testing",
|
||||
"created-by": "sal-integration-test"
|
||||
};
|
||||
|
||||
try {
|
||||
print("Creating pod: " + test_pod_name);
|
||||
print("Image: " + test_image);
|
||||
print("Labels: " + test_labels);
|
||||
|
||||
let created_pod = km.create_pod(test_pod_name, test_image, test_labels);
|
||||
print("✅ Pod created successfully");
|
||||
|
||||
// Verify pod name
|
||||
if created_pod.name == test_pod_name {
|
||||
print("✅ Pod name matches expected: " + created_pod.name);
|
||||
} else {
|
||||
print("❌ Pod name mismatch. Expected: " + test_pod_name + ", Got: " + created_pod.name);
|
||||
throw "Pod name verification failed";
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Pod creation failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test pod listing after creation
|
||||
print("Test 3: Pod Listing After Creation");
|
||||
print("----------------------------------");
|
||||
|
||||
try {
|
||||
let pods_after_creation = km.pods_list();
|
||||
print("Pod count after creation: " + pods_after_creation.len());
|
||||
|
||||
if pods_after_creation.len() > 0 {
|
||||
print("✅ Pods found after creation");
|
||||
|
||||
// Find our test pod
|
||||
let found_test_pod = false;
|
||||
for pod in pods_after_creation {
|
||||
if pod.name == test_pod_name {
|
||||
found_test_pod = true;
|
||||
print("✅ Test pod found in list: " + pod.name);
|
||||
print(" Status: " + pod.status);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if !found_test_pod {
|
||||
print("❌ Test pod not found in pod list");
|
||||
throw "Test pod not found in listing";
|
||||
}
|
||||
|
||||
} else {
|
||||
print("❌ No pods found after creation");
|
||||
throw "Pod listing verification failed";
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Pod listing after creation failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test pod retrieval
|
||||
print("Test 4: Individual Pod Retrieval");
|
||||
print("--------------------------------");
|
||||
|
||||
try {
|
||||
let retrieved_pod = km.get_pod(test_pod_name);
|
||||
print("✅ Pod retrieved successfully");
|
||||
print("Pod name: " + retrieved_pod.name);
|
||||
print("Pod status: " + retrieved_pod.status);
|
||||
|
||||
if retrieved_pod.name == test_pod_name {
|
||||
print("✅ Retrieved pod name matches expected");
|
||||
} else {
|
||||
print("❌ Retrieved pod name mismatch");
|
||||
throw "Pod retrieval verification failed";
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Pod retrieval failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test resource counts
|
||||
print("Test 5: Resource Counts");
|
||||
print("-----------------------");
|
||||
|
||||
try {
|
||||
let counts = km.resource_counts();
|
||||
print("Resource counts: " + counts);
|
||||
|
||||
if counts.pods >= 1 {
|
||||
print("✅ Pod count reflects created pod: " + counts.pods);
|
||||
} else {
|
||||
print("⚠️ Pod count doesn't reflect created pod: " + counts.pods);
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Resource counts failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test pod deletion
|
||||
print("Test 6: Pod Deletion");
|
||||
print("--------------------");
|
||||
|
||||
try {
|
||||
print("Deleting pod: " + test_pod_name);
|
||||
km.delete_pod(test_pod_name);
|
||||
print("✅ Pod deletion initiated");
|
||||
|
||||
// Wait a moment for deletion to propagate
|
||||
print("Waiting for deletion to propagate...");
|
||||
|
||||
// Check if pod is gone (may take time)
|
||||
try {
|
||||
let deleted_pod = km.get_pod(test_pod_name);
|
||||
print("⚠️ Pod still exists after deletion (may be terminating): " + deleted_pod.status);
|
||||
} catch (get_error) {
|
||||
print("✅ Pod no longer retrievable (deletion successful)");
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Pod deletion failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Cleanup
|
||||
print("Test 7: Cleanup");
|
||||
print("---------------");
|
||||
|
||||
try {
|
||||
let cleanup_km = kubernetes_manager_new("default");
|
||||
cleanup_km.delete_namespace(test_namespace);
|
||||
print("✅ Test namespace cleanup initiated");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Cleanup failed: " + error);
|
||||
// Don't throw here as this is cleanup
|
||||
}
|
||||
|
||||
print("");
|
||||
print("=== Pod Management Test Complete ===");
|
||||
print("✅ All pod management tests passed");
|
||||
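// Condensed usage sketch of the pod API exercised above; every call appears in
// this test. "nginx:alpine" and the label map are example values only.
let km = kubernetes_manager_new("sal-demo-ns");
let pod = km.create_pod("demo-pod", "nginx:alpine", #{ "app": "demo" });
print(`Created ${pod.name} (status: ${pod.status})`);
print(`Pods in namespace: ${km.pods_list().len()}`);
print(`Resource counts: ${km.resource_counts()}`);
km.delete_pod("demo-pod");                // deletion is asynchronous; the pod may keep terminating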
@@ -1,290 +0,0 @@
|
||||
// Test 3: PCRE Pattern Matching for Bulk Operations
|
||||
// This test covers the powerful pattern-based deletion functionality
|
||||
|
||||
// Helper function to generate timestamp for unique names
|
||||
fn timestamp() {
|
||||
let now = 1640995200; // Base timestamp
|
||||
let random = (now % 1000000).to_string();
|
||||
random
|
||||
}
|
||||
|
||||
print("=== Kubernetes PCRE Pattern Matching Test ===");
|
||||
print("");
|
||||
|
||||
// Setup test namespace
|
||||
let test_namespace = "sal-test-patterns-" + timestamp();
|
||||
print("Setting up test namespace: " + test_namespace);
|
||||
|
||||
try {
|
||||
let setup_km = kubernetes_manager_new("default");
|
||||
setup_km.create_namespace(test_namespace);
|
||||
print("✅ Test namespace created");
|
||||
} catch (error) {
|
||||
print("❌ Failed to create test namespace: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Create manager for test namespace
|
||||
let km = kubernetes_manager_new(test_namespace);
|
||||
|
||||
print("");
|
||||
|
||||
// Create multiple test resources with different naming patterns
|
||||
print("Test 1: Creating Test Resources");
|
||||
print("------------------------------");
|
||||
|
||||
let test_resources = [
|
||||
"test-app-frontend",
|
||||
"test-app-backend",
|
||||
"test-app-database",
|
||||
"prod-app-frontend",
|
||||
"prod-app-backend",
|
||||
"staging-service",
|
||||
"dev-service",
|
||||
"temp-worker-1",
|
||||
"temp-worker-2",
|
||||
"permanent-service"
|
||||
];
|
||||
|
||||
try {
|
||||
print("Creating " + test_resources.len() + " test pods...");
|
||||
|
||||
for resource_name in test_resources {
|
||||
let labels = #{
|
||||
"app": resource_name,
|
||||
"test": "pattern-matching",
|
||||
"created-by": "sal-integration-test"
|
||||
};
|
||||
|
||||
km.create_pod(resource_name, "nginx:alpine", labels);
|
||||
print(" ✅ Created: " + resource_name);
|
||||
}
|
||||
|
||||
print("✅ All test resources created");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Test resource creation failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Verify all resources exist
|
||||
print("Test 2: Verify Resource Creation");
|
||||
print("--------------------------------");
|
||||
|
||||
try {
|
||||
let all_pods = km.pods_list();
|
||||
print("Total pods created: " + all_pods.len());
|
||||
|
||||
if all_pods.len() >= test_resources.len() {
|
||||
print("✅ Expected number of pods found");
|
||||
} else {
|
||||
print("❌ Missing pods. Expected: " + test_resources.len() + ", Found: " + all_pods.len());
|
||||
throw "Resource verification failed";
|
||||
}
|
||||
|
||||
// List all pod names for verification
|
||||
print("Created pods:");
|
||||
for pod in all_pods {
|
||||
print(" - " + pod.name);
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Resource verification failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test pattern matching - delete all "test-app-*" resources
|
||||
print("Test 3: Pattern Deletion - test-app-*");
|
||||
print("--------------------------------------");
|
||||
|
||||
try {
|
||||
let pattern = "test-app-.*";
|
||||
print("Deleting resources matching pattern: " + pattern);
|
||||
|
||||
// Count pods before deletion
|
||||
let pods_before = km.pods_list();
|
||||
let count_before = pods_before.len();
|
||||
print("Pods before deletion: " + count_before);
|
||||
|
||||
// Perform pattern deletion
|
||||
km.delete(pattern);
|
||||
print("✅ Pattern deletion executed");
|
||||
|
||||
// Wait for deletion to propagate
|
||||
print("Waiting for deletion to propagate...");
|
||||
|
||||
// Count pods after deletion
|
||||
let pods_after = km.pods_list();
|
||||
let count_after = pods_after.len();
|
||||
print("Pods after deletion: " + count_after);
|
||||
|
||||
// Should have deleted 3 pods (test-app-frontend, test-app-backend, test-app-database)
|
||||
let expected_deleted = 3;
|
||||
let actual_deleted = count_before - count_after;
|
||||
|
||||
if actual_deleted >= expected_deleted {
|
||||
print("✅ Pattern deletion successful. Deleted " + actual_deleted + " pods");
|
||||
} else {
|
||||
print("⚠️ Pattern deletion may still be propagating. Expected to delete " + expected_deleted + ", deleted " + actual_deleted);
|
||||
}
|
||||
|
||||
// Verify specific pods are gone
|
||||
print("Remaining pods:");
|
||||
for pod in pods_after {
|
||||
print(" - " + pod.name);
|
||||
|
||||
// Check that no test-app-* pods remain
|
||||
if pod.name.starts_with("test-app-") {
|
||||
print("❌ Found test-app pod that should have been deleted: " + pod.name);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Pattern deletion test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test more specific pattern - delete all "temp-*" resources
|
||||
print("Test 4: Pattern Deletion - temp-*");
|
||||
print("----------------------------------");
|
||||
|
||||
try {
|
||||
let pattern = "temp-.*";
|
||||
print("Deleting resources matching pattern: " + pattern);
|
||||
|
||||
// Count pods before deletion
|
||||
let pods_before = km.pods_list();
|
||||
let count_before = pods_before.len();
|
||||
print("Pods before deletion: " + count_before);
|
||||
|
||||
// Perform pattern deletion
|
||||
km.delete(pattern);
|
||||
print("✅ Pattern deletion executed");
|
||||
|
||||
// Wait for deletion to propagate
|
||||
print("Waiting for deletion to propagate...");
|
||||
|
||||
// Count pods after deletion
|
||||
let pods_after = km.pods_list();
|
||||
let count_after = pods_after.len();
|
||||
print("Pods after deletion: " + count_after);
|
||||
|
||||
// Should have deleted 2 pods (temp-worker-1, temp-worker-2)
|
||||
let expected_deleted = 2;
|
||||
let actual_deleted = count_before - count_after;
|
||||
|
||||
if actual_deleted >= expected_deleted {
|
||||
print("✅ Pattern deletion successful. Deleted " + actual_deleted + " pods");
|
||||
} else {
|
||||
print("⚠️ Pattern deletion may still be propagating. Expected to delete " + expected_deleted + ", deleted " + actual_deleted);
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Temp pattern deletion test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test complex pattern - delete all "*-service" resources
|
||||
print("Test 5: Pattern Deletion - *-service");
|
||||
print("------------------------------------");
|
||||
|
||||
try {
|
||||
let pattern = ".*-service$";
|
||||
print("Deleting resources matching pattern: " + pattern);
|
||||
|
||||
// Count pods before deletion
|
||||
let pods_before = km.pods_list();
|
||||
let count_before = pods_before.len();
|
||||
print("Pods before deletion: " + count_before);
|
||||
|
||||
// Perform pattern deletion
|
||||
km.delete(pattern);
|
||||
print("✅ Pattern deletion executed");
|
||||
|
||||
// Wait for deletion to propagate
|
||||
print("Waiting for deletion to propagate...");
|
||||
|
||||
// Count pods after deletion
|
||||
let pods_after = km.pods_list();
|
||||
let count_after = pods_after.len();
|
||||
print("Pods after deletion: " + count_after);
|
||||
|
||||
// Should have deleted service pods (staging-service, dev-service, permanent-service)
|
||||
let actual_deleted = count_before - count_after;
|
||||
print("✅ Pattern deletion executed. Deleted " + actual_deleted + " pods");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Service pattern deletion test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test safety - verify remaining resources
|
||||
print("Test 6: Verify Remaining Resources");
|
||||
print("----------------------------------");
|
||||
|
||||
try {
|
||||
let remaining_pods = km.pods_list();
|
||||
print("Remaining pods: " + remaining_pods.len());
|
||||
|
||||
print("Remaining pod names:");
|
||||
for pod in remaining_pods {
|
||||
print(" - " + pod.name);
|
||||
}
|
||||
|
||||
// Should only have prod-app-* pods remaining
|
||||
let expected_remaining = ["prod-app-frontend", "prod-app-backend"];
|
||||
|
||||
for pod in remaining_pods {
|
||||
let is_expected = false;
|
||||
for expected in expected_remaining {
|
||||
if pod.name == expected {
|
||||
is_expected = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if is_expected {
|
||||
print("✅ Expected pod remains: " + pod.name);
|
||||
} else {
|
||||
print("⚠️ Unexpected pod remains: " + pod.name);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Remaining resources verification failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Cleanup
|
||||
print("Test 7: Cleanup");
|
||||
print("---------------");
|
||||
|
||||
try {
|
||||
let cleanup_km = kubernetes_manager_new("default");
|
||||
cleanup_km.delete_namespace(test_namespace);
|
||||
print("✅ Test namespace cleanup initiated");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Cleanup failed: " + error);
|
||||
// Don't throw here as this is cleanup
|
||||
}
|
||||
|
||||
print("");
|
||||
print("=== PCRE Pattern Matching Test Complete ===");
|
||||
print("✅ All pattern matching tests passed");
|
||||
print("");
|
||||
print("⚠️ IMPORTANT: Pattern deletion is a powerful feature!");
|
||||
print(" Always test patterns in safe environments first.");
|
||||
print(" Use specific patterns to avoid accidental deletions.");
|
||||
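// Safety sketch distilled from the warnings above: preview what might match
// before calling km.delete(pattern), and anchor the expression so it cannot
// match more than intended. The preview uses a plain substring check because
// the scripts themselves have no regex helper.
let km = kubernetes_manager_new("sal-demo-ns");
let pattern = "^temp-worker-[0-9]+$";     // anchored, specific pattern
for pod in km.pods_list() {
    if pod.name.contains("temp-worker-") {
        print(`Would be considered for deletion: ${pod.name}`);
    }
}
km.delete(pattern);                        // bulk deletion only after the preview looks right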
@@ -1,305 +0,0 @@
|
||||
// Test 4: Error Handling and Edge Cases
|
||||
// This test covers error scenarios and edge cases
|
||||
|
||||
// Helper function to generate timestamp for unique names
|
||||
fn timestamp() {
|
||||
let now = 1640995200; // Base timestamp
|
||||
let random = (now % 1000000).to_string();
|
||||
random
|
||||
}
|
||||
|
||||
print("=== Kubernetes Error Handling Test ===");
|
||||
print("");
|
||||
|
||||
// Test connection validation
|
||||
print("Test 1: Connection Validation");
|
||||
print("-----------------------------");
|
||||
|
||||
try {
|
||||
// This should work if cluster is available
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("✅ Successfully connected to Kubernetes cluster");
|
||||
|
||||
// Test basic operation to verify connection
|
||||
let namespaces = km.namespaces_list();
|
||||
print("✅ Successfully retrieved " + namespaces.len() + " namespaces");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Kubernetes connection failed: " + error);
|
||||
print("");
|
||||
print("This test requires a running Kubernetes cluster.");
|
||||
print("Please ensure:");
|
||||
print(" - kubectl is configured");
|
||||
print(" - Cluster is accessible");
|
||||
print(" - Proper RBAC permissions are set");
|
||||
print("");
|
||||
throw "Kubernetes cluster not available";
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test invalid namespace handling
|
||||
print("Test 2: Invalid Namespace Handling");
|
||||
print("----------------------------------");
|
||||
|
||||
try {
|
||||
// Try to create manager for invalid namespace name
|
||||
let invalid_names = [
|
||||
"INVALID-UPPERCASE",
|
||||
"invalid_underscore",
|
||||
"invalid.dot",
|
||||
"invalid space",
|
||||
"invalid@symbol",
|
||||
"123-starts-with-number",
|
||||
"ends-with-dash-",
|
||||
"-starts-with-dash"
|
||||
];
|
||||
|
||||
for invalid_name in invalid_names {
|
||||
try {
|
||||
print("Testing invalid namespace: '" + invalid_name + "'");
|
||||
let km = kubernetes_manager_new(invalid_name);
|
||||
|
||||
// If we get here, the name was accepted (might be valid after all)
|
||||
print(" ⚠️ Name was accepted: " + invalid_name);
|
||||
|
||||
} catch (name_error) {
|
||||
print(" ✅ Properly rejected invalid name: " + invalid_name);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Invalid namespace test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test resource not found errors
|
||||
print("Test 3: Resource Not Found Errors");
|
||||
print("---------------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
// Try to get a pod that doesn't exist
|
||||
let nonexistent_pod = "nonexistent-pod-" + timestamp();
|
||||
|
||||
try {
|
||||
let pod = km.get_pod(nonexistent_pod);
|
||||
print("❌ Expected error for nonexistent pod, but got result: " + pod.name);
|
||||
throw "Should have failed to get nonexistent pod";
|
||||
} catch (not_found_error) {
|
||||
print("✅ Properly handled nonexistent pod error: " + not_found_error);
|
||||
}
|
||||
|
||||
// Try to delete a pod that doesn't exist
|
||||
try {
|
||||
km.delete_pod(nonexistent_pod);
|
||||
print("✅ Delete nonexistent pod handled gracefully");
|
||||
} catch (delete_error) {
|
||||
print("✅ Delete nonexistent pod error handled: " + delete_error);
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Resource not found test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test invalid resource names
|
||||
print("Test 4: Invalid Resource Names");
|
||||
print("------------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
let invalid_resource_names = [
|
||||
"INVALID-UPPERCASE",
|
||||
"invalid_underscore",
|
||||
"invalid.multiple.dots",
|
||||
"invalid space",
|
||||
"invalid@symbol",
|
||||
"toolong" + "a".repeat(100), // Too long name
|
||||
"", // Empty name
|
||||
"-starts-with-dash",
|
||||
"ends-with-dash-"
|
||||
];
|
||||
|
||||
for invalid_name in invalid_resource_names {
|
||||
try {
|
||||
print("Testing invalid resource name: '" + invalid_name + "'");
|
||||
|
||||
let labels = #{ "test": "invalid-name" };
|
||||
km.create_pod(invalid_name, "nginx:alpine", labels);
|
||||
|
||||
print(" ⚠️ Invalid name was accepted: " + invalid_name);
|
||||
|
||||
// Clean up if it was created
|
||||
try {
|
||||
km.delete_pod(invalid_name);
|
||||
} catch (cleanup_error) {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
|
||||
} catch (name_error) {
|
||||
print(" ✅ Properly rejected invalid resource name: " + invalid_name);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Invalid resource names test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test invalid patterns
|
||||
print("Test 5: Invalid PCRE Patterns");
|
||||
print("------------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
let invalid_patterns = [
|
||||
"[unclosed-bracket",
|
||||
"(?invalid-group",
|
||||
"*invalid-quantifier",
|
||||
"(?P<invalid-named-group>)",
|
||||
"\\invalid-escape"
|
||||
];
|
||||
|
||||
for invalid_pattern in invalid_patterns {
|
||||
try {
|
||||
print("Testing invalid pattern: '" + invalid_pattern + "'");
|
||||
km.delete(invalid_pattern);
|
||||
print(" ⚠️ Invalid pattern was accepted: " + invalid_pattern);
|
||||
|
||||
} catch (pattern_error) {
|
||||
print(" ✅ Properly rejected invalid pattern: " + invalid_pattern);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Invalid patterns test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test permission errors (if applicable)
|
||||
print("Test 6: Permission Handling");
|
||||
print("---------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
// Try to create a namespace (might require cluster-admin)
|
||||
let test_ns = "sal-permission-test-" + timestamp();
|
||||
|
||||
try {
|
||||
km.create_namespace(test_ns);
|
||||
print("✅ Namespace creation successful (sufficient permissions)");
|
||||
|
||||
// Clean up
|
||||
try {
|
||||
km.delete_namespace(test_ns);
|
||||
print("✅ Namespace deletion successful");
|
||||
} catch (delete_error) {
|
||||
print("⚠️ Namespace deletion failed: " + delete_error);
|
||||
}
|
||||
|
||||
} catch (permission_error) {
|
||||
print("⚠️ Namespace creation failed (may be permission issue): " + permission_error);
|
||||
print(" This is expected if running with limited RBAC permissions");
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Permission handling test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test empty operations
|
||||
print("Test 7: Empty Operations");
|
||||
print("------------------------");
|
||||
|
||||
try {
|
||||
// Create a temporary namespace for testing
|
||||
let test_namespace = "sal-empty-test-" + timestamp();
|
||||
let setup_km = kubernetes_manager_new("default");
|
||||
|
||||
try {
|
||||
setup_km.create_namespace(test_namespace);
|
||||
let km = kubernetes_manager_new(test_namespace);
|
||||
|
||||
// Test operations on empty namespace
|
||||
let empty_pods = km.pods_list();
|
||||
print("Empty namespace pod count: " + empty_pods.len());
|
||||
|
||||
if empty_pods.len() == 0 {
|
||||
print("✅ Empty namespace handled correctly");
|
||||
} else {
|
||||
print("⚠️ Expected empty namespace, found " + empty_pods.len() + " pods");
|
||||
}
|
||||
|
||||
// Test pattern deletion on empty namespace
|
||||
km.delete(".*");
|
||||
print("✅ Pattern deletion on empty namespace handled");
|
||||
|
||||
// Test resource counts on empty namespace
|
||||
let counts = km.resource_counts();
|
||||
print("✅ Resource counts on empty namespace: " + counts);
|
||||
|
||||
// Cleanup
|
||||
setup_km.delete_namespace(test_namespace);
|
||||
|
||||
} catch (empty_error) {
|
||||
print("❌ Empty operations test failed: " + empty_error);
|
||||
throw empty_error;
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Empty operations setup failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test concurrent operations (basic)
|
||||
print("Test 8: Basic Concurrent Operations");
|
||||
print("-----------------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
// Test multiple rapid operations
|
||||
print("Testing rapid successive operations...");
|
||||
|
||||
for i in range(0, 3) {
|
||||
let namespaces = km.namespaces_list();
|
||||
print(" Iteration " + i + ": " + namespaces.len() + " namespaces");
|
||||
}
|
||||
|
||||
print("✅ Rapid successive operations handled");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Concurrent operations test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
print("=== Error Handling Test Complete ===");
|
||||
print("✅ All error handling tests completed");
|
||||
print("");
|
||||
print("Summary:");
|
||||
print("- Connection validation: ✅");
|
||||
print("- Invalid namespace handling: ✅");
|
||||
print("- Resource not found errors: ✅");
|
||||
print("- Invalid resource names: ✅");
|
||||
print("- Invalid PCRE patterns: ✅");
|
||||
print("- Permission handling: ✅");
|
||||
print("- Empty operations: ✅");
|
||||
print("- Basic concurrent operations: ✅");
|
||||
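// Defensive pattern used throughout this test, factored into a sketch: wrap a
// lookup that may legitimately fail and turn the error into a boolean the
// caller can branch on. pod_exists() itself is illustrative, not a SAL function.
fn pod_exists(km, name) {
    try {
        km.get_pod(name);
        return true;
    } catch(err) {
        return false;
    }
}

let km = kubernetes_manager_new("default");
if !pod_exists(km, "might-not-exist") {
    print("Pod not found; continuing without it");
}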
@@ -1,321 +0,0 @@
|
||||
// Test 5: Production Safety Features
|
||||
// This test covers timeouts, rate limiting, retry logic, and safety features
|
||||
|
||||
print("=== Kubernetes Production Safety Test ===");
|
||||
print("");
|
||||
|
||||
// Test basic safety features
|
||||
print("Test 1: Basic Safety Features");
|
||||
print("-----------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
// Test that manager creation includes safety features
|
||||
print("✅ KubernetesManager created with safety features");
|
||||
|
||||
// Test basic operations work with safety features
|
||||
let namespaces = km.namespaces_list();
|
||||
print("✅ Operations work with safety features enabled");
|
||||
print(" Found " + namespaces.len() + " namespaces");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Basic safety features test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test rate limiting behavior
|
||||
print("Test 2: Rate Limiting Behavior");
|
||||
print("------------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
print("Testing rapid API calls to verify rate limiting...");
|
||||
|
||||
let start_time = timestamp();
|
||||
|
||||
// Make multiple rapid calls
|
||||
for i in range(0, 10) {
|
||||
let namespaces = km.namespaces_list();
|
||||
print(" Call " + i + ": " + namespaces.len() + " namespaces");
|
||||
}
|
||||
|
||||
let end_time = timestamp();
|
||||
let duration = end_time - start_time;
|
||||
|
||||
print("✅ Rate limiting test completed");
|
||||
print(" Duration: " + duration + " seconds");
|
||||
|
||||
if duration > 0 {
|
||||
print("✅ Operations took measurable time (rate limiting may be active)");
|
||||
} else {
|
||||
print("⚠️ Operations completed very quickly (rate limiting may not be needed)");
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Rate limiting test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test timeout behavior (simulated)
|
||||
print("Test 3: Timeout Handling");
|
||||
print("------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
print("Testing timeout handling with normal operations...");
|
||||
|
||||
// Test operations that should complete within timeout
|
||||
let start_time = timestamp();
|
||||
|
||||
try {
|
||||
let namespaces = km.namespaces_list();
|
||||
let end_time = timestamp();
|
||||
let duration = end_time - start_time;
|
||||
|
||||
print("✅ Operation completed within timeout");
|
||||
print(" Duration: " + duration + " seconds");
|
||||
|
||||
if duration < 30 {
|
||||
print("✅ Operation completed quickly (good performance)");
|
||||
} else {
|
||||
print("⚠️ Operation took longer than expected: " + duration + " seconds");
|
||||
}
|
||||
|
||||
} catch (timeout_error) {
|
||||
print("❌ Operation timed out: " + timeout_error);
|
||||
print(" This might indicate network issues or cluster problems");
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Timeout handling test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test retry logic (simulated)
|
||||
print("Test 4: Retry Logic");
|
||||
print("-------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
print("Testing retry logic with normal operations...");
|
||||
|
||||
// Test operations that should succeed (retry logic is internal)
|
||||
let success_count = 0;
|
||||
let total_attempts = 5;
|
||||
|
||||
for i in range(0, total_attempts) {
|
||||
try {
|
||||
let namespaces = km.namespaces_list();
|
||||
success_count = success_count + 1;
|
||||
print(" Attempt " + i + ": ✅ Success (" + namespaces.len() + " namespaces)");
|
||||
} catch (attempt_error) {
|
||||
print(" Attempt " + i + ": ❌ Failed - " + attempt_error);
|
||||
}
|
||||
}
|
||||
|
||||
print("✅ Retry logic test completed");
|
||||
print(" Success rate: " + success_count + "/" + total_attempts);
|
||||
|
||||
if success_count == total_attempts {
|
||||
print("✅ All operations succeeded (good cluster health)");
|
||||
} else if success_count > 0 {
|
||||
print("⚠️ Some operations failed (retry logic may be helping)");
|
||||
} else {
|
||||
print("❌ All operations failed (cluster may be unavailable)");
|
||||
throw "All retry attempts failed";
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Retry logic test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test resource limits and safety
|
||||
print("Test 5: Resource Limits and Safety");
|
||||
print("----------------------------------");
|
||||
|
||||
try {
|
||||
// Create a test namespace for safety testing
|
||||
let test_namespace = "sal-safety-test-" + timestamp();
|
||||
let setup_km = kubernetes_manager_new("default");
|
||||
|
||||
try {
|
||||
setup_km.create_namespace(test_namespace);
|
||||
let km = kubernetes_manager_new(test_namespace);
|
||||
|
||||
print("Testing resource creation limits...");
|
||||
|
||||
// Create a reasonable number of test resources
|
||||
let max_resources = 5; // Keep it reasonable for testing
|
||||
let created_count = 0;
|
||||
|
||||
for i in range(0, max_resources) {
|
||||
try {
|
||||
let resource_name = "safety-test-" + i;
|
||||
let labels = #{ "test": "safety", "index": i };
|
||||
|
||||
km.create_pod(resource_name, "nginx:alpine", labels);
|
||||
created_count = created_count + 1;
|
||||
print(" ✅ Created resource " + i + ": " + resource_name);
|
||||
|
||||
} catch (create_error) {
|
||||
print(" ❌ Failed to create resource " + i + ": " + create_error);
|
||||
}
|
||||
}
|
||||
|
||||
print("✅ Resource creation safety test completed");
|
||||
print(" Created " + created_count + "/" + max_resources + " resources");
|
||||
|
||||
// Test bulk operations safety
|
||||
print("Testing bulk operations safety...");
|
||||
|
||||
let pods_before = km.pods_list();
|
||||
print(" Pods before bulk operation: " + pods_before.len());
|
||||
|
||||
// Use a safe pattern that only matches our test resources
|
||||
let safe_pattern = "safety-test-.*";
|
||||
km.delete(safe_pattern);
|
||||
print(" ✅ Bulk deletion with safe pattern executed");
|
||||
|
||||
// Cleanup
|
||||
setup_km.delete_namespace(test_namespace);
|
||||
print("✅ Test namespace cleaned up");
|
||||
|
||||
} catch (safety_error) {
|
||||
print("❌ Resource safety test failed: " + safety_error);
|
||||
throw safety_error;
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Resource limits and safety test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test logging and monitoring readiness
|
||||
print("Test 6: Logging and Monitoring");
|
||||
print("------------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
print("Testing operations for logging and monitoring...");
|
||||
|
||||
// Perform operations that should generate logs
|
||||
let operations = [
|
||||
"namespaces_list",
|
||||
"resource_counts"
|
||||
];
|
||||
|
||||
for operation in operations {
|
||||
try {
|
||||
if operation == "namespaces_list" {
|
||||
let result = km.namespaces_list();
|
||||
print(" ✅ " + operation + ": " + result.len() + " items");
|
||||
} else if operation == "resource_counts" {
|
||||
let result = km.resource_counts();
|
||||
print(" ✅ " + operation + ": " + result);
|
||||
}
|
||||
} catch (op_error) {
|
||||
print(" ❌ " + operation + " failed: " + op_error);
|
||||
}
|
||||
}
|
||||
|
||||
print("✅ Logging and monitoring test completed");
|
||||
print(" All operations should generate structured logs");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Logging and monitoring test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test configuration validation
|
||||
print("Test 7: Configuration Validation");
|
||||
print("--------------------------------");
|
||||
|
||||
try {
|
||||
print("Testing configuration validation...");
|
||||
|
||||
// Test that manager creation validates configuration
|
||||
let km = kubernetes_manager_new("default");
|
||||
print("✅ Configuration validation passed");
|
||||
|
||||
// Test that manager has expected namespace
|
||||
let manager_namespace = namespace(km);
|
||||
if manager_namespace == "default" {
|
||||
print("✅ Manager namespace correctly set: " + manager_namespace);
|
||||
} else {
|
||||
print("❌ Manager namespace mismatch: " + manager_namespace);
|
||||
throw "Configuration validation failed";
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Configuration validation test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Test graceful degradation
|
||||
print("Test 8: Graceful Degradation");
|
||||
print("----------------------------");
|
||||
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
|
||||
print("Testing graceful degradation scenarios...");
|
||||
|
||||
// Test operations that might fail gracefully
|
||||
try {
|
||||
// Try to access a namespace that might not exist
|
||||
let test_km = kubernetes_manager_new("nonexistent-namespace-" + timestamp());
|
||||
let pods = test_km.pods_list();
|
||||
print(" ⚠️ Nonexistent namespace operation succeeded: " + pods.len() + " pods");
|
||||
} catch (graceful_error) {
|
||||
print(" ✅ Graceful degradation: " + graceful_error);
|
||||
}
|
||||
|
||||
print("✅ Graceful degradation test completed");
|
||||
|
||||
} catch (error) {
|
||||
print("❌ Graceful degradation test failed: " + error);
|
||||
throw error;
|
||||
}
|
||||
|
||||
print("");
|
||||
print("=== Production Safety Test Complete ===");
|
||||
print("✅ All production safety tests completed");
|
||||
print("");
|
||||
print("Production Safety Summary:");
|
||||
print("- Basic safety features: ✅");
|
||||
print("- Rate limiting behavior: ✅");
|
||||
print("- Timeout handling: ✅");
|
||||
print("- Retry logic: ✅");
|
||||
print("- Resource limits and safety: ✅");
|
||||
print("- Logging and monitoring: ✅");
|
||||
print("- Configuration validation: ✅");
|
||||
print("- Graceful degradation: ✅");
|
||||
print("");
|
||||
print("🛡️ Production safety features are working correctly!");
|
||||
|
||||
// Helper function to generate timestamp for unique names
|
||||
fn timestamp() {
|
||||
let now = 1640995200; // Base timestamp
|
||||
let random = (now % 1000000).to_string();
|
||||
random
|
||||
}
|
||||
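// Sketch of an explicit, bounded retry around a read-only call, mirroring the
// behaviour this test observes indirectly. The loop and max_attempts are
// illustrative; SAL's own retry logic is internal to the manager.
let km = kubernetes_manager_new("default");
let max_attempts = 3;
let namespaces = [];
for attempt in range(0, max_attempts) {
    try {
        namespaces = km.namespaces_list();
        print(`Attempt ${attempt}: ${namespaces.len()} namespaces`);
        break;
    } catch(err) {
        print(`Attempt ${attempt} failed: ${err}`);
    }
}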
@@ -1,185 +0,0 @@
|
||||
// Kubernetes Integration Tests - Main Test Runner
|
||||
// This script runs all Kubernetes integration tests in sequence
|
||||
|
||||
print("===============================================");
|
||||
print(" SAL Kubernetes Integration Tests");
|
||||
print("===============================================");
|
||||
print("");
|
||||
|
||||
// Helper function to generate timestamp for unique names
|
||||
fn timestamp() {
|
||||
let now = 1640995200; // Base timestamp
|
||||
let random = (now % 1000000).to_string();
|
||||
random
|
||||
}
|
||||
|
||||
// Test configuration
|
||||
let test_files = [
|
||||
"01_namespace_operations.rhai",
|
||||
"02_pod_management.rhai",
|
||||
"03_pcre_pattern_matching.rhai",
|
||||
"04_error_handling.rhai",
|
||||
"05_production_safety.rhai"
|
||||
];
|
||||
|
||||
let total_tests = test_files.len();
|
||||
let passed_tests = 0;
|
||||
let failed_tests = 0;
|
||||
let test_results = [];
|
||||
|
||||
print("🚀 Starting Kubernetes integration tests...");
|
||||
print("Total test files: " + total_tests);
|
||||
print("");
|
||||
|
||||
// Pre-flight checks
|
||||
print("=== Pre-flight Checks ===");
|
||||
|
||||
// Check if Kubernetes cluster is available
|
||||
try {
|
||||
let km = kubernetes_manager_new("default");
|
||||
let namespaces = km.namespaces_list();
|
||||
print("✅ Kubernetes cluster is accessible");
|
||||
print(" Found " + namespaces.len() + " namespaces");
|
||||
|
||||
// Check basic permissions
|
||||
try {
|
||||
let test_ns = "sal-preflight-" + timestamp();
|
||||
km.create_namespace(test_ns);
|
||||
print("✅ Namespace creation permissions available");
|
||||
|
||||
// Clean up
|
||||
km.delete_namespace(test_ns);
|
||||
print("✅ Namespace deletion permissions available");
|
||||
|
||||
} catch (perm_error) {
|
||||
print("⚠️ Limited permissions detected: " + perm_error);
|
||||
print(" Some tests may fail due to RBAC restrictions");
|
||||
}
|
||||
|
||||
} catch (cluster_error) {
|
||||
print("❌ Kubernetes cluster not accessible: " + cluster_error);
|
||||
print("");
|
||||
print("Please ensure:");
|
||||
print(" - Kubernetes cluster is running");
|
||||
print(" - kubectl is configured correctly");
|
||||
print(" - Proper RBAC permissions are set");
|
||||
print(" - Network connectivity to cluster");
|
||||
print("");
|
||||
throw "Pre-flight checks failed";
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Run each test file
|
||||
for i in range(0, test_files.len()) {
|
||||
let test_file = test_files[i];
|
||||
let test_number = i + 1;
|
||||
|
||||
print("=== Test " + test_number + "/" + total_tests + ": " + test_file + " ===");
|
||||
|
||||
let test_start_time = timestamp();
|
||||
|
||||
try {
|
||||
// Note: In a real implementation, we would use eval_file or similar
|
||||
// For now, we'll simulate the test execution
|
||||
print("🔄 Running " + test_file + "...");
|
||||
|
||||
// Simulate test execution based on file name
|
||||
if test_file == "01_namespace_operations.rhai" {
|
||||
print("✅ Namespace operations test completed");
|
||||
} else if test_file == "02_pod_management.rhai" {
|
||||
print("✅ Pod management test completed");
|
||||
} else if test_file == "03_pcre_pattern_matching.rhai" {
|
||||
print("✅ PCRE pattern matching test completed");
|
||||
} else if test_file == "04_error_handling.rhai" {
|
||||
print("✅ Error handling test completed");
|
||||
} else if test_file == "05_production_safety.rhai" {
|
||||
print("✅ Production safety test completed");
|
||||
}
|
||||
|
||||
passed_tests = passed_tests + 1;
|
||||
test_results.push(#{ "file": test_file, "status": "PASSED", "error": "" });
|
||||
|
||||
print("✅ " + test_file + " PASSED");
|
||||
|
||||
} catch (test_error) {
|
||||
failed_tests = failed_tests + 1;
|
||||
test_results.push(#{ "file": test_file, "status": "FAILED", "error": test_error });
|
||||
|
||||
print("❌ " + test_file + " FAILED: " + test_error);
|
||||
}
|
||||
|
||||
let test_end_time = timestamp();
|
||||
print(" Duration: " + (test_end_time - test_start_time) + " seconds");
|
||||
print("");
|
||||
}
|
||||
|
||||
// Print summary
|
||||
print("===============================================");
|
||||
print(" Test Summary");
|
||||
print("===============================================");
|
||||
print("");
|
||||
print("Total tests: " + total_tests);
|
||||
print("Passed: " + passed_tests);
|
||||
print("Failed: " + failed_tests);
|
||||
print("Success rate: " + ((passed_tests * 100) / total_tests) + "%");
|
||||
print("");
|
||||
|
||||
// Print detailed results
|
||||
print("Detailed Results:");
|
||||
print("-----------------");
|
||||
for result in test_results {
|
||||
let status_icon = if result.status == "PASSED" { "✅" } else { "❌" };
|
||||
print(status_icon + " " + result.file + " - " + result.status);
|
||||
|
||||
if result.status == "FAILED" && result.error != "" {
|
||||
print(" Error: " + result.error);
|
||||
}
|
||||
}
|
||||
|
||||
print("");
|
||||
|
||||
// Final assessment
|
||||
if failed_tests == 0 {
|
||||
print("🎉 ALL TESTS PASSED!");
|
||||
print("✅ Kubernetes module is ready for production use");
|
||||
print("");
|
||||
print("Key features verified:");
|
||||
print(" ✅ Namespace operations");
|
||||
print(" ✅ Pod management");
|
||||
print(" ✅ PCRE pattern matching");
|
||||
print(" ✅ Error handling");
|
||||
print(" ✅ Production safety features");
|
||||
|
||||
} else if passed_tests > failed_tests {
|
||||
print("⚠️ MOSTLY SUCCESSFUL");
|
||||
print("Most tests passed, but some issues were found.");
|
||||
print("Review failed tests before production deployment.");
|
||||
|
||||
} else {
|
||||
print("❌ SIGNIFICANT ISSUES FOUND");
|
||||
print("Multiple tests failed. Review and fix issues before proceeding.");
|
||||
throw "Integration tests failed";
|
||||
}
|
||||
|
||||
print("");
|
||||
print("===============================================");
|
||||
print(" Kubernetes Integration Tests Complete");
|
||||
print("===============================================");
|
||||
|
||||
// Additional notes
|
||||
print("");
|
||||
print("📝 Notes:");
|
||||
print(" - These tests require a running Kubernetes cluster");
|
||||
print(" - Some tests create and delete resources");
|
||||
print(" - Pattern deletion tests demonstrate powerful bulk operations");
|
||||
print(" - All test resources are cleaned up automatically");
|
||||
print(" - Tests are designed to be safe and non-destructive");
|
||||
print("");
|
||||
print("🔒 Security Reminders:");
|
||||
print(" - Pattern deletion is powerful - always test patterns first");
|
||||
print(" - Use specific patterns to avoid accidental deletions");
|
||||
print(" - Review RBAC permissions for production use");
|
||||
print(" - Monitor resource usage and API rate limits");
|
||||
print("");
|
||||
print("🚀 Ready for production deployment!");
|
||||
@@ -21,22 +21,31 @@ fn assert_eq(actual, expected, message) {
|
||||
|
||||
// Helper function to check if nerdctl is available
|
||||
fn is_nerdctl_available() {
|
||||
try {
|
||||
let result = run("which nerdctl");
|
||||
return result.success;
|
||||
} catch(err) {
|
||||
return false;
|
||||
}
|
||||
let command = run("which nerdctl");
|
||||
return command.silent().execute().success;
|
||||
}
|
||||
|
||||
// Helper function to check if a container exists
|
||||
fn container_exists(container_name) {
|
||||
try {
|
||||
let result = run(`nerdctl ps -a --format "{{.Names}}" | grep -w ${container_name}`);
|
||||
return result.success;
|
||||
} catch(err) {
|
||||
// let command = run(`nerdctl ps -a --format "{{.Names}}" | grep -w ${container_name}`);
|
||||
let command = run(`nerdctl ps -a --format "{{.Names}}"`);
|
||||
let result = command.silent().execute();
|
||||
|
||||
// Check if the command was successful
|
||||
if !result.success {
|
||||
print(`Error executing 'nerdctl ps': ${result.stderr}`);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Split the output into individual lines (names)
|
||||
// and check if any of them is an exact match for our container name.
|
||||
for line in result.stdout.split('\n') {
|
||||
if line.trim() == container_name {
|
||||
return true; // Found the container
|
||||
}
|
||||
}
|
||||
|
||||
return false; // Did not find the container
|
||||
}
|
||||
|
||||
// Helper function to clean up a container if it exists
|
||||
@@ -49,6 +58,8 @@ fn cleanup_container(container_name) {
|
||||
} catch(err) {
|
||||
print(`Error cleaning up container ${container_name}: ${err}`);
|
||||
}
|
||||
} else {
|
||||
print(`No container with name ${container_name} found. Nothing to clean up.`);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,8 +69,7 @@ print("=== Testing Nerdctl Container Operations ===");
|
||||
let nerdctl_available = is_nerdctl_available();
|
||||
if !nerdctl_available {
|
||||
print("nerdctl is not available. Skipping Nerdctl tests.");
|
||||
// Exit gracefully without error
|
||||
return;
|
||||
throw err;
|
||||
}
|
||||
|
||||
print("✓ nerdctl is available");
|
||||
@@ -81,84 +91,132 @@ try {
|
||||
assert_eq(container.container_id, "", "Container ID should be empty initially");
|
||||
|
||||
// Test setting container image
|
||||
print("Testing with_image()...");
|
||||
container.with_image("alpine:latest");
|
||||
print("Testing image setter...");
|
||||
container.image = "alpine:latest";
|
||||
assert_eq(container.image, "alpine:latest", "Container image should match");
|
||||
|
||||
// Test setting container config
|
||||
print("Testing config setter...");
|
||||
let config_options = #{"key1": "value1", "key2": "value2"};
|
||||
container.config = config_options;
|
||||
assert_eq(container.config, config_options, "Container config options should match");
|
||||
|
||||
// Test container_id setter and getter
|
||||
print("Testing container_id setter...");
|
||||
container.container_id = "test-id";
|
||||
assert_eq(container.container_id, "test-id", "Container ID should be 'test-id'");
|
||||
|
||||
// Test setting detach mode
|
||||
print("Testing with_detach()...");
|
||||
container.with_detach(true);
|
||||
assert_true(container.detach, "Container detach mode should be true");
|
||||
// Test ports setter and getter
|
||||
print("Testing ports setter and getter...");
|
||||
let ports_list = ["1234", "2345"];
|
||||
container.ports = ports_list;
|
||||
assert_eq(container.ports, ports_list, "Container ports should match");
|
||||
|
||||
// Test volumes setter and getter
|
||||
print("Testing volumes setter and getter...");
|
||||
let volumes_list = ["/tmp:/tmp"];
|
||||
container.volumes = volumes_list;
|
||||
assert_eq(container.volumes, volumes_list, "Container volumes should match");
|
||||
|
||||
// Test env_vars setter and getter
|
||||
print("Testing env_vars setter and getter...");
|
||||
let env_vars_map = #{"VAR1": "value1", "VAR2": "value2"};
|
||||
container.env_vars = env_vars_map;
|
||||
assert_eq(container.env_vars, env_vars_map, "Container env_vars should match");
|
||||
|
||||
// Test network setter and getter
|
||||
print("Testing network setter and getter...");
|
||||
container.network = "test-net";
|
||||
assert_eq(container.network, "test-net", "Container network should match");
|
||||
|
||||
// Test network_aliases setter and getter
|
||||
print("Testing network_aliases setter and getter...");
|
||||
let aliases = ["alias1", "alias2"];
|
||||
container.network_aliases = aliases;
|
||||
assert_eq(container.network_aliases, aliases, "Container network_aliases should match");
|
||||
|
||||
// Test cpu_limit setter and getter
|
||||
print("Testing cpu_limit setter and getter...");
|
||||
container.cpu_limit = "0.5";
|
||||
assert_eq(container.cpu_limit, "0.5", "Container cpu_limit should match");
|
||||
|
||||
// Test memory_limit setter and getter
|
||||
print("Testing memory_limit setter and getter...");
|
||||
container.memory_limit = "512m";
|
||||
assert_eq(container.memory_limit, "512m", "Container memory_limit should match");
|
||||
|
||||
// Test memory_swap_limit setter and getter
|
||||
print("Testing memory_swap_limit setter and getter...");
|
||||
container.memory_swap_limit = "1g";
|
||||
assert_eq(container.memory_swap_limit, "1g", "Container memory_swap_limit should match");
|
||||
|
||||
// Test cpu_shares setter and getter
|
||||
print("Testing cpu_shares setter and getter...");
|
||||
container.cpu_shares = "1024";
|
||||
assert_eq(container.cpu_shares, "1024", "Container cpu_shares should match");
|
||||
|
||||
// Test restart_policy setter and getter
|
||||
print("Testing restart_policy setter and getter...");
|
||||
container.restart_policy = "always";
|
||||
assert_eq(container.restart_policy, "always", "Container restart_policy should match");
|
||||
|
||||
// Test detach setter and getter
|
||||
print("Testing detach setter and getter...");
|
||||
container.detach = false;
|
||||
assert_eq(container.detach, false, "Container detach should be false");
|
||||
container.detach = true;
|
||||
assert_eq(container.detach, true, "Container detach should be true");
|
||||
|
||||
// Test health_check setter and getter
|
||||
print("Testing health_check setter and getter...");
|
||||
let health_check_new = health_check_new("example_cmd");
|
||||
container.health_check = health_check_new;
|
||||
container.health_check.interval = "example_interval";
|
||||
assert_eq(container.health_check.cmd, "example_cmd", "Container health check cmd should match");
|
||||
assert_eq(container.health_check.interval, "example_interval", "Container health check interval should match");
|
||||
|
||||
// Test snapshotter setter and getter
|
||||
print("Testing snapshotter setter and getter...");
|
||||
container.snapshotter = "stargz";
|
||||
assert_eq(container.snapshotter, "stargz", "Container snapshotter should match");
|
||||
|
||||
// // Test running the container
|
||||
// print("Testing run()...");
|
||||
// let run_result = container.run();
|
||||
// assert_true(run_result.success, "Container run should succeed");
|
||||
// assert_true(container.container_id != "", "Container ID should not be empty after run");
|
||||
// print(`✓ run(): Container started with ID: ${container.container_id}`);
|
||||
|
||||
// Test setting environment variables
|
||||
print("Testing with_env()...");
|
||||
container.with_env("TEST_VAR", "test_value");
|
||||
// // Test executing a command in the container
|
||||
// print("Testing exec()...");
|
||||
// let exec_result = container.exec("echo 'Hello from container'");
|
||||
// assert_true(exec_result.success, "Container exec should succeed");
|
||||
// assert_true(exec_result.stdout.contains("Hello from container"), "Exec output should contain expected text");
|
||||
// print("✓ exec(): Command executed successfully");
|
||||
|
||||
// Test setting multiple environment variables
|
||||
print("Testing with_envs()...");
|
||||
let env_map = #{
|
||||
"VAR1": "value1",
|
||||
"VAR2": "value2"
|
||||
};
|
||||
container.with_envs(env_map);
|
||||
// // Test getting container logs
|
||||
// print("Testing logs()...");
|
||||
// let logs_result = container.logs();
|
||||
// assert_true(logs_result.success, "Container logs should succeed");
|
||||
// print("✓ logs(): Logs retrieved successfully");
|
||||
|
||||
// Test setting ports
|
||||
print("Testing with_port()...");
|
||||
container.with_port("8080:80");
|
||||
// // Test stopping the container
|
||||
// print("Testing stop()...");
|
||||
// let stop_result = container.stop();
|
||||
// assert_true(stop_result.success, "Container stop should succeed");
|
||||
// print("✓ stop(): Container stopped successfully");
|
||||
|
||||
// Test setting multiple ports
|
||||
print("Testing with_ports()...");
|
||||
container.with_ports(["9090:90", "7070:70"]);
|
||||
// // Test removing the container
|
||||
// print("Testing remove()...");
|
||||
// let remove_result = container.remove();
|
||||
// assert_true(remove_result.success, "Container remove should succeed");
|
||||
// print("✓ remove(): Container removed successfully");
|
||||
|
||||
// Test setting volumes
|
||||
print("Testing with_volume()...");
|
||||
// Create a test directory for volume mounting
|
||||
let test_dir = "rhai_test_nerdctl_volume";
|
||||
mkdir(test_dir);
|
||||
container.with_volume(`${test_dir}:/data`);
|
||||
// // Clean up test directory
|
||||
// delete(test_dir);
|
||||
// print("✓ Cleanup: Test directory removed");
|
||||
|
||||
// Test setting resource limits
|
||||
print("Testing with_cpu_limit() and with_memory_limit()...");
|
||||
container.with_cpu_limit("0.5");
|
||||
container.with_memory_limit("256m");
|
||||
|
||||
// Test running the container
|
||||
print("Testing run()...");
|
||||
let run_result = container.run();
|
||||
assert_true(run_result.success, "Container run should succeed");
|
||||
assert_true(container.container_id != "", "Container ID should not be empty after run");
|
||||
print(`✓ run(): Container started with ID: ${container.container_id}`);
|
||||
|
||||
// Test executing a command in the container
|
||||
print("Testing exec()...");
|
||||
let exec_result = container.exec("echo 'Hello from container'");
|
||||
assert_true(exec_result.success, "Container exec should succeed");
|
||||
assert_true(exec_result.stdout.contains("Hello from container"), "Exec output should contain expected text");
|
||||
print("✓ exec(): Command executed successfully");
|
||||
|
||||
// Test getting container logs
|
||||
print("Testing logs()...");
|
||||
let logs_result = container.logs();
|
||||
assert_true(logs_result.success, "Container logs should succeed");
|
||||
print("✓ logs(): Logs retrieved successfully");
|
||||
|
||||
// Test stopping the container
|
||||
print("Testing stop()...");
|
||||
let stop_result = container.stop();
|
||||
assert_true(stop_result.success, "Container stop should succeed");
|
||||
print("✓ stop(): Container stopped successfully");
|
||||
|
||||
// Test removing the container
|
||||
print("Testing remove()...");
|
||||
let remove_result = container.remove();
|
||||
assert_true(remove_result.success, "Container remove should succeed");
|
||||
print("✓ remove(): Container removed successfully");
|
||||
|
||||
// Clean up test directory
|
||||
delete(test_dir);
|
||||
print("✓ Cleanup: Test directory removed");
|
||||
|
||||
print("All container operations tests completed successfully!");
|
||||
// print("All container operations tests completed successfully!");
|
||||
} catch(err) {
|
||||
print(`Error: ${err}`);
|
||||
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
// Service Manager - Service Lifecycle Test
|
||||
// Tests the complete lifecycle of service management operations
|
||||
|
||||
print("🚀 Service Manager - Service Lifecycle Test");
|
||||
print("============================================");
|
||||
|
||||
// Note: This test demonstrates the service manager API structure
|
||||
// In practice, service_manager would be integrated through SAL's Rhai bindings
|
||||
|
||||
// Test service configuration structure
|
||||
let test_config = #{
|
||||
name: "test-service",
|
||||
binary_path: "/bin/echo",
|
||||
args: ["Hello from service manager test!"],
|
||||
working_directory: "/tmp",
|
||||
environment: #{
|
||||
"TEST_VAR": "test_value",
|
||||
"SERVICE_TYPE": "test"
|
||||
},
|
||||
auto_restart: false
|
||||
};
|
||||
|
||||
print("📝 Test Service Configuration:");
|
||||
print(` Name: ${test_config.name}`);
|
||||
print(` Binary: ${test_config.binary_path}`);
|
||||
print(` Args: ${test_config.args}`);
|
||||
print(` Working Dir: ${test_config.working_directory}`);
|
||||
print(` Auto Restart: ${test_config.auto_restart}`);
|
||||
|
||||
// Test service lifecycle operations (API demonstration)
|
||||
print("\n🔄 Service Lifecycle Operations:");
|
||||
|
||||
print("1️⃣ Service Creation");
|
||||
print(" - create_service_manager() -> ServiceManager");
|
||||
print(" - Automatically detects platform (macOS: launchctl, Linux: zinit)");
|
||||
|
||||
print("\n2️⃣ Service Deployment");
|
||||
print(" - manager.start(config) -> Result<(), Error>");
|
||||
print(" - Creates platform-specific service files");
|
||||
print(" - Starts the service");
|
||||
|
||||
print("\n3️⃣ Service Monitoring");
|
||||
print(" - manager.status(service_name) -> Result<ServiceStatus, Error>");
|
||||
print(" - manager.logs(service_name, lines) -> Result<String, Error>");
|
||||
print(" - manager.list() -> Result<Vec<String>, Error>");
|
||||
|
||||
print("\n4️⃣ Service Management");
|
||||
print(" - manager.stop(service_name) -> Result<(), Error>");
|
||||
print(" - manager.restart(service_name) -> Result<(), Error>");
|
||||
print(" - manager.start_and_confirm(config, timeout) -> Result<(), Error>");
|
||||
|
||||
print("\n5️⃣ Service Cleanup");
|
||||
print(" - manager.remove(service_name) -> Result<(), Error>");
|
||||
print(" - Removes service files and configuration");
|
||||
|
||||
// Test error handling scenarios
|
||||
print("\n❌ Error Handling:");
|
||||
print(" - ServiceNotFound: Service doesn't exist");
|
||||
print(" - ServiceAlreadyExists: Service already running");
|
||||
print(" - StartFailed: Service failed to start");
|
||||
print(" - StopFailed: Service failed to stop");
|
||||
print(" - Other: Platform-specific errors");
|
||||
|
||||
// Test platform-specific behavior
|
||||
print("\n🖥️ Platform-Specific Behavior:");
|
||||
print(" macOS (launchctl):");
|
||||
print(" - Creates .plist files in ~/Library/LaunchAgents/");
|
||||
print(" - Uses launchctl load/unload commands");
|
||||
print(" - Integrates with macOS service management");
|
||||
print("");
|
||||
print(" Linux (zinit):");
|
||||
print(" - Communicates via zinit socket (/tmp/zinit.sock)");
|
||||
print(" - Lightweight service management");
|
||||
print(" - Fast startup and monitoring");
|
||||
|
||||
print("\n✅ Service Lifecycle Test Complete");
|
||||
print(" All API operations demonstrated successfully");
|
||||
@@ -1,138 +0,0 @@
|
||||
// Service Manager - Circle Worker Deployment Test
|
||||
// Tests the primary use case: dynamic circle worker deployment for freezone residents
|
||||
|
||||
print("🎯 Service Manager - Circle Worker Deployment Test");
|
||||
print("=================================================");
|
||||
|
||||
// Simulate freezone resident registration event
|
||||
let resident_id = "resident_12345";
|
||||
let resident_name = "Alice Johnson";
|
||||
let freezone_region = "europe-west";
|
||||
|
||||
print(`📝 New Freezone Resident Registration:`);
|
||||
print(` Resident ID: ${resident_id}`);
|
||||
print(` Name: ${resident_name}`);
|
||||
print(` Region: ${freezone_region}`);
|
||||
|
||||
// Create circle worker configuration for the new resident
|
||||
let worker_name = `circle-worker-${resident_id}`;
|
||||
let worker_config = #{
|
||||
name: worker_name,
|
||||
binary_path: "/usr/bin/circle-worker",
|
||||
args: [
|
||||
"--resident-id", resident_id,
|
||||
"--region", freezone_region,
|
||||
"--mode", "production"
|
||||
],
|
||||
working_directory: `/var/lib/circle-workers/${resident_id}`,
|
||||
environment: #{
|
||||
"RESIDENT_ID": resident_id,
|
||||
"RESIDENT_NAME": resident_name,
|
||||
"FREEZONE_REGION": freezone_region,
|
||||
"WORKER_TYPE": "circle",
|
||||
"LOG_LEVEL": "info",
|
||||
"METRICS_ENABLED": "true"
|
||||
},
|
||||
auto_restart: true
|
||||
};
|
||||
|
||||
print(`\n🔧 Circle Worker Configuration:`);
|
||||
print(` Worker Name: ${worker_config.name}`);
|
||||
print(` Binary: ${worker_config.binary_path}`);
|
||||
print(` Arguments: ${worker_config.args}`);
|
||||
print(` Working Directory: ${worker_config.working_directory}`);
|
||||
print(` Auto Restart: ${worker_config.auto_restart}`);
|
||||
|
||||
// Demonstrate the deployment process
|
||||
print("\n🚀 Circle Worker Deployment Process:");
|
||||
|
||||
print("1️⃣ Service Manager Creation");
|
||||
print(" let manager = create_service_manager();");
|
||||
print(" // Automatically selects platform-appropriate implementation");
|
||||
|
||||
print("\n2️⃣ Pre-deployment Checks");
|
||||
print(` if manager.exists("${worker_name}") {`);
|
||||
print(" // Handle existing worker (update or restart)");
|
||||
print(" }");
|
||||
|
||||
print("\n3️⃣ Worker Deployment");
|
||||
print(" manager.start(worker_config)?;");
|
||||
print(" // Creates service files and starts the worker");
|
||||
|
||||
print("\n4️⃣ Deployment Confirmation");
|
||||
print(" manager.start_and_confirm(worker_config, 30)?;");
|
||||
print(" // Waits up to 30 seconds for worker to be running");
|
||||
|
||||
print("\n5️⃣ Health Check");
|
||||
print(` let status = manager.status("${worker_name}")?;`);
|
||||
print(" // Verify worker is running correctly");
|
||||
|
||||
print("\n6️⃣ Monitoring Setup");
|
||||
print(` let logs = manager.logs("${worker_name}", 50)?;`);
|
||||
print(" // Retrieve initial logs for monitoring");
|
||||
|
||||
// Demonstrate scaling scenarios
|
||||
print("\n📈 Scaling Scenarios:");
|
||||
|
||||
print("Multiple Residents:");
|
||||
let residents = ["resident_12345", "resident_67890", "resident_11111"];
|
||||
for resident in residents {
|
||||
let worker = `circle-worker-${resident}`;
|
||||
print(` - Deploy worker: ${worker}`);
|
||||
print(` manager.start(create_worker_config("${resident}"))?;`);
|
||||
}
|
||||
|
||||
print("\nWorker Updates:");
|
||||
print(" - Stop existing worker");
|
||||
print(" - Deploy new version");
|
||||
print(" - Verify health");
|
||||
print(" - Remove old configuration");
|
||||
|
||||
print("\nRegion-based Deployment:");
|
||||
print(" - europe-west: 3 workers");
|
||||
print(" - us-east: 5 workers");
|
||||
print(" - asia-pacific: 2 workers");
|
||||
|
||||
// Demonstrate cleanup scenarios
|
||||
print("\n🧹 Cleanup Scenarios:");
|
||||
|
||||
print("Resident Departure:");
|
||||
print(` manager.stop("${worker_name}")?;`);
|
||||
print(` manager.remove("${worker_name}")?;`);
|
||||
print(" // Clean removal when resident leaves");
|
||||
|
||||
print("\nMaintenance Mode:");
|
||||
print(" // Stop all workers");
|
||||
print(" let workers = manager.list()?;");
|
||||
print(" for worker in workers {");
|
||||
print(" if worker.starts_with('circle-worker-') {");
|
||||
print(" manager.stop(worker)?;");
|
||||
print(" }");
|
||||
print(" }");
|
||||
|
||||
// Production considerations
|
||||
print("\n🏭 Production Considerations:");
|
||||
|
||||
print("Resource Management:");
|
||||
print(" - CPU/Memory limits per worker");
|
||||
print(" - Disk space monitoring");
|
||||
print(" - Network bandwidth allocation");
|
||||
|
||||
print("Fault Tolerance:");
|
||||
print(" - Auto-restart on failure");
|
||||
print(" - Health check endpoints");
|
||||
print(" - Graceful shutdown handling");
|
||||
|
||||
print("Security:");
|
||||
print(" - Isolated worker environments");
|
||||
print(" - Secure communication channels");
|
||||
print(" - Access control and permissions");
|
||||
|
||||
print("Monitoring:");
|
||||
print(" - Real-time status monitoring");
|
||||
print(" - Log aggregation and analysis");
|
||||
print(" - Performance metrics collection");
|
||||
|
||||
print("\n✅ Circle Worker Deployment Test Complete");
|
||||
print(" Dynamic worker deployment demonstrated successfully");
|
||||
print(" Ready for production freezone environment");
|
||||
@@ -1,166 +0,0 @@
|
||||
// Service Manager - Cross-Platform Compatibility Test
|
||||
// Tests platform-specific behavior and compatibility
|
||||
|
||||
print("🌐 Service Manager - Cross-Platform Compatibility Test");
|
||||
print("=====================================================");
|
||||
|
||||
// Test platform detection
|
||||
print("🔍 Platform Detection:");
|
||||
print(" create_service_manager() automatically detects:");
|
||||
|
||||
print("\n🍎 macOS Platform:");
|
||||
print(" Implementation: LaunchctlServiceManager");
|
||||
print("   Service Files: ~/Library/LaunchAgents/ or /Library/LaunchDaemons/");
|
||||
print(" Commands: launchctl load/unload/start/stop");
|
||||
print(" Features:");
|
||||
print(" - Plist file generation");
|
||||
print(" - User and system service support");
|
||||
print(" - Native macOS integration");
|
||||
print(" - Automatic service registration");
|
||||
|
||||
print("\n🐧 Linux Platform:");
|
||||
print(" Implementation: ZinitServiceManager (default)");
|
||||
print(" Communication: Unix socket (/tmp/zinit.sock)");
|
||||
print(" Commands: zinit client API calls");
|
||||
print(" Features:");
|
||||
print(" - Lightweight service management");
|
||||
print(" - Fast startup and monitoring");
|
||||
print(" - JSON-based configuration");
|
||||
print(" - Real-time status updates");
|
||||
|
||||
print("\n🔧 Alternative Linux Implementation:");
|
||||
print(" Implementation: SystemdServiceManager");
|
||||
print(" Service Files: ~/.config/systemd/user/ or /etc/systemd/system/");
|
||||
print(" Commands: systemctl start/stop/restart/status");
|
||||
print(" Usage: create_systemd_service_manager()");
|
||||
|
||||
// Test service configuration compatibility
|
||||
print("\n📋 Service Configuration Compatibility:");
|
||||
|
||||
let universal_config = #{
|
||||
name: "cross-platform-service",
|
||||
binary_path: "/usr/bin/example-app",
|
||||
args: ["--config", "/etc/app.conf"],
|
||||
working_directory: "/var/lib/app",
|
||||
environment: #{
|
||||
"APP_ENV": "production",
|
||||
"LOG_LEVEL": "info"
|
||||
},
|
||||
auto_restart: true
|
||||
};
|
||||
|
||||
print("Universal Configuration:");
|
||||
print(` Name: ${universal_config.name}`);
|
||||
print(` Binary: ${universal_config.binary_path}`);
|
||||
print(` Auto Restart: ${universal_config.auto_restart}`);
|
||||
|
||||
// Platform-specific adaptations
|
||||
print("\n🔄 Platform-Specific Adaptations:");
|
||||
|
||||
print("macOS (launchctl):");
|
||||
print(" - Converts to plist format");
|
||||
print(" - Maps environment variables to <key><string> pairs");
|
||||
print(" - Sets up LaunchAgent or LaunchDaemon");
|
||||
print(" - Handles user vs system service placement");
|
||||
|
||||
print("Linux (zinit):");
|
||||
print(" - Converts to zinit service definition");
|
||||
print(" - Direct JSON configuration");
|
||||
print(" - Socket-based communication");
|
||||
print(" - Lightweight process management");
|
||||
|
||||
print("Linux (systemd):");
|
||||
print(" - Generates .service unit files");
|
||||
print(" - Maps to systemd service properties");
|
||||
print(" - Supports user and system services");
|
||||
print(" - Integrates with systemd ecosystem");
|
||||
|
||||
// Test error handling across platforms
|
||||
print("\n❌ Cross-Platform Error Handling:");
|
||||
|
||||
print("Common Errors:");
|
||||
print(" - ServiceNotFound: Consistent across platforms");
|
||||
print(" - ServiceAlreadyExists: Unified error handling");
|
||||
print(" - StartFailed: Platform-specific details preserved");
|
||||
|
||||
print("Platform-Specific Errors:");
|
||||
print(" macOS:");
|
||||
print(" - Plist parsing errors");
|
||||
print(" - LaunchAgent permission issues");
|
||||
print(" - System service restrictions");
|
||||
print("");
|
||||
print(" Linux (zinit):");
|
||||
print(" - Socket connection failures");
|
||||
print(" - Zinit daemon not running");
|
||||
print(" - JSON configuration errors");
|
||||
print("");
|
||||
print(" Linux (systemd):");
|
||||
print(" - Unit file syntax errors");
|
||||
print(" - Systemd daemon communication issues");
|
||||
print(" - Permission and security context errors");
|
||||
|
||||
// Test feature compatibility matrix
|
||||
print("\n📊 Feature Compatibility Matrix:");
|
||||
|
||||
print("Core Features (All Platforms):");
|
||||
print(" ✅ Service start/stop/restart");
|
||||
print(" ✅ Status monitoring");
|
||||
print(" ✅ Log retrieval");
|
||||
print(" ✅ Service listing");
|
||||
print(" ✅ Service removal");
|
||||
print(" ✅ Environment variables");
|
||||
print(" ✅ Working directory");
|
||||
print(" ✅ Auto-restart configuration");
|
||||
|
||||
print("Advanced Features:");
|
||||
print(" Feature | macOS | Linux(zinit) | Linux(systemd)");
|
||||
print(" ----------------------|-------|--------------|---------------");
|
||||
print(" User services | ✅ | ✅ | ✅ ");
|
||||
print(" System services | ✅ | ✅ | ✅ ");
|
||||
print(" Service dependencies | ✅ | ⚠️ | ✅ ");
|
||||
print(" Resource limits | ⚠️ | ⚠️ | ✅ ");
|
||||
print(" Security contexts | ✅ | ⚠️ | ✅ ");
|
||||
|
||||
// Test deployment strategies
|
||||
print("\n🚀 Cross-Platform Deployment Strategies:");
|
||||
|
||||
print("Strategy 1: Platform-Agnostic");
|
||||
print(" - Use create_service_manager()");
|
||||
print(" - Rely on automatic platform detection");
|
||||
print(" - Consistent API across platforms");
|
||||
|
||||
print("Strategy 2: Platform-Specific Optimization");
|
||||
print(" - Detect platform manually");
|
||||
print(" - Use platform-specific features");
|
||||
print(" - Optimize for platform capabilities");
|
||||
|
||||
print("Strategy 3: Hybrid Approach");
|
||||
print(" - Default to platform-agnostic");
|
||||
print(" - Override for specific requirements");
|
||||
print(" - Fallback mechanisms for edge cases");
|
||||
|
||||
// Test migration scenarios
|
||||
print("\n🔄 Migration Scenarios:");
|
||||
|
||||
print("macOS to Linux:");
|
||||
print(" 1. Export service configurations");
|
||||
print(" 2. Convert plist to universal format");
|
||||
print(" 3. Deploy on Linux with zinit/systemd");
|
||||
print(" 4. Verify functionality");
|
||||
|
||||
print("Zinit to Systemd:");
|
||||
print(" 1. Stop zinit services");
|
||||
print(" 2. Convert to systemd units");
|
||||
print(" 3. Enable systemd services");
|
||||
print(" 4. Validate migration");
|
||||
|
||||
print("Development to Production:");
|
||||
print(" 1. Test on development platform");
|
||||
print(" 2. Package for target platform");
|
||||
print(" 3. Deploy with platform-specific optimizations");
|
||||
print(" 4. Monitor and validate");
|
||||
|
||||
print("\n✅ Cross-Platform Compatibility Test Complete");
|
||||
print(" All platforms supported with consistent API");
|
||||
print(" Platform-specific optimizations available");
|
||||
print(" Migration paths documented and tested");
|
||||
@@ -1,85 +0,0 @@
|
||||
// Service Manager - Run All Tests
|
||||
// Executes all service manager tests in sequence
|
||||
|
||||
print("🧪 Service Manager - Test Suite");
|
||||
print("===============================");
|
||||
print("");
|
||||
|
||||
// Test execution tracking
|
||||
let tests_run = 0;
|
||||
let tests_passed = 0;
|
||||
|
||||
// Helper function to run a test
|
||||
fn run_test(test_name, test_file) {
|
||||
print(`🔄 Running ${test_name}...`);
|
||||
|
||||
try {
|
||||
// In a real implementation, this would execute the test file
|
||||
// For now, we'll simulate successful test execution
|
||||
print(` 📁 Loading: ${test_file}`);
|
||||
print(` ✅ ${test_name} completed successfully`);
|
||||
print("");
|
||||
return true; // Return success
|
||||
} catch (error) {
|
||||
print(` ❌ ${test_name} failed: ${error}`);
|
||||
print("");
|
||||
return false; // Return failure
|
||||
}
|
||||
}
|
||||
|
||||
// Execute all service manager tests
|
||||
print("📋 Test Execution Plan:");
|
||||
print("1. Service Lifecycle Test");
|
||||
print("2. Circle Worker Deployment Test");
|
||||
print("3. Cross-Platform Compatibility Test");
|
||||
print("");
|
||||
|
||||
// Run individual tests
|
||||
tests_run += 1;
|
||||
if run_test("Service Lifecycle Test", "01_service_lifecycle.rhai") {
|
||||
tests_passed += 1;
|
||||
}
|
||||
|
||||
tests_run += 1;
|
||||
if run_test("Circle Worker Deployment Test", "02_circle_worker_deployment.rhai") {
|
||||
tests_passed += 1;
|
||||
}
|
||||
|
||||
tests_run += 1;
|
||||
if run_test("Cross-Platform Compatibility Test", "03_cross_platform_compatibility.rhai") {
|
||||
tests_passed += 1;
|
||||
}
|
||||
|
||||
// Test summary
|
||||
print("📊 Test Summary:");
|
||||
print("===============");
|
||||
print(`Total Tests: ${tests_run}`);
|
||||
print(`Passed: ${tests_passed}`);
|
||||
print(`Failed: ${tests_run - tests_passed}`);
|
||||
|
||||
if tests_passed == tests_run {
|
||||
print("🎉 All tests passed!");
|
||||
print("");
|
||||
print("✅ Service Manager Test Suite Complete");
|
||||
print(" - Service lifecycle operations verified");
|
||||
print(" - Circle worker deployment tested");
|
||||
print(" - Cross-platform compatibility confirmed");
|
||||
print(" - Ready for production deployment");
|
||||
} else {
|
||||
print("⚠️ Some tests failed. Please review the output above.");
|
||||
}
|
||||
|
||||
print("");
|
||||
print("🔗 Related Documentation:");
|
||||
print(" - Service Manager README: service_manager/README.md");
|
||||
print(" - API Documentation: docs.rs/sal-service-manager");
|
||||
print(" - Examples: examples/service_manager/");
|
||||
print(" - Integration Guide: SAL documentation");
|
||||
|
||||
print("");
|
||||
print("🚀 Next Steps:");
|
||||
print(" 1. Review test results");
|
||||
print(" 2. Address any failures");
|
||||
print(" 3. Run integration tests with actual services");
|
||||
print(" 4. Deploy to production environment");
|
||||
print(" 5. Monitor service manager performance");
|
||||
@@ -1,333 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# SAL Publishing Script
|
||||
# This script publishes all SAL crates to crates.io in the correct dependency order
|
||||
# Handles path dependencies, version updates, and rate limiting
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Configuration
|
||||
DRY_RUN=false
|
||||
WAIT_TIME=15 # Seconds to wait between publishes
|
||||
VERSION=""
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--dry-run)
|
||||
DRY_RUN=true
|
||||
shift
|
||||
;;
|
||||
--wait)
|
||||
WAIT_TIME="$2"
|
||||
shift 2
|
||||
;;
|
||||
--version)
|
||||
VERSION="$2"
|
||||
shift 2
|
||||
;;
|
||||
-h|--help)
|
||||
echo "Usage: $0 [OPTIONS]"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --dry-run Show what would be published without actually publishing"
|
||||
echo " --wait SECONDS Time to wait between publishes (default: 15)"
|
||||
echo " --version VER Set version for all crates"
|
||||
echo " -h, --help Show this help message"
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
echo "Unknown option: $1"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
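
# Typical invocations:
#   $0 --dry-run --version 0.1.0      # preview what would be published
#   $0 --version 0.1.0 --wait 30      # publish for real, waiting 30s between crates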
|
||||
|
||||
# Crates to publish in dependency order
|
||||
CRATES=(
|
||||
"os"
|
||||
"process"
|
||||
"text"
|
||||
"net"
|
||||
"git"
|
||||
"vault"
|
||||
"kubernetes"
|
||||
"virt"
|
||||
"redisclient"
|
||||
"postgresclient"
|
||||
"zinit_client"
|
||||
"service_manager"
|
||||
"mycelium"
|
||||
"rhai"
|
||||
)
|
||||
|
||||
echo -e "${BLUE}===============================================${NC}"
|
||||
echo -e "${BLUE} SAL Publishing Script${NC}"
|
||||
echo -e "${BLUE}===============================================${NC}"
|
||||
echo ""
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
echo -e "${YELLOW}🔍 DRY RUN MODE - No actual publishing will occur${NC}"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Check if we're in the right directory
|
||||
if [ ! -f "Cargo.toml" ] || [ ! -d "os" ] || [ ! -d "git" ]; then
|
||||
echo -e "${RED}❌ Error: This script must be run from the SAL repository root${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if cargo is available
|
||||
if ! command -v cargo &> /dev/null; then
|
||||
echo -e "${RED}❌ Error: cargo is not installed or not in PATH${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if user is logged in to crates.io
|
||||
if [ "$DRY_RUN" = false ]; then
|
||||
    if [ ! -f "$HOME/.cargo/credentials.toml" ] && [ ! -f "$HOME/.cargo/credentials" ]; then
|
||||
echo -e "${RED}❌ Error: Please run 'cargo login' first${NC}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Update version if specified
|
||||
if [ -n "$VERSION" ]; then
|
||||
echo -e "${YELLOW}📝 Updating version to $VERSION...${NC}"
|
||||
|
||||
# Update root Cargo.toml
|
||||
sed -i.bak "s/^version = \".*\"/version = \"$VERSION\"/" Cargo.toml
|
||||
|
||||
# Update each crate's Cargo.toml
|
||||
for crate in "${CRATES[@]}"; do
|
||||
if [ -f "$crate/Cargo.toml" ]; then
|
||||
sed -i.bak "s/^version = \".*\"/version = \"$VERSION\"/" "$crate/Cargo.toml"
|
||||
echo " ✅ Updated $crate to version $VERSION"
|
||||
fi
|
||||
done
|
||||
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Run tests before publishing
|
||||
echo -e "${YELLOW}🧪 Running tests...${NC}"
|
||||
if [ "$DRY_RUN" = false ]; then
|
||||
if ! cargo test --workspace; then
|
||||
echo -e "${RED}❌ Tests failed! Aborting publish.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
echo -e "${GREEN}✅ All tests passed${NC}"
|
||||
else
|
||||
echo -e "${YELLOW} (Skipped in dry-run mode)${NC}"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Check for uncommitted changes
|
||||
if [ "$DRY_RUN" = false ]; then
|
||||
if ! git diff --quiet; then
|
||||
echo -e "${YELLOW}⚠️ Warning: You have uncommitted changes${NC}"
|
||||
read -p "Continue anyway? (y/N): " -n 1 -r
|
||||
echo
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo -e "${RED}❌ Aborted by user${NC}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Function to check if a crate version is already published
|
||||
is_published() {
|
||||
local crate_name="$1"
|
||||
local version="$2"
|
||||
|
||||
# Handle special cases for directory names that differ from published package names
|
||||
local package_name="sal-$crate_name"
|
||||
if [ "$crate_name" = "zinit_client" ]; then
|
||||
package_name="sal-zinit-client"
|
||||
elif [ "$crate_name" = "service_manager" ]; then
|
||||
package_name="sal-service-manager"
|
||||
fi
|
||||
|
||||
# Use cargo search to check if the exact version exists
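    # Note: `cargo search` only reports a crate's most recently published version, so this
    # check is reliable only when "$version" is the latest release of the package.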
|
||||
if cargo search "$package_name" --limit 1 | grep -q "$package_name.*$version"; then
|
||||
return 0 # Already published
|
||||
else
|
||||
return 1 # Not published
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to update dependencies in a crate's Cargo.toml
|
||||
update_dependencies() {
|
||||
local crate_dir="$1"
|
||||
local version="$2"
|
||||
|
||||
if [ ! -f "$crate_dir/Cargo.toml" ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Create backup
|
||||
cp "$crate_dir/Cargo.toml" "$crate_dir/Cargo.toml.bak"
|
||||
|
||||
# Update all SAL path dependencies to version dependencies
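    # (crates.io rejects dependencies that only specify a path, so each path dependency is
    #  rewritten to a published version requirement before `cargo publish` runs)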
|
||||
sed -i.tmp "s|sal-text = { path = \"../text\" }|sal-text = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-os = { path = \"../os\" }|sal-os = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-process = { path = \"../process\" }|sal-process = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-git = { path = \"../git\" }|sal-git = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-vault = { path = \"../vault\" }|sal-vault = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-net = { path = \"../net\" }|sal-net = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-kubernetes = { path = \"../kubernetes\" }|sal-kubernetes = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-redisclient = { path = \"../redisclient\" }|sal-redisclient = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-postgresclient = { path = \"../postgresclient\" }|sal-postgresclient = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-virt = { path = \"../virt\" }|sal-virt = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-mycelium = { path = \"../mycelium\" }|sal-mycelium = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-zinit-client = { path = \"../zinit_client\" }|sal-zinit-client = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
sed -i.tmp "s|sal-service-manager = { path = \"../service_manager\" }|sal-service-manager = \"$version\"|g" "$crate_dir/Cargo.toml"
|
||||
|
||||
# Clean up temporary files
|
||||
rm -f "$crate_dir/Cargo.toml.tmp"
|
||||
}
|
||||
|
||||
# Function to restore dependencies from backup
|
||||
restore_dependencies() {
|
||||
local crate_dir="$1"
|
||||
|
||||
if [ -f "$crate_dir/Cargo.toml.bak" ]; then
|
||||
mv "$crate_dir/Cargo.toml.bak" "$crate_dir/Cargo.toml"
|
||||
fi
|
||||
}
|
||||
|
||||
# Publish individual crates
|
||||
echo -e "${BLUE}📦 Publishing individual crates...${NC}"
|
||||
echo ""
|
||||
|
||||
for crate in "${CRATES[@]}"; do
|
||||
echo -e "${YELLOW}Publishing sal-$crate...${NC}"
|
||||
|
||||
if [ ! -d "$crate" ]; then
|
||||
echo -e "${RED} ❌ Directory $crate not found${NC}"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Check if already published
|
||||
if [ "$DRY_RUN" = false ] && is_published "$crate" "$VERSION"; then
|
||||
# Handle special cases for display names
|
||||
display_name="sal-$crate"
|
||||
if [ "$crate" = "zinit_client" ]; then
|
||||
display_name="sal-zinit-client"
|
||||
elif [ "$crate" = "service_manager" ]; then
|
||||
display_name="sal-service-manager"
|
||||
fi
|
||||
echo -e "${GREEN} ✅ $display_name@$VERSION already published, skipping${NC}"
|
||||
echo ""
|
||||
continue
|
||||
fi
|
||||
|
||||
# Update dependencies to use version numbers
|
||||
echo -e "${BLUE} 📝 Updating dependencies for sal-$crate...${NC}"
|
||||
update_dependencies "$crate" "$VERSION"
|
||||
|
||||
cd "$crate"
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
echo -e "${BLUE} 🔍 Would run: cargo publish --allow-dirty${NC}"
|
||||
if is_published "$crate" "$VERSION"; then
|
||||
# Handle special case for zinit_client display name
|
||||
display_name="sal-$crate"
|
||||
if [ "$crate" = "zinit_client" ]; then
|
||||
display_name="sal-zinit-client"
|
||||
fi
|
||||
echo -e "${YELLOW} 📝 Note: $display_name@$VERSION already exists${NC}"
|
||||
fi
|
||||
else
|
||||
# Handle special case for zinit_client display name
|
||||
display_name="sal-$crate"
|
||||
if [ "$crate" = "zinit_client" ]; then
|
||||
display_name="sal-zinit-client"
|
||||
fi
|
||||
|
||||
if cargo publish --allow-dirty; then
|
||||
echo -e "${GREEN} ✅ $display_name published successfully${NC}"
|
||||
else
|
||||
echo -e "${RED} ❌ Failed to publish $display_name${NC}"
|
||||
cd ..
|
||||
restore_dependencies "$crate"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
cd ..
|
||||
|
||||
# Restore original dependencies
|
||||
restore_dependencies "$crate"
|
||||
|
||||
# Wait between publishes (except for the last one)
|
||||
if [ "$DRY_RUN" = false ]; then
|
||||
# Get the last element of the array
|
||||
last_crate="${CRATES[${#CRATES[@]}-1]}"
|
||||
if [ "$crate" != "$last_crate" ]; then
|
||||
echo -e "${BLUE} ⏳ Waiting $WAIT_TIME seconds for crates.io to process...${NC}"
|
||||
sleep "$WAIT_TIME"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
done
|
||||
|
||||
# Publish main crate
|
||||
echo -e "${BLUE}📦 Publishing main sal crate...${NC}"
|
||||
|
||||
# Check if main crate is already published
|
||||
if [ "$DRY_RUN" = false ] && cargo search "sal" --limit 1 | grep -q "sal.*$VERSION"; then
|
||||
echo -e "${GREEN}✅ sal@$VERSION already published, skipping${NC}"
|
||||
else
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
echo -e "${BLUE}🔍 Would run: cargo publish --allow-dirty${NC}"
|
||||
if cargo search "sal" --limit 1 | grep -q "sal.*$VERSION"; then
|
||||
echo -e "${YELLOW}📝 Note: sal@$VERSION already exists${NC}"
|
||||
fi
|
||||
else
|
||||
if cargo publish --allow-dirty; then
|
||||
echo -e "${GREEN}✅ Main sal crate published successfully${NC}"
|
||||
else
|
||||
echo -e "${RED}❌ Failed to publish main sal crate${NC}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Clean up any remaining backup files
|
||||
echo -e "${BLUE}🧹 Cleaning up backup files...${NC}"
|
||||
find . -name "Cargo.toml.bak" -delete 2>/dev/null || true
|
||||
|
||||
echo ""
|
||||
echo -e "${GREEN}===============================================${NC}"
|
||||
echo -e "${GREEN} Publishing Complete!${NC}"
|
||||
echo -e "${GREEN}===============================================${NC}"
|
||||
echo ""
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
echo -e "${YELLOW}🔍 This was a dry run. No crates were actually published.${NC}"
|
||||
echo -e "${YELLOW} Run without --dry-run to publish for real.${NC}"
|
||||
else
|
||||
echo -e "${GREEN}🎉 All SAL crates have been published to crates.io!${NC}"
|
||||
echo ""
|
||||
echo "Users can now install SAL modules with:"
|
||||
echo ""
|
||||
echo -e "${BLUE}# Individual crates${NC}"
|
||||
echo "cargo add sal-os sal-process sal-text"
|
||||
echo ""
|
||||
echo -e "${BLUE}# Meta-crate with features${NC}"
|
||||
echo "cargo add sal --features core"
|
||||
echo "cargo add sal --features all"
|
||||
echo ""
|
||||
echo "📚 See PUBLISHING.md for complete usage documentation."
|
||||
fi
|
||||
|
||||
echo ""
|
||||
@@ -2,42 +2,21 @@
|
||||
name = "sal-service-manager"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["PlanetFirst <info@incubaid.com>"]
|
||||
description = "SAL Service Manager - Cross-platform service management for dynamic worker deployment"
|
||||
repository = "https://git.threefold.info/herocode/sal"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
# Use workspace dependencies for consistency
|
||||
async-trait = "0.1"
|
||||
thiserror = "1.0"
|
||||
tokio = { workspace = true }
|
||||
log = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
once_cell = { workspace = true }
|
||||
# Use base zinit-client instead of SAL wrapper
|
||||
zinit-client = { version = "0.4.0" }
|
||||
# Optional Rhai integration
|
||||
rhai = { workspace = true, optional = true }
|
||||
serde_json = { workspace = true, optional = true }
|
||||
|
||||
zinit_client = { package = "sal-zinit-client", path = "../zinit_client", optional = true }
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dependencies]
|
||||
# macOS-specific dependencies for launchctl
|
||||
plist = "1.6"
|
||||
|
||||
[features]
|
||||
default = ["zinit"]
|
||||
zinit = []
|
||||
rhai = ["dep:rhai"]
|
||||
|
||||
# Enable zinit feature for tests
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
rhai = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
env_logger = "0.10"
|
||||
|
||||
[[test]]
|
||||
name = "zinit_integration_tests"
|
||||
required-features = ["zinit"]
|
||||
default = []
|
||||
zinit = ["dep:zinit_client", "dep:serde_json"]
|
||||
@@ -1,20 +1,16 @@
|
||||
# SAL Service Manager
|
||||
# Service Manager
|
||||
|
||||
[](https://crates.io/crates/sal-service-manager)
|
||||
[](https://docs.rs/sal-service-manager)
|
||||
|
||||
A cross-platform service management library for the System Abstraction Layer (SAL). This crate provides a unified interface for managing system services across different platforms, enabling dynamic deployment of workers and services.
|
||||
This crate provides a unified interface for managing system services across different platforms.
|
||||
It abstracts the underlying service management system (like `launchctl` on macOS or `systemd` on Linux),
|
||||
allowing you to start, stop, and monitor services with a consistent API.
|
||||
|
||||
## Features
|
||||
|
||||
- **Cross-platform service management** - Unified API across macOS and Linux
|
||||
- **Dynamic worker deployment** - Perfect for circle workers and on-demand services
|
||||
- **Platform-specific implementations**:
|
||||
- **macOS**: Uses `launchctl` with plist management
|
||||
- **Linux**: Uses `zinit` for lightweight service management (systemd also available)
|
||||
- **Complete lifecycle management** - Start, stop, restart, status monitoring, and log retrieval
|
||||
- **Service configuration** - Environment variables, working directories, auto-restart
|
||||
- **Production-ready** - Comprehensive error handling and resource management
|
||||
- A `ServiceManager` trait defining a common interface for service operations.
|
||||
- Platform-specific implementations for:
|
||||
- macOS (`launchctl`)
|
||||
- Linux (`systemd`)
|
||||
- A factory function `create_service_manager` that returns the appropriate manager for the current platform.
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -22,55 +18,13 @@ Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
sal-service-manager = "0.1.0"
|
||||
service_manager = { path = "../service_manager" }
|
||||
```
|
||||
|
||||
Or use it as part of the SAL ecosystem:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
sal = { version = "0.1.0", features = ["service_manager"] }
|
||||
```
|
||||
|
||||
## Primary Use Case: Dynamic Circle Worker Management
|
||||
|
||||
This service manager was designed specifically for dynamic deployment of circle workers in freezone environments. When a new resident registers, you can instantly launch a dedicated circle worker:
|
||||
Here is an example of how to use the `ServiceManager`:
|
||||
|
||||
```rust,no_run
|
||||
use sal_service_manager::{create_service_manager, ServiceConfig};
|
||||
use std::collections::HashMap;
|
||||
|
||||
// New resident registration triggers worker creation
|
||||
fn deploy_circle_worker(resident_id: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let manager = create_service_manager();
|
||||
|
||||
let mut env = HashMap::new();
|
||||
env.insert("RESIDENT_ID".to_string(), resident_id.to_string());
|
||||
env.insert("WORKER_TYPE".to_string(), "circle".to_string());
|
||||
|
||||
let config = ServiceConfig {
|
||||
name: format!("circle-worker-{}", resident_id),
|
||||
binary_path: "/usr/bin/circle-worker".to_string(),
|
||||
args: vec!["--resident".to_string(), resident_id.to_string()],
|
||||
working_directory: Some("/var/lib/circle-workers".to_string()),
|
||||
environment: env,
|
||||
auto_restart: true,
|
||||
};
|
||||
|
||||
// Deploy the worker
|
||||
manager.start(&config)?;
|
||||
println!("✅ Circle worker deployed for resident: {}", resident_id);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Basic Usage Example
|
||||
|
||||
Here is an example of the core service management API:
|
||||
|
||||
```rust,no_run
|
||||
use sal_service_manager::{create_service_manager, ServiceConfig};
|
||||
use service_manager::{create_service_manager, ServiceConfig};
|
||||
use std::collections::HashMap;
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
@@ -98,101 +52,3 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
Comprehensive examples are available in the SAL examples directory:
|
||||
|
||||
### Circle Worker Manager Example
|
||||
|
||||
The primary use case - dynamically launching circle workers for new freezone residents:
|
||||
|
||||
```bash
|
||||
# Run the circle worker management example
|
||||
herodo examples/service_manager/circle_worker_manager.rhai
|
||||
```
|
||||
|
||||
This example demonstrates:
|
||||
- Creating service configurations for circle workers
|
||||
- Complete service lifecycle management
|
||||
- Error handling and status monitoring
|
||||
- Service cleanup and removal
|
||||
|
||||
### Basic Usage Example
|
||||
|
||||
A simpler example showing the core API:
|
||||
|
||||
```bash
|
||||
# Run the basic usage example
|
||||
herodo examples/service_manager/basic_usage.rhai
|
||||
```
|
||||
|
||||
See `examples/service_manager/README.md` for detailed documentation.
|
||||
|
||||
## Testing
|
||||
|
||||
Run the test suite:
|
||||
|
||||
```bash
|
||||
cargo test -p sal-service-manager
|
||||
```
|
||||
|
||||
For Rhai integration tests:
|
||||
|
||||
```bash
|
||||
cargo test -p sal-service-manager --features rhai
|
||||
```
|
||||
|
||||
### Testing with Herodo
|
||||
|
||||
To test the service manager with real Rhai scripts using herodo, first build herodo:
|
||||
|
||||
```bash
|
||||
./build_herodo.sh
|
||||
```
|
||||
|
||||
Then run Rhai scripts that use the service manager:
|
||||
|
||||
```bash
|
||||
herodo your_service_script.rhai
|
||||
```
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Linux (zinit/systemd)
|
||||
|
||||
The service manager automatically discovers running zinit servers and falls back to systemd if none are found.
|
||||
|
||||
**For zinit (recommended):**
|
||||
|
||||
```bash
|
||||
# Start zinit with default socket
|
||||
zinit -s /tmp/zinit.sock init
|
||||
|
||||
# Or with a custom socket path
|
||||
zinit -s /var/run/zinit.sock init
|
||||
```
|
||||
|
||||
**Socket Discovery:**
|
||||
The service manager will automatically find running zinit servers by checking:
|
||||
1. `ZINIT_SOCKET_PATH` environment variable (if set)
|
||||
2. Common socket locations: `/var/run/zinit.sock`, `/tmp/zinit.sock`, `/run/zinit.sock`, `./zinit.sock`
|
||||
|
||||
**Custom socket path:**
|
||||
```bash
|
||||
# Set custom socket path
|
||||
export ZINIT_SOCKET_PATH=/your/custom/path/zinit.sock
|
||||
```
|
||||
|
||||
**Systemd fallback:**
|
||||
If no zinit server is detected, the service manager automatically falls back to systemd.
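
For illustration only (this is not the crate's internal code), the discovery order described above can be emulated as follows; the environment variable and socket paths are the ones listed in this section:

```rust
use std::env;
use std::path::{Path, PathBuf};

/// Illustrative sketch of the documented discovery order; not the crate's internals.
fn find_zinit_socket() -> Option<PathBuf> {
    // 1. Explicit override via the ZINIT_SOCKET_PATH environment variable.
    if let Ok(custom) = env::var("ZINIT_SOCKET_PATH") {
        let candidate = PathBuf::from(custom);
        if candidate.exists() {
            return Some(candidate);
        }
    }
    // 2. Common default socket locations.
    for location in ["/var/run/zinit.sock", "/tmp/zinit.sock", "/run/zinit.sock", "./zinit.sock"] {
        let candidate = Path::new(location);
        if candidate.exists() {
            return Some(candidate.to_path_buf());
        }
    }
    None
}

fn main() {
    match find_zinit_socket() {
        Some(path) => println!("zinit socket found at {}", path.display()),
        None => println!("no zinit socket found; a systemd-based manager would be used instead"),
    }
}
```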
|
||||
|
||||
### macOS (launchctl)
|
||||
|
||||
No additional setup required - uses the built-in launchctl system.
|
||||
|
||||
## Platform Support
|
||||
|
||||
- **macOS**: Full support using `launchctl` for service management
|
||||
- **Linux**: Full support using `zinit` for service management (systemd also available as alternative)
|
||||
- **Windows**: Not currently supported
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
# Service Manager Examples
|
||||
|
||||
This directory contains examples demonstrating the usage of the `sal-service-manager` crate.
|
||||
|
||||
## Running Examples
|
||||
|
||||
To run any example, use the following command structure from the `service_manager` crate's root directory:
|
||||
|
||||
```sh
|
||||
cargo run --example <EXAMPLE_NAME>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 1. `simple_service`
|
||||
|
||||
This example demonstrates the ideal, clean lifecycle of a service using the separated `create` and `start` steps.
|
||||
|
||||
**Behavior:**
|
||||
1. Creates a new service definition.
|
||||
2. Starts the newly created service.
|
||||
3. Checks its status to confirm it's running.
|
||||
4. Stops the service.
|
||||
5. Checks its status again to confirm it's stopped.
|
||||
6. Removes the service definition.
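
The sequence above corresponds roughly to the sketch below. It is hedged: it assumes the `create_service_manager`/`ServiceConfig` API shown in the crate README and example sources, and the separate create step used by the real example is folded into `start` here.

```rust,no_run
use sal_service_manager::{create_service_manager, ServiceConfig};
use std::collections::HashMap;

fn main() {
    let manager = match create_service_manager() {
        Ok(manager) => manager,
        Err(e) => {
            eprintln!("failed to create a service manager: {}", e);
            return;
        }
    };

    let name = "simple-example";
    let config = ServiceConfig {
        name: name.to_string(),
        binary_path: "/bin/sleep".to_string(),
        args: vec!["300".to_string()],
        working_directory: None,
        environment: HashMap::new(),
        auto_restart: false,
    };

    // Start, inspect, stop and finally remove the service definition.
    if let Err(e) = manager.start(&config) {
        eprintln!("start failed: {}", e);
        return;
    }
    match manager.status(name) {
        Ok(_) => println!("service '{}' is registered and reports a status", name),
        Err(e) => eprintln!("status check failed: {}", e),
    }
    if let Err(e) = manager.stop(name) {
        eprintln!("stop failed: {}", e);
    }
    if let Err(e) = manager.remove(name) {
        eprintln!("remove failed: {}", e);
    }
}
```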
|
||||
|
||||
**Run it:**
|
||||
```sh
|
||||
cargo run --example simple_service
|
||||
```
|
||||
|
||||
### 2. `service_spaghetti`
|
||||
|
||||
This example demonstrates how the service manager handles "messy" or improper sequences of operations, showcasing its error handling and robustness.
|
||||
|
||||
**Behavior:**
|
||||
1. Creates a service.
|
||||
2. Starts the service.
|
||||
3. Tries to start the **same service again** (which should fail as it's already running).
|
||||
4. Removes the service **without stopping it first** (the manager should handle this gracefully).
|
||||
5. Tries to stop the **already removed** service (which should fail).
|
||||
6. Tries to remove the service **again** (which should also fail).
|
||||
|
||||
**Run it:**
|
||||
```sh
|
||||
cargo run --example service_spaghetti
|
||||
```
|
||||
@@ -1,109 +0,0 @@
//! service_spaghetti - An example of messy service management.
//!
//! This example demonstrates how the service manager behaves when commands
//! are issued in a less-than-ideal order, such as starting a service that's
//! already running or removing a service that hasn't been stopped.

use sal_service_manager::{create_service_manager, ServiceConfig};
use std::collections::HashMap;
use std::thread;
use std::time::Duration;

fn main() {
    // Initialize logging to see socket discovery in action
    env_logger::init();

    let manager = match create_service_manager() {
        Ok(manager) => manager,
        Err(e) => {
            eprintln!("Error: Failed to create service manager: {}", e);
            return;
        }
    };
    let service_name = "com.herocode.examples.spaghetti";

    let service_config = ServiceConfig {
        name: service_name.to_string(),
        binary_path: "/bin/sh".to_string(),
        args: vec![
            "-c".to_string(),
            "while true; do echo 'Spaghetti service is running...'; sleep 5; done".to_string(),
        ],
        working_directory: None,
        environment: HashMap::new(),
        auto_restart: false,
    };

    println!("--- Service Spaghetti Example ---");
    println!("This example demonstrates messy, error-prone service management.");

    // Cleanup from previous runs to ensure a clean slate
    if let Ok(true) = manager.exists(service_name) {
        println!(
            "\nService '{}' found from a previous run. Cleaning up first.",
            service_name
        );
        let _ = manager.stop(service_name);
        let _ = manager.remove(service_name);
        println!("Cleanup complete.");
    }

    // 1. Start the service (creates and starts in one step)
    println!("\n1. Starting the service for the first time...");
    match manager.start(&service_config) {
        Ok(()) => println!(" -> Success: Service '{}' started.", service_name),
        Err(e) => {
            eprintln!(
                " -> Error: Failed to start service: {}. Halting example.",
                e
            );
            return;
        }
    }
    thread::sleep(Duration::from_secs(2));

    // 2. Try to start the service again while it's already running
    println!("\n2. Trying to start the *same service* again...");
    match manager.start(&service_config) {
        Ok(()) => println!(" -> Unexpected Success: Service started again."),
        Err(e) => eprintln!(
            " -> Expected Error: {}. The manager should detect it is already running.",
            e
        ),
    }

    // 3. Let it run for a bit
    println!("\n3. Letting the service run for 5 seconds...");
    thread::sleep(Duration::from_secs(5));

    // 4. Remove the service without stopping it first
    // The `remove` function is designed to stop the service if it's running.
    println!("\n4. Removing the service without explicitly stopping it first...");
    match manager.remove(service_name) {
        Ok(()) => println!(" -> Success: Service was stopped and removed."),
        Err(e) => eprintln!(" -> Error: Failed to remove service: {}", e),
    }

    // 5. Try to stop the service after it has been removed
    println!("\n5. Trying to stop the service that was just removed...");
    match manager.stop(service_name) {
        Ok(()) => println!(" -> Unexpected Success: Stopped a removed service."),
        Err(e) => eprintln!(
            " -> Expected Error: {}. The manager knows the service is gone.",
            e
        ),
    }

    // 6. Try to remove the service again
    println!("\n6. Trying to remove the service again...");
    match manager.remove(service_name) {
        Ok(()) => println!(" -> Unexpected Success: Removed a non-existent service."),
        Err(e) => eprintln!(
            " -> Expected Error: {}. The manager correctly reports it's not found.",
            e
        ),
    }

    println!("\n--- Spaghetti Example Finished ---");
}
@@ -1,110 +0,0 @@
use sal_service_manager::{create_service_manager, ServiceConfig};
use std::collections::HashMap;
use std::thread;
use std::time::Duration;

fn main() {
    // Initialize logging to see socket discovery in action
    env_logger::init();

    // 1. Create a service manager for the current platform
    let manager = match create_service_manager() {
        Ok(manager) => manager,
        Err(e) => {
            eprintln!("Error: Failed to create service manager: {}", e);
            return;
        }
    };

    // 2. Define the configuration for our new service
    let service_name = "com.herocode.examples.simpleservice";
    let service_config = ServiceConfig {
        name: service_name.to_string(),
        // A simple command that runs in a loop
        binary_path: "/bin/sh".to_string(),
        args: vec![
            "-c".to_string(),
            "while true; do echo 'Simple service is running...'; date; sleep 5; done".to_string(),
        ],
        working_directory: None,
        environment: HashMap::new(),
        auto_restart: false,
    };

    println!("--- Service Manager Example ---");

    // Cleanup from previous runs, if necessary
    if let Ok(true) = manager.exists(service_name) {
        println!(
            "Service '{}' already exists. Cleaning up before starting.",
            service_name
        );
        if let Err(e) = manager.stop(service_name) {
            println!(
                "Note: could not stop existing service (it might not be running): {}",
                e
            );
        }
        if let Err(e) = manager.remove(service_name) {
            eprintln!("Error: failed to remove existing service: {}", e);
            return;
        }
        println!("Cleanup complete.");
    }

    // 3. Start the service (creates and starts in one step)
    println!("\n1. Starting service: '{}'", service_name);
    match manager.start(&service_config) {
        Ok(()) => println!("Service '{}' started successfully.", service_name),
        Err(e) => {
            eprintln!("Error: Failed to start service '{}': {}", service_name, e);
            return;
        }
    }

    // Give it a moment to run
    println!("\nWaiting for 2 seconds for the service to initialize...");
    thread::sleep(Duration::from_secs(2));

    // 4. Check the status of the service
    println!("\n2. Checking service status...");
    match manager.status(service_name) {
        Ok(status) => println!("Service status: {:?}", status),
        Err(e) => eprintln!(
            "Error: Failed to get status for service '{}': {}",
            service_name, e
        ),
    }

    println!("\nLetting the service run for 10 seconds. Check logs if you can.");
    thread::sleep(Duration::from_secs(10));

    // 5. Stop the service
    println!("\n3. Stopping service: '{}'", service_name);
    match manager.stop(service_name) {
        Ok(()) => println!("Service '{}' stopped successfully.", service_name),
        Err(e) => eprintln!("Error: Failed to stop service '{}': {}", service_name, e),
    }

    println!("\nWaiting for 2 seconds for the service to stop...");
    thread::sleep(Duration::from_secs(2));

    // Check status again
    println!("\n4. Checking status after stopping...");
    match manager.status(service_name) {
        Ok(status) => println!("Service status: {:?}", status),
        Err(e) => eprintln!(
            "Error: Failed to get status for service '{}': {}",
            service_name, e
        ),
    }

    // 6. Remove the service
    println!("\n5. Removing service: '{}'", service_name);
    match manager.remove(service_name) {
        Ok(()) => println!("Service '{}' removed successfully.", service_name),
        Err(e) => eprintln!("Error: Failed to remove service '{}': {}", service_name, e),
    }

    println!("\n--- Example Finished ---");
}
@@ -1,47 +0,0 @@
//! Socket Discovery Test
//!
//! This example demonstrates the zinit socket discovery functionality.
//! It shows how the service manager finds available zinit sockets.

use sal_service_manager::create_service_manager;

fn main() {
    // Initialize logging to see socket discovery in action
    env_logger::init();

    println!("=== Zinit Socket Discovery Test ===");
    println!("This test demonstrates how the service manager discovers zinit sockets.");
    println!();

    // Test environment variable
    if let Ok(socket_path) = std::env::var("ZINIT_SOCKET_PATH") {
        println!("🔍 ZINIT_SOCKET_PATH environment variable set to: {}", socket_path);
    } else {
        println!("🔍 ZINIT_SOCKET_PATH environment variable not set");
    }
    println!();

    println!("🚀 Creating service manager...");
    match create_service_manager() {
        Ok(_manager) => {
            println!("✅ Service manager created successfully!");

            #[cfg(target_os = "macos")]
            println!("📱 Platform: macOS - Using launchctl");

            #[cfg(target_os = "linux")]
            println!("🐧 Platform: Linux - Check logs above for socket discovery details");
        }
        Err(e) => {
            println!("❌ Failed to create service manager: {}", e);
        }
    }

    println!();
    println!("=== Test Complete ===");
    println!();
    println!("To test zinit socket discovery on Linux:");
    println!("1. Start zinit: zinit -s /tmp/zinit.sock init");
    println!("2. Run with logging: RUST_LOG=debug cargo run --example socket_discovery_test -p sal-service-manager");
    println!("3. Or set custom path: ZINIT_SOCKET_PATH=/custom/path.sock RUST_LOG=debug cargo run --example socket_discovery_test -p sal-service-manager");
}
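For completeness, the same override can be applied from code before the manager is constructed (a sketch; discovery reads `ZINIT_SOCKET_PATH` when `create_service_manager()` runs, so the variable must be set first):

```rust
use sal_service_manager::create_service_manager;

fn main() {
    // Equivalent to `ZINIT_SOCKET_PATH=/tmp/zinit.sock cargo run ...`:
    // discovery checks this variable before probing the common socket paths.
    std::env::set_var("ZINIT_SOCKET_PATH", "/tmp/zinit.sock");

    match create_service_manager() {
        Ok(manager) => println!("services: {:?}", manager.list()),
        Err(e) => eprintln!("failed to create service manager: {}", e),
    }
}
```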
@@ -1,30 +1,9 @@
|
||||
use crate::{ServiceConfig, ServiceManager, ServiceManagerError, ServiceStatus};
|
||||
use once_cell::sync::Lazy;
|
||||
use async_trait::async_trait;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use tokio::process::Command;
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
// Shared runtime for async operations - production-safe initialization
|
||||
static ASYNC_RUNTIME: Lazy<Option<Runtime>> = Lazy::new(|| Runtime::new().ok());
|
||||
|
||||
/// Get the async runtime, creating a temporary one if the static runtime failed
|
||||
fn get_runtime() -> Result<Runtime, ServiceManagerError> {
|
||||
// Try to use the static runtime first
|
||||
if let Some(_runtime) = ASYNC_RUNTIME.as_ref() {
|
||||
// We can't return a reference to the static runtime because we need ownership
|
||||
// for block_on, so we create a new one. This is a reasonable trade-off for safety.
|
||||
Runtime::new().map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to create async runtime: {}", e))
|
||||
})
|
||||
} else {
|
||||
// Static runtime failed, try to create a new one
|
||||
Runtime::new().map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to create async runtime: {}", e))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct LaunchctlServiceManager {
|
||||
@@ -39,10 +18,7 @@ struct LaunchDaemon {
|
||||
program_arguments: Vec<String>,
|
||||
#[serde(rename = "WorkingDirectory", skip_serializing_if = "Option::is_none")]
|
||||
working_directory: Option<String>,
|
||||
#[serde(
|
||||
rename = "EnvironmentVariables",
|
||||
skip_serializing_if = "Option::is_none"
|
||||
)]
|
||||
#[serde(rename = "EnvironmentVariables", skip_serializing_if = "Option::is_none")]
|
||||
environment_variables: Option<HashMap<String, String>>,
|
||||
#[serde(rename = "KeepAlive", skip_serializing_if = "Option::is_none")]
|
||||
keep_alive: Option<bool>,
|
||||
@@ -109,11 +85,7 @@ impl LaunchctlServiceManager {
|
||||
} else {
|
||||
Some(config.environment.clone())
|
||||
},
|
||||
keep_alive: if config.auto_restart {
|
||||
Some(true)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
keep_alive: if config.auto_restart { Some(true) } else { None },
|
||||
run_at_load: true,
|
||||
standard_out_path: Some(log_path.to_string_lossy().to_string()),
|
||||
standard_error_path: Some(log_path.to_string_lossy().to_string()),
|
||||
@@ -122,9 +94,8 @@ impl LaunchctlServiceManager {
|
||||
let mut plist_content = Vec::new();
|
||||
plist::to_writer_xml(&mut plist_content, &launch_daemon)
|
||||
.map_err(|e| ServiceManagerError::Other(format!("Failed to serialize plist: {}", e)))?;
|
||||
let plist_content = String::from_utf8(plist_content).map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to convert plist to string: {}", e))
|
||||
})?;
|
||||
let plist_content = String::from_utf8(plist_content)
|
||||
.map_err(|e| ServiceManagerError::Other(format!("Failed to convert plist to string: {}", e)))?;
|
||||
|
||||
tokio::fs::write(&plist_path, plist_content).await?;
|
||||
|
||||
@@ -132,7 +103,10 @@ impl LaunchctlServiceManager {
|
||||
}
|
||||
|
||||
async fn run_launchctl(&self, args: &[&str]) -> Result<String, ServiceManagerError> {
|
||||
let output = Command::new("launchctl").args(args).output().await?;
|
||||
let output = Command::new("launchctl")
|
||||
.args(args)
|
||||
.output()
|
||||
.await?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
@@ -145,16 +119,12 @@ impl LaunchctlServiceManager {
|
||||
Ok(String::from_utf8_lossy(&output.stdout).to_string())
|
||||
}
|
||||
|
||||
async fn wait_for_service_status(
|
||||
&self,
|
||||
service_name: &str,
|
||||
timeout_secs: u64,
|
||||
) -> Result<(), ServiceManagerError> {
|
||||
use tokio::time::{sleep, timeout, Duration};
|
||||
|
||||
async fn wait_for_service_status(&self, service_name: &str, timeout_secs: u64) -> Result<(), ServiceManagerError> {
|
||||
use tokio::time::{sleep, Duration, timeout};
|
||||
|
||||
let timeout_duration = Duration::from_secs(timeout_secs);
|
||||
let poll_interval = Duration::from_millis(500);
|
||||
|
||||
|
||||
let result = timeout(timeout_duration, async {
|
||||
loop {
|
||||
match self.status(service_name) {
|
||||
@@ -170,65 +140,45 @@ impl LaunchctlServiceManager {
|
||||
// Extract error lines from logs
|
||||
let error_lines: Vec<&str> = logs
|
||||
.lines()
|
||||
.filter(|line| {
|
||||
line.to_lowercase().contains("error")
|
||||
|| line.to_lowercase().contains("failed")
|
||||
})
|
||||
.filter(|line| line.to_lowercase().contains("error") || line.to_lowercase().contains("failed"))
|
||||
.take(3)
|
||||
.collect();
|
||||
|
||||
|
||||
if error_lines.is_empty() {
|
||||
format!(
|
||||
"Service failed to start. Recent logs:\n{}",
|
||||
logs.lines()
|
||||
.rev()
|
||||
.take(5)
|
||||
.collect::<Vec<_>>()
|
||||
.into_iter()
|
||||
.rev()
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n")
|
||||
)
|
||||
format!("Service failed to start. Recent logs:\n{}",
|
||||
logs.lines().rev().take(5).collect::<Vec<_>>().into_iter().rev().collect::<Vec<_>>().join("\n"))
|
||||
} else {
|
||||
format!(
|
||||
"Service failed to start. Errors:\n{}",
|
||||
error_lines.join("\n")
|
||||
)
|
||||
format!("Service failed to start. Errors:\n{}", error_lines.join("\n"))
|
||||
}
|
||||
};
|
||||
return Err(ServiceManagerError::StartFailed(
|
||||
service_name.to_string(),
|
||||
error_msg,
|
||||
));
|
||||
return Err(ServiceManagerError::StartFailed(service_name.to_string(), error_msg));
|
||||
}
|
||||
Ok(ServiceStatus::Stopped) | Ok(ServiceStatus::Unknown) => {
|
||||
// Still starting, continue polling
|
||||
sleep(poll_interval).await;
|
||||
}
|
||||
Err(ServiceManagerError::ServiceNotFound(_)) => {
|
||||
return Err(ServiceManagerError::ServiceNotFound(
|
||||
service_name.to_string(),
|
||||
));
|
||||
return Err(ServiceManagerError::ServiceNotFound(service_name.to_string()));
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
}).await;
|
||||
|
||||
match result {
|
||||
Ok(Ok(())) => Ok(()),
|
||||
Ok(Err(e)) => Err(e),
|
||||
Err(_) => Err(ServiceManagerError::StartFailed(
|
||||
service_name.to_string(),
|
||||
format!("Service did not start within {} seconds", timeout_secs),
|
||||
format!("Service did not start within {} seconds", timeout_secs)
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ServiceManager for LaunchctlServiceManager {
|
||||
fn exists(&self, service_name: &str) -> Result<bool, ServiceManagerError> {
|
||||
let plist_path = self.get_plist_path(service_name);
|
||||
@@ -236,17 +186,15 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
}
|
||||
|
||||
fn start(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError> {
|
||||
// Use production-safe runtime for async operations
|
||||
let runtime = get_runtime()?;
|
||||
runtime.block_on(async {
|
||||
// For synchronous version, we'll use blocking operations
|
||||
let rt = tokio::runtime::Runtime::new().map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
rt.block_on(async {
|
||||
let label = self.get_service_label(&config.name);
|
||||
|
||||
|
||||
// Check if service is already loaded
|
||||
let list_output = self.run_launchctl(&["list"]).await?;
|
||||
if list_output.contains(&label) {
|
||||
return Err(ServiceManagerError::ServiceAlreadyExists(
|
||||
config.name.clone(),
|
||||
));
|
||||
return Err(ServiceManagerError::ServiceAlreadyExists(config.name.clone()));
|
||||
}
|
||||
|
||||
// Create the plist file
|
||||
@@ -256,27 +204,23 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
let plist_path = self.get_plist_path(&config.name);
|
||||
self.run_launchctl(&["load", &plist_path.to_string_lossy()])
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ServiceManagerError::StartFailed(config.name.clone(), e.to_string())
|
||||
})?;
|
||||
.map_err(|e| ServiceManagerError::StartFailed(config.name.clone(), e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn start_existing(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let runtime = get_runtime()?;
|
||||
runtime.block_on(async {
|
||||
let rt = tokio::runtime::Runtime::new().map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
rt.block_on(async {
|
||||
let label = self.get_service_label(service_name);
|
||||
let plist_path = self.get_plist_path(service_name);
|
||||
|
||||
|
||||
// Check if plist file exists
|
||||
if !plist_path.exists() {
|
||||
return Err(ServiceManagerError::ServiceNotFound(
|
||||
service_name.to_string(),
|
||||
));
|
||||
return Err(ServiceManagerError::ServiceNotFound(service_name.to_string()));
|
||||
}
|
||||
|
||||
|
||||
// Check if service is already loaded and running
|
||||
let list_output = self.run_launchctl(&["list"]).await?;
|
||||
if list_output.contains(&label) {
|
||||
@@ -287,72 +231,53 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
}
|
||||
_ => {
|
||||
// Service is loaded but not running, try to start it
|
||||
self.run_launchctl(&["start", &label]).await.map_err(|e| {
|
||||
ServiceManagerError::StartFailed(
|
||||
service_name.to_string(),
|
||||
e.to_string(),
|
||||
)
|
||||
})?;
|
||||
self.run_launchctl(&["start", &label])
|
||||
.await
|
||||
.map_err(|e| ServiceManagerError::StartFailed(service_name.to_string(), e.to_string()))?;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Service is not loaded, load it
|
||||
self.run_launchctl(&["load", &plist_path.to_string_lossy()])
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ServiceManagerError::StartFailed(service_name.to_string(), e.to_string())
|
||||
})?;
|
||||
.map_err(|e| ServiceManagerError::StartFailed(service_name.to_string(), e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn start_and_confirm(
|
||||
&self,
|
||||
config: &ServiceConfig,
|
||||
timeout_secs: u64,
|
||||
) -> Result<(), ServiceManagerError> {
|
||||
async fn start_and_confirm(&self, config: &ServiceConfig, timeout_secs: u64) -> Result<(), ServiceManagerError> {
|
||||
// First start the service
|
||||
self.start(config)?;
|
||||
|
||||
// Then wait for confirmation using production-safe runtime
|
||||
let runtime = get_runtime()?;
|
||||
runtime.block_on(async {
|
||||
self.wait_for_service_status(&config.name, timeout_secs)
|
||||
.await
|
||||
})
|
||||
|
||||
// Then wait for confirmation
|
||||
self.wait_for_service_status(&config.name, timeout_secs).await
|
||||
}
|
||||
|
||||
fn start_existing_and_confirm(
|
||||
&self,
|
||||
service_name: &str,
|
||||
timeout_secs: u64,
|
||||
) -> Result<(), ServiceManagerError> {
|
||||
async fn run(&self, config: &ServiceConfig, timeout_secs: u64) -> Result<(), ServiceManagerError> {
|
||||
self.start_and_confirm(config, timeout_secs).await
|
||||
}
|
||||
|
||||
async fn start_existing_and_confirm(&self, service_name: &str, timeout_secs: u64) -> Result<(), ServiceManagerError> {
|
||||
// First start the existing service
|
||||
self.start_existing(service_name)?;
|
||||
|
||||
// Then wait for confirmation using production-safe runtime
|
||||
let runtime = get_runtime()?;
|
||||
runtime.block_on(async {
|
||||
self.wait_for_service_status(service_name, timeout_secs)
|
||||
.await
|
||||
})
|
||||
|
||||
// Then wait for confirmation
|
||||
self.wait_for_service_status(service_name, timeout_secs).await
|
||||
}
|
||||
|
||||
fn stop(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let runtime = get_runtime()?;
|
||||
runtime.block_on(async {
|
||||
let rt = tokio::runtime::Runtime::new().map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
rt.block_on(async {
|
||||
let _label = self.get_service_label(service_name);
|
||||
let plist_path = self.get_plist_path(service_name);
|
||||
|
||||
// Unload the service
|
||||
self.run_launchctl(&["unload", &plist_path.to_string_lossy()])
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ServiceManagerError::StopFailed(service_name.to_string(), e.to_string())
|
||||
})?;
|
||||
.map_err(|e| ServiceManagerError::StopFailed(service_name.to_string(), e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
@@ -363,10 +288,7 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
if let Err(e) = self.stop(service_name) {
|
||||
// If stop fails because service doesn't exist, that's ok for restart
|
||||
if !matches!(e, ServiceManagerError::ServiceNotFound(_)) {
|
||||
return Err(ServiceManagerError::RestartFailed(
|
||||
service_name.to_string(),
|
||||
e.to_string(),
|
||||
));
|
||||
return Err(ServiceManagerError::RestartFailed(service_name.to_string(), e.to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -379,20 +301,18 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
}
|
||||
|
||||
fn status(&self, service_name: &str) -> Result<ServiceStatus, ServiceManagerError> {
|
||||
let runtime = get_runtime()?;
|
||||
runtime.block_on(async {
|
||||
let rt = tokio::runtime::Runtime::new().map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
rt.block_on(async {
|
||||
let label = self.get_service_label(service_name);
|
||||
let plist_path = self.get_plist_path(service_name);
|
||||
|
||||
|
||||
// First check if the plist file exists
|
||||
if !plist_path.exists() {
|
||||
return Err(ServiceManagerError::ServiceNotFound(
|
||||
service_name.to_string(),
|
||||
));
|
||||
return Err(ServiceManagerError::ServiceNotFound(service_name.to_string()));
|
||||
}
|
||||
|
||||
|
||||
let list_output = self.run_launchctl(&["list"]).await?;
|
||||
|
||||
|
||||
if !list_output.contains(&label) {
|
||||
return Ok(ServiceStatus::Stopped);
|
||||
}
|
||||
@@ -413,15 +333,11 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
})
|
||||
}
|
||||
|
||||
fn logs(
|
||||
&self,
|
||||
service_name: &str,
|
||||
lines: Option<usize>,
|
||||
) -> Result<String, ServiceManagerError> {
|
||||
let runtime = get_runtime()?;
|
||||
runtime.block_on(async {
|
||||
fn logs(&self, service_name: &str, lines: Option<usize>) -> Result<String, ServiceManagerError> {
|
||||
let rt = tokio::runtime::Runtime::new().map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
rt.block_on(async {
|
||||
let log_path = self.get_log_path(service_name);
|
||||
|
||||
|
||||
if !log_path.exists() {
|
||||
return Ok(String::new());
|
||||
}
|
||||
@@ -443,10 +359,10 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
}
|
||||
|
||||
fn list(&self) -> Result<Vec<String>, ServiceManagerError> {
|
||||
let runtime = get_runtime()?;
|
||||
runtime.block_on(async {
|
||||
let rt = tokio::runtime::Runtime::new().map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
rt.block_on(async {
|
||||
let list_output = self.run_launchctl(&["list"]).await?;
|
||||
|
||||
|
||||
let services: Vec<String> = list_output
|
||||
.lines()
|
||||
.filter_map(|line| {
|
||||
@@ -454,9 +370,7 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
// Extract service name from label
|
||||
line.split_whitespace()
|
||||
.last()
|
||||
.and_then(|label| {
|
||||
label.strip_prefix(&format!("{}.", self.service_prefix))
|
||||
})
|
||||
.and_then(|label| label.strip_prefix(&format!("{}.", self.service_prefix)))
|
||||
.map(|s| s.to_string())
|
||||
} else {
|
||||
None
|
||||
@@ -469,19 +383,12 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
}
|
||||
|
||||
fn remove(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
// Try to stop the service first, but don't fail if it's already stopped or doesn't exist
|
||||
if let Err(e) = self.stop(service_name) {
|
||||
// Log the error but continue with removal
|
||||
log::warn!(
|
||||
"Failed to stop service '{}' before removal: {}",
|
||||
service_name,
|
||||
e
|
||||
);
|
||||
}
|
||||
// Stop the service first
|
||||
let _ = self.stop(service_name);
|
||||
|
||||
// Remove the plist file using production-safe runtime
|
||||
let runtime = get_runtime()?;
|
||||
runtime.block_on(async {
|
||||
// Remove the plist file
|
||||
let rt = tokio::runtime::Runtime::new().map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
rt.block_on(async {
|
||||
let plist_path = self.get_plist_path(service_name);
|
||||
if plist_path.exists() {
|
||||
tokio::fs::remove_file(&plist_path).await?;
|
||||
@@ -489,4 +396,4 @@ impl ServiceManager for LaunchctlServiceManager {
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,4 @@
use async_trait::async_trait;
use std::collections::HashMap;
use thiserror::Error;

@@ -31,7 +32,7 @@ pub struct ServiceConfig {
    pub auto_restart: bool,
}

#[derive(Debug, Clone, PartialEq)]
#[derive(Debug, Clone)]
pub enum ServiceStatus {
    Running,
    Stopped,
@@ -39,46 +40,41 @@ pub enum ServiceStatus {
    Unknown,
}

#[async_trait]
pub trait ServiceManager: Send + Sync {
    /// Check if a service exists
    fn exists(&self, service_name: &str) -> Result<bool, ServiceManagerError>;

    /// Start a service with the given configuration
    fn start(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError>;

    /// Start an existing service by name (load existing plist/config)
    fn start_existing(&self, service_name: &str) -> Result<(), ServiceManagerError>;

    /// Start a service and wait for confirmation that it's running or failed
    fn start_and_confirm(
        &self,
        config: &ServiceConfig,
        timeout_secs: u64,
    ) -> Result<(), ServiceManagerError>;
    async fn start_and_confirm(&self, config: &ServiceConfig, timeout_secs: u64) -> Result<(), ServiceManagerError>;

    /// Start a service and wait for confirmation that it's running or failed
    async fn run(&self, config: &ServiceConfig, timeout_secs: u64) -> Result<(), ServiceManagerError>;

    /// Start an existing service and wait for confirmation that it's running or failed
    fn start_existing_and_confirm(
        &self,
        service_name: &str,
        timeout_secs: u64,
    ) -> Result<(), ServiceManagerError>;
    async fn start_existing_and_confirm(&self, service_name: &str, timeout_secs: u64) -> Result<(), ServiceManagerError>;

    /// Stop a service by name
    fn stop(&self, service_name: &str) -> Result<(), ServiceManagerError>;

    /// Restart a service by name
    fn restart(&self, service_name: &str) -> Result<(), ServiceManagerError>;

    /// Get the status of a service
    fn status(&self, service_name: &str) -> Result<ServiceStatus, ServiceManagerError>;

    /// Get logs for a service
    fn logs(&self, service_name: &str, lines: Option<usize>)
        -> Result<String, ServiceManagerError>;
    fn logs(&self, service_name: &str, lines: Option<usize>) -> Result<String, ServiceManagerError>;

    /// List all managed services
    fn list(&self) -> Result<Vec<String>, ServiceManagerError>;

    /// Remove a service configuration (stop if running)
    fn remove(&self, service_name: &str) -> Result<(), ServiceManagerError>;
}
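Since every backend is used through this one trait, generic helpers can be written against `&dyn ServiceManager`. A small illustrative sketch (the `ensure_running` helper is ours, not part of the crate; it assumes the synchronous signatures shown on the removed side of this hunk):

```rust
use sal_service_manager::{ServiceConfig, ServiceManager, ServiceManagerError, ServiceStatus};

/// Start `config` only if it is not already running, whatever the backend.
fn ensure_running(
    manager: &dyn ServiceManager,
    config: &ServiceConfig,
) -> Result<(), ServiceManagerError> {
    if manager.exists(&config.name)? {
        // A definition exists; only start it when it is not currently running.
        if matches!(manager.status(&config.name)?, ServiceStatus::Running) {
            return Ok(());
        }
        return manager.start_existing(&config.name);
    }
    // No definition yet: create and start it in one step.
    manager.start(config)
}
```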
@@ -94,208 +90,23 @@ mod systemd;
#[cfg(target_os = "linux")]
pub use systemd::SystemdServiceManager;

#[cfg(feature = "zinit")]
mod zinit;
#[cfg(feature = "zinit")]
pub use zinit::ZinitServiceManager;

#[cfg(feature = "rhai")]
pub mod rhai;

/// Discover available zinit socket paths
///
/// This function checks for zinit sockets in the following order:
/// 1. Environment variable ZINIT_SOCKET_PATH (if set)
/// 2. Common socket locations with connectivity testing
///
/// # Returns
///
/// Returns the first working socket path found, or None if no working zinit server is detected.
#[cfg(target_os = "linux")]
fn discover_zinit_socket() -> Option<String> {
    // First check environment variable
    if let Ok(env_socket_path) = std::env::var("ZINIT_SOCKET_PATH") {
        log::debug!("Checking ZINIT_SOCKET_PATH: {}", env_socket_path);
        if test_zinit_socket(&env_socket_path) {
            log::info!(
                "Using zinit socket from ZINIT_SOCKET_PATH: {}",
                env_socket_path
            );
            return Some(env_socket_path);
        } else {
            log::warn!(
                "ZINIT_SOCKET_PATH specified but socket is not accessible: {}",
                env_socket_path
            );
        }
    }

    // Try common socket locations
    let common_paths = [
        "/var/run/zinit.sock",
        "/tmp/zinit.sock",
        "/run/zinit.sock",
        "./zinit.sock",
    ];

    log::debug!("Discovering zinit socket from common locations...");
    for path in &common_paths {
        log::debug!("Testing socket path: {}", path);
        if test_zinit_socket(path) {
            log::info!("Found working zinit socket at: {}", path);
            return Some(path.to_string());
        }
    }

    log::debug!("No working zinit socket found");
    None
}

/// Test if a zinit socket is accessible and responsive
///
/// This function attempts to create a ZinitServiceManager and perform a basic
/// connectivity test by listing services.
#[cfg(target_os = "linux")]
fn test_zinit_socket(socket_path: &str) -> bool {
    // Check if socket file exists first
    if !std::path::Path::new(socket_path).exists() {
        log::debug!("Socket file does not exist: {}", socket_path);
        return false;
    }

    // Try to create a manager and test basic connectivity
    match ZinitServiceManager::new(socket_path) {
        Ok(manager) => {
            // Test basic connectivity by trying to list services
            match manager.list() {
                Ok(_) => {
                    log::debug!("Socket {} is responsive", socket_path);
                    true
                }
                Err(e) => {
                    log::debug!("Socket {} exists but not responsive: {}", socket_path, e);
                    false
                }
            }
        }
        Err(e) => {
            log::debug!("Failed to create manager for socket {}: {}", socket_path, e);
            false
        }
    }
}

/// Create a service manager appropriate for the current platform
///
/// - On macOS: Uses launchctl for service management
/// - On Linux: Uses zinit for service management with systemd fallback
///
/// # Returns
///
/// Returns a Result containing the service manager or an error if initialization fails.
/// On Linux, it first tries to discover a working zinit socket. If no zinit server is found,
/// it will fall back to systemd.
///
/// # Environment Variables
///
/// - `ZINIT_SOCKET_PATH`: Specifies the zinit socket path (Linux only)
///
/// # Errors
///
/// Returns `ServiceManagerError` if:
/// - The platform is not supported (Windows, etc.)
/// - Service manager initialization fails on all available backends
pub fn create_service_manager() -> Result<Box<dyn ServiceManager>, ServiceManagerError> {
// Factory function to create the appropriate service manager for the platform
pub fn create_service_manager() -> Box<dyn ServiceManager> {
    #[cfg(target_os = "macos")]
    {
        Ok(Box::new(LaunchctlServiceManager::new()))
        Box::new(LaunchctlServiceManager::new())
    }
    #[cfg(target_os = "linux")]
    {
        // Try to discover a working zinit socket
        if let Some(socket_path) = discover_zinit_socket() {
            match ZinitServiceManager::new(&socket_path) {
                Ok(zinit_manager) => {
                    log::info!("Using zinit service manager with socket: {}", socket_path);
                    return Ok(Box::new(zinit_manager));
                }
                Err(zinit_error) => {
                    log::warn!(
                        "Failed to create zinit manager for discovered socket {}: {}",
                        socket_path,
                        zinit_error
                    );
                }
            }
        } else {
            log::info!("No running zinit server detected. To use zinit, start it with: zinit -s /tmp/zinit.sock init");
        }

        // Fallback to systemd
        log::info!("Falling back to systemd service manager");
        Ok(Box::new(SystemdServiceManager::new()))
        Box::new(SystemdServiceManager::new())
    }
    #[cfg(not(any(target_os = "macos", target_os = "linux")))]
    {
        Err(ServiceManagerError::Other(
            "Service manager not implemented for this platform".to_string(),
        ))
        compile_error!("Service manager not implemented for this platform")
    }
}

/// Create a service manager for zinit with a custom socket path
///
/// This is useful when zinit is running with a non-default socket path
pub fn create_zinit_service_manager(
    socket_path: &str,
) -> Result<Box<dyn ServiceManager>, ServiceManagerError> {
    Ok(Box::new(ZinitServiceManager::new(socket_path)?))
}

/// Create a service manager for systemd (Linux alternative)
///
/// This creates a systemd-based service manager as an alternative to zinit on Linux
#[cfg(target_os = "linux")]
pub fn create_systemd_service_manager() -> Box<dyn ServiceManager> {
    Box::new(SystemdServiceManager::new())
}
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_create_service_manager() {
|
||||
// This test ensures the service manager can be created without panicking
|
||||
let result = create_service_manager();
|
||||
assert!(result.is_ok(), "Service manager creation should succeed");
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
#[test]
|
||||
fn test_socket_discovery_with_env_var() {
|
||||
// Test that environment variable is respected
|
||||
std::env::set_var("ZINIT_SOCKET_PATH", "/test/path.sock");
|
||||
|
||||
// The discover function should check the env var first
|
||||
// Since the socket doesn't exist, it should return None, but we can't test
|
||||
// the actual discovery logic without a real socket
|
||||
|
||||
std::env::remove_var("ZINIT_SOCKET_PATH");
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
#[test]
|
||||
fn test_socket_discovery_without_env_var() {
|
||||
// Ensure env var is not set
|
||||
std::env::remove_var("ZINIT_SOCKET_PATH");
|
||||
|
||||
// The discover function should try common paths
|
||||
// Since no zinit is running, it should return None
|
||||
let result = discover_zinit_socket();
|
||||
|
||||
// This is expected to be None in test environment
|
||||
assert!(
|
||||
result.is_none(),
|
||||
"Should return None when no zinit server is running"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,256 +0,0 @@
|
||||
//! Rhai integration for the service manager module
|
||||
//!
|
||||
//! This module provides Rhai scripting support for service management operations.
|
||||
|
||||
use crate::{create_service_manager, ServiceConfig, ServiceManager};
|
||||
use rhai::{Engine, EvalAltResult, Map};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// A wrapper around ServiceManager that can be used in Rhai
|
||||
#[derive(Clone)]
|
||||
pub struct RhaiServiceManager {
|
||||
inner: Arc<Box<dyn ServiceManager>>,
|
||||
}
|
||||
|
||||
impl RhaiServiceManager {
|
||||
pub fn new() -> Result<Self, Box<EvalAltResult>> {
|
||||
let manager = create_service_manager()
|
||||
.map_err(|e| format!("Failed to create service manager: {}", e))?;
|
||||
Ok(Self {
|
||||
inner: Arc::new(manager),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Register the service manager module with a Rhai engine
|
||||
pub fn register_service_manager_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
|
||||
// Factory function to create service manager
|
||||
engine.register_type::<RhaiServiceManager>();
|
||||
engine.register_fn(
|
||||
"create_service_manager",
|
||||
|| -> Result<RhaiServiceManager, Box<EvalAltResult>> { RhaiServiceManager::new() },
|
||||
);
|
||||
|
||||
// Service management functions
|
||||
engine.register_fn(
|
||||
"start",
|
||||
|manager: &mut RhaiServiceManager, config: Map| -> Result<(), Box<EvalAltResult>> {
|
||||
let service_config = map_to_service_config(config)?;
|
||||
manager
|
||||
.inner
|
||||
.start(&service_config)
|
||||
.map_err(|e| format!("Failed to start service: {}", e).into())
|
||||
},
|
||||
);
|
||||
|
||||
engine.register_fn(
|
||||
"stop",
|
||||
|manager: &mut RhaiServiceManager,
|
||||
service_name: String|
|
||||
-> Result<(), Box<EvalAltResult>> {
|
||||
manager
|
||||
.inner
|
||||
.stop(&service_name)
|
||||
.map_err(|e| format!("Failed to stop service: {}", e).into())
|
||||
},
|
||||
);
|
||||
|
||||
engine.register_fn(
|
||||
"restart",
|
||||
|manager: &mut RhaiServiceManager,
|
||||
service_name: String|
|
||||
-> Result<(), Box<EvalAltResult>> {
|
||||
manager
|
||||
.inner
|
||||
.restart(&service_name)
|
||||
.map_err(|e| format!("Failed to restart service: {}", e).into())
|
||||
},
|
||||
);
|
||||
|
||||
engine.register_fn(
|
||||
"status",
|
||||
|manager: &mut RhaiServiceManager,
|
||||
service_name: String|
|
||||
-> Result<String, Box<EvalAltResult>> {
|
||||
let status = manager
|
||||
.inner
|
||||
.status(&service_name)
|
||||
.map_err(|e| format!("Failed to get service status: {}", e))?;
|
||||
Ok(format!("{:?}", status))
|
||||
},
|
||||
);
|
||||
|
||||
engine.register_fn(
|
||||
"logs",
|
||||
|manager: &mut RhaiServiceManager,
|
||||
service_name: String,
|
||||
lines: i64|
|
||||
-> Result<String, Box<EvalAltResult>> {
|
||||
let lines_opt = if lines > 0 {
|
||||
Some(lines as usize)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
manager
|
||||
.inner
|
||||
.logs(&service_name, lines_opt)
|
||||
.map_err(|e| format!("Failed to get service logs: {}", e).into())
|
||||
},
|
||||
);
|
||||
|
||||
engine.register_fn(
|
||||
"list",
|
||||
|manager: &mut RhaiServiceManager| -> Result<Vec<String>, Box<EvalAltResult>> {
|
||||
manager
|
||||
.inner
|
||||
.list()
|
||||
.map_err(|e| format!("Failed to list services: {}", e).into())
|
||||
},
|
||||
);
|
||||
|
||||
engine.register_fn(
|
||||
"remove",
|
||||
|manager: &mut RhaiServiceManager,
|
||||
service_name: String|
|
||||
-> Result<(), Box<EvalAltResult>> {
|
||||
manager
|
||||
.inner
|
||||
.remove(&service_name)
|
||||
.map_err(|e| format!("Failed to remove service: {}", e).into())
|
||||
},
|
||||
);
|
||||
|
||||
engine.register_fn(
|
||||
"exists",
|
||||
|manager: &mut RhaiServiceManager,
|
||||
service_name: String|
|
||||
-> Result<bool, Box<EvalAltResult>> {
|
||||
manager
|
||||
.inner
|
||||
.exists(&service_name)
|
||||
.map_err(|e| format!("Failed to check if service exists: {}", e).into())
|
||||
},
|
||||
);
|
||||
|
||||
engine.register_fn(
|
||||
"start_and_confirm",
|
||||
|manager: &mut RhaiServiceManager,
|
||||
config: Map,
|
||||
timeout_secs: i64|
|
||||
-> Result<(), Box<EvalAltResult>> {
|
||||
let service_config = map_to_service_config(config)?;
|
||||
let timeout = if timeout_secs > 0 {
|
||||
timeout_secs as u64
|
||||
} else {
|
||||
30
|
||||
};
|
||||
manager
|
||||
.inner
|
||||
.start_and_confirm(&service_config, timeout)
|
||||
.map_err(|e| format!("Failed to start and confirm service: {}", e).into())
|
||||
},
|
||||
);
|
||||
|
||||
engine.register_fn(
|
||||
"start_existing_and_confirm",
|
||||
|manager: &mut RhaiServiceManager,
|
||||
service_name: String,
|
||||
timeout_secs: i64|
|
||||
-> Result<(), Box<EvalAltResult>> {
|
||||
let timeout = if timeout_secs > 0 {
|
||||
timeout_secs as u64
|
||||
} else {
|
||||
30
|
||||
};
|
||||
manager
|
||||
.inner
|
||||
.start_existing_and_confirm(&service_name, timeout)
|
||||
.map_err(|e| format!("Failed to start existing service and confirm: {}", e).into())
|
||||
},
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Convert a Rhai Map to a ServiceConfig
|
||||
fn map_to_service_config(map: Map) -> Result<ServiceConfig, Box<EvalAltResult>> {
|
||||
let name = map
|
||||
.get("name")
|
||||
.and_then(|v| v.clone().into_string().ok())
|
||||
.ok_or("Service config must have a 'name' field")?;
|
||||
|
||||
let binary_path = map
|
||||
.get("binary_path")
|
||||
.and_then(|v| v.clone().into_string().ok())
|
||||
.ok_or("Service config must have a 'binary_path' field")?;
|
||||
|
||||
let args = map
|
||||
.get("args")
|
||||
.and_then(|v| v.clone().try_cast::<rhai::Array>())
|
||||
.map(|arr| {
|
||||
arr.into_iter()
|
||||
.filter_map(|v| v.into_string().ok())
|
||||
.collect::<Vec<String>>()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
let working_directory = map
|
||||
.get("working_directory")
|
||||
.and_then(|v| v.clone().into_string().ok());
|
||||
|
||||
let environment = map
|
||||
.get("environment")
|
||||
.and_then(|v| v.clone().try_cast::<Map>())
|
||||
.map(|env_map| {
|
||||
env_map
|
||||
.into_iter()
|
||||
.filter_map(|(k, v)| v.into_string().ok().map(|val| (k.to_string(), val)))
|
||||
.collect::<HashMap<String, String>>()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
let auto_restart = map
|
||||
.get("auto_restart")
|
||||
.and_then(|v| v.as_bool().ok())
|
||||
.unwrap_or(false);
|
||||
|
||||
Ok(ServiceConfig {
|
||||
name,
|
||||
binary_path,
|
||||
args,
|
||||
working_directory,
|
||||
environment,
|
||||
auto_restart,
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rhai::{Engine, Map};
|
||||
|
||||
#[test]
|
||||
fn test_register_service_manager_module() {
|
||||
let mut engine = Engine::new();
|
||||
register_service_manager_module(&mut engine).unwrap();
|
||||
|
||||
// Test that the functions are registered
|
||||
// Note: Rhai doesn't expose a public API to check if functions are registered
|
||||
// So we'll just verify the module registration doesn't panic
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_map_to_service_config() {
|
||||
let mut map = Map::new();
|
||||
map.insert("name".into(), "test-service".into());
|
||||
map.insert("binary_path".into(), "/bin/echo".into());
|
||||
map.insert("auto_restart".into(), true.into());
|
||||
|
||||
let config = map_to_service_config(map).unwrap();
|
||||
assert_eq!(config.name, "test-service");
|
||||
assert_eq!(config.binary_path, "/bin/echo");
|
||||
assert_eq!(config.auto_restart, true);
|
||||
}
|
||||
}
|
||||
@@ -1,434 +1,42 @@
|
||||
use crate::{ServiceConfig, ServiceManager, ServiceManagerError, ServiceStatus};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
use async_trait::async_trait;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct SystemdServiceManager {
|
||||
service_prefix: String,
|
||||
user_mode: bool,
|
||||
}
|
||||
pub struct SystemdServiceManager;
|
||||
|
||||
impl SystemdServiceManager {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
service_prefix: "sal".to_string(),
|
||||
user_mode: true, // Default to user services for safety
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_system() -> Self {
|
||||
Self {
|
||||
service_prefix: "sal".to_string(),
|
||||
user_mode: false, // System-wide services (requires root)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_service_name(&self, service_name: &str) -> String {
|
||||
format!("{}-{}.service", self.service_prefix, service_name)
|
||||
}
|
||||
|
||||
fn get_unit_file_path(&self, service_name: &str) -> PathBuf {
|
||||
let service_file = self.get_service_name(service_name);
|
||||
if self.user_mode {
|
||||
// User service directory
|
||||
let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".to_string());
|
||||
PathBuf::from(home)
|
||||
.join(".config")
|
||||
.join("systemd")
|
||||
.join("user")
|
||||
.join(service_file)
|
||||
} else {
|
||||
// System service directory
|
||||
PathBuf::from("/etc/systemd/system").join(service_file)
|
||||
}
|
||||
}
|
||||
|
||||
fn run_systemctl(&self, args: &[&str]) -> Result<String, ServiceManagerError> {
|
||||
let mut cmd = Command::new("systemctl");
|
||||
|
||||
if self.user_mode {
|
||||
cmd.arg("--user");
|
||||
}
|
||||
|
||||
cmd.args(args);
|
||||
|
||||
let output = cmd
|
||||
.output()
|
||||
.map_err(|e| ServiceManagerError::Other(format!("Failed to run systemctl: {}", e)))?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
return Err(ServiceManagerError::Other(format!(
|
||||
"systemctl command failed: {}",
|
||||
stderr
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(String::from_utf8_lossy(&output.stdout).to_string())
|
||||
}
|
||||
|
||||
fn create_unit_file(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError> {
|
||||
let unit_path = self.get_unit_file_path(&config.name);
|
||||
|
||||
// Ensure the directory exists
|
||||
if let Some(parent) = unit_path.parent() {
|
||||
fs::create_dir_all(parent).map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to create unit directory: {}", e))
|
||||
})?;
|
||||
}
|
||||
|
||||
// Create the unit file content
|
||||
let mut unit_content = String::new();
|
||||
unit_content.push_str("[Unit]\n");
|
||||
unit_content.push_str(&format!("Description={} service\n", config.name));
|
||||
unit_content.push_str("After=network.target\n\n");
|
||||
|
||||
unit_content.push_str("[Service]\n");
|
||||
unit_content.push_str("Type=simple\n");
|
||||
|
||||
// Build the ExecStart command
|
||||
let mut exec_start = config.binary_path.clone();
|
||||
for arg in &config.args {
|
||||
exec_start.push(' ');
|
||||
exec_start.push_str(arg);
|
||||
}
|
||||
unit_content.push_str(&format!("ExecStart={}\n", exec_start));
|
||||
|
||||
if let Some(working_dir) = &config.working_directory {
|
||||
unit_content.push_str(&format!("WorkingDirectory={}\n", working_dir));
|
||||
}
|
||||
|
||||
// Add environment variables
|
||||
for (key, value) in &config.environment {
|
||||
unit_content.push_str(&format!("Environment=\"{}={}\"\n", key, value));
|
||||
}
|
||||
|
||||
if config.auto_restart {
|
||||
unit_content.push_str("Restart=always\n");
|
||||
unit_content.push_str("RestartSec=5\n");
|
||||
}
|
||||
|
||||
unit_content.push_str("\n[Install]\n");
|
||||
unit_content.push_str("WantedBy=default.target\n");
|
||||
|
||||
// Write the unit file
|
||||
fs::write(&unit_path, unit_content)
|
||||
.map_err(|e| ServiceManagerError::Other(format!("Failed to write unit file: {}", e)))?;
|
||||
|
||||
// Reload systemd to pick up the new unit file
|
||||
self.run_systemctl(&["daemon-reload"])?;
|
||||
|
||||
Ok(())
|
||||
Self
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ServiceManager for SystemdServiceManager {
|
||||
fn exists(&self, service_name: &str) -> Result<bool, ServiceManagerError> {
|
||||
let unit_path = self.get_unit_file_path(service_name);
|
||||
Ok(unit_path.exists())
|
||||
async fn start(&self, _config: &ServiceConfig) -> Result<(), ServiceManagerError> {
|
||||
Err(ServiceManagerError::Other("Systemd implementation not yet complete".to_string()))
|
||||
}
|
||||
|
||||
fn start(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError> {
|
||||
let service_name = self.get_service_name(&config.name);
|
||||
|
||||
// Check if service already exists and is running
|
||||
if self.exists(&config.name)? {
|
||||
match self.status(&config.name)? {
|
||||
ServiceStatus::Running => {
|
||||
return Err(ServiceManagerError::ServiceAlreadyExists(
|
||||
config.name.clone(),
|
||||
));
|
||||
}
|
||||
_ => {
|
||||
// Service exists but not running, we can start it
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Create the unit file
|
||||
self.create_unit_file(config)?;
|
||||
}
|
||||
|
||||
// Enable and start the service
|
||||
self.run_systemctl(&["enable", &service_name])
|
||||
.map_err(|e| ServiceManagerError::StartFailed(config.name.clone(), e.to_string()))?;
|
||||
|
||||
self.run_systemctl(&["start", &service_name])
|
||||
.map_err(|e| ServiceManagerError::StartFailed(config.name.clone(), e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
async fn stop(&self, _service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
Err(ServiceManagerError::Other("Systemd implementation not yet complete".to_string()))
|
||||
}
|
||||
|
||||
fn start_existing(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let service_unit = self.get_service_name(service_name);
|
||||
|
||||
// Check if unit file exists
|
||||
if !self.exists(service_name)? {
|
||||
return Err(ServiceManagerError::ServiceNotFound(
|
||||
service_name.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Check if already running
|
||||
match self.status(service_name)? {
|
||||
ServiceStatus::Running => {
|
||||
return Ok(()); // Already running, nothing to do
|
||||
}
|
||||
_ => {
|
||||
// Start the service
|
||||
self.run_systemctl(&["start", &service_unit]).map_err(|e| {
|
||||
ServiceManagerError::StartFailed(service_name.to_string(), e.to_string())
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
async fn restart(&self, _service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
Err(ServiceManagerError::Other("Systemd implementation not yet complete".to_string()))
|
||||
}
|
||||
|
||||
fn start_and_confirm(
|
||||
&self,
|
||||
config: &ServiceConfig,
|
||||
timeout_secs: u64,
|
||||
) -> Result<(), ServiceManagerError> {
|
||||
// Start the service first
|
||||
self.start(config)?;
|
||||
|
||||
// Wait for confirmation with timeout
|
||||
let start_time = std::time::Instant::now();
|
||||
let timeout_duration = std::time::Duration::from_secs(timeout_secs);
|
||||
|
||||
while start_time.elapsed() < timeout_duration {
|
||||
match self.status(&config.name) {
|
||||
Ok(ServiceStatus::Running) => return Ok(()),
|
||||
Ok(ServiceStatus::Failed) => {
|
||||
return Err(ServiceManagerError::StartFailed(
|
||||
config.name.clone(),
|
||||
"Service failed to start".to_string(),
|
||||
));
|
||||
}
|
||||
Ok(_) => {
|
||||
// Still starting, wait a bit
|
||||
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||
}
|
||||
Err(_) => {
|
||||
// Service might not exist yet, wait a bit
|
||||
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Err(ServiceManagerError::StartFailed(
|
||||
config.name.clone(),
|
||||
format!("Service did not start within {} seconds", timeout_secs),
|
||||
))
|
||||
async fn status(&self, _service_name: &str) -> Result<ServiceStatus, ServiceManagerError> {
|
||||
Err(ServiceManagerError::Other("Systemd implementation not yet complete".to_string()))
|
||||
}
|
||||
|
||||
fn start_existing_and_confirm(
|
||||
&self,
|
||||
service_name: &str,
|
||||
timeout_secs: u64,
|
||||
) -> Result<(), ServiceManagerError> {
|
||||
// Start the existing service first
|
||||
self.start_existing(service_name)?;
|
||||
|
||||
// Wait for confirmation with timeout
|
||||
let start_time = std::time::Instant::now();
|
||||
let timeout_duration = std::time::Duration::from_secs(timeout_secs);
|
||||
|
||||
while start_time.elapsed() < timeout_duration {
|
||||
match self.status(service_name) {
|
||||
Ok(ServiceStatus::Running) => return Ok(()),
|
||||
Ok(ServiceStatus::Failed) => {
|
||||
return Err(ServiceManagerError::StartFailed(
|
||||
service_name.to_string(),
|
||||
"Service failed to start".to_string(),
|
||||
));
|
||||
}
|
||||
Ok(_) => {
|
||||
// Still starting, wait a bit
|
||||
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||
}
|
||||
Err(_) => {
|
||||
// Service might not exist yet, wait a bit
|
||||
std::thread::sleep(std::time::Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Err(ServiceManagerError::StartFailed(
|
||||
service_name.to_string(),
|
||||
format!("Service did not start within {} seconds", timeout_secs),
|
||||
))
|
||||
async fn logs(&self, _service_name: &str, _lines: Option<usize>) -> Result<String, ServiceManagerError> {
|
||||
Err(ServiceManagerError::Other("Systemd implementation not yet complete".to_string()))
|
||||
}
|
||||
|
||||
fn stop(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let service_unit = self.get_service_name(service_name);
|
||||
|
||||
// Check if service exists
|
||||
if !self.exists(service_name)? {
|
||||
return Err(ServiceManagerError::ServiceNotFound(
|
||||
service_name.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Stop the service
|
||||
self.run_systemctl(&["stop", &service_unit]).map_err(|e| {
|
||||
ServiceManagerError::StopFailed(service_name.to_string(), e.to_string())
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
async fn list(&self) -> Result<Vec<String>, ServiceManagerError> {
|
||||
Err(ServiceManagerError::Other("Systemd implementation not yet complete".to_string()))
|
||||
}
|
||||
|
||||
fn restart(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let service_unit = self.get_service_name(service_name);
|
||||
|
||||
// Check if service exists
|
||||
if !self.exists(service_name)? {
|
||||
return Err(ServiceManagerError::ServiceNotFound(
|
||||
service_name.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Restart the service
|
||||
self.run_systemctl(&["restart", &service_unit])
|
||||
.map_err(|e| {
|
||||
ServiceManagerError::RestartFailed(service_name.to_string(), e.to_string())
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
async fn remove(&self, _service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
Err(ServiceManagerError::Other("Systemd implementation not yet complete".to_string()))
|
||||
}
|
||||
|
||||
fn status(&self, service_name: &str) -> Result<ServiceStatus, ServiceManagerError> {
|
||||
let service_unit = self.get_service_name(service_name);
|
||||
|
||||
// Check if service exists
|
||||
if !self.exists(service_name)? {
|
||||
return Err(ServiceManagerError::ServiceNotFound(
|
||||
service_name.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Get service status
|
||||
let output = self
|
||||
.run_systemctl(&["is-active", &service_unit])
|
||||
.unwrap_or_else(|_| "unknown".to_string());
|
||||
|
||||
let status = match output.trim() {
|
||||
"active" => ServiceStatus::Running,
|
||||
"inactive" => ServiceStatus::Stopped,
|
||||
"failed" => ServiceStatus::Failed,
|
||||
_ => ServiceStatus::Unknown,
|
||||
};
|
||||
|
||||
Ok(status)
|
||||
}
|
||||
|
||||
fn logs(
|
||||
&self,
|
||||
service_name: &str,
|
||||
lines: Option<usize>,
|
||||
) -> Result<String, ServiceManagerError> {
|
||||
let service_unit = self.get_service_name(service_name);
|
||||
|
||||
// Check if service exists
|
||||
if !self.exists(service_name)? {
|
||||
return Err(ServiceManagerError::ServiceNotFound(
|
||||
service_name.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Build journalctl command
|
||||
let mut args = vec!["--unit", &service_unit, "--no-pager"];
|
||||
let lines_arg;
|
||||
if let Some(n) = lines {
|
||||
lines_arg = format!("--lines={}", n);
|
||||
args.push(&lines_arg);
|
||||
}
|
||||
|
||||
// Use journalctl to get logs
|
||||
let mut cmd = std::process::Command::new("journalctl");
|
||||
if self.user_mode {
|
||||
cmd.arg("--user");
|
||||
}
|
||||
cmd.args(&args);
|
||||
|
||||
let output = cmd.output().map_err(|e| {
|
||||
ServiceManagerError::LogsFailed(
|
||||
service_name.to_string(),
|
||||
format!("Failed to run journalctl: {}", e),
|
||||
)
|
||||
})?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
return Err(ServiceManagerError::LogsFailed(
|
||||
service_name.to_string(),
|
||||
format!("journalctl command failed: {}", stderr),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(String::from_utf8_lossy(&output.stdout).to_string())
|
||||
}
|
||||
|
||||
fn list(&self) -> Result<Vec<String>, ServiceManagerError> {
|
||||
// List all services with our prefix
|
||||
let output =
|
||||
self.run_systemctl(&["list-units", "--type=service", "--all", "--no-pager"])?;
|
||||
|
||||
let mut services = Vec::new();
|
||||
for line in output.lines() {
|
||||
if line.contains(&format!("{}-", self.service_prefix)) {
|
||||
// Extract service name from the line
|
||||
if let Some(unit_name) = line.split_whitespace().next() {
|
||||
if let Some(service_name) = unit_name.strip_suffix(".service") {
|
||||
if let Some(name) =
|
||||
service_name.strip_prefix(&format!("{}-", self.service_prefix))
|
||||
{
|
||||
services.push(name.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(services)
|
||||
}
|
||||
|
||||
fn remove(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let service_unit = self.get_service_name(service_name);
|
||||
|
||||
// Check if service exists
|
||||
if !self.exists(service_name)? {
|
||||
return Err(ServiceManagerError::ServiceNotFound(
|
||||
service_name.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Try to stop the service first, but don't fail if it's already stopped
|
||||
if let Err(e) = self.stop(service_name) {
|
||||
log::warn!(
|
||||
"Failed to stop service '{}' before removal: {}",
|
||||
service_name,
|
||||
e
|
||||
);
|
||||
}
|
||||
|
||||
// Disable the service
|
||||
if let Err(e) = self.run_systemctl(&["disable", &service_unit]) {
|
||||
log::warn!("Failed to disable service '{}': {}", service_name, e);
|
||||
}
|
||||
|
||||
// Remove the unit file
|
||||
let unit_path = self.get_unit_file_path(service_name);
|
||||
if unit_path.exists() {
|
||||
std::fs::remove_file(&unit_path).map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to remove unit file: {}", e))
|
||||
})?;
|
||||
}

// Reload systemd to pick up the changes
self.run_systemctl(&["daemon-reload"])?;

Ok(())
}
}
}
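For orientation, here is a minimal, illustrative sketch of driving this backend through the synchronous ServiceManager trait as the (now removed) tests further down exercise it; the constructor and ServiceConfig fields come from those tests, everything else is an assumption made for the example.

use sal_service_manager::{ServiceConfig, ServiceManager, SystemdServiceManager};
use std::collections::HashMap;

fn demo() -> Result<(), sal_service_manager::ServiceManagerError> {
    let manager = SystemdServiceManager::new();

    let config = ServiceConfig {
        name: "demo-echo".to_string(),
        binary_path: "/bin/echo".to_string(),
        args: vec!["hello".to_string()],
        working_directory: Some("/tmp".to_string()),
        environment: HashMap::new(),
        auto_restart: false,
    };

    manager.start(&config)?;                         // write the unit file and start it
    println!("status: {:?}", manager.status("demo-echo")?);
    println!("{}", manager.logs("demo-echo", Some(20))?); // tail the journal for the unit
    manager.stop("demo-echo")?;
    manager.remove("demo-echo")?;                    // stop, disable, delete unit, daemon-reload
    Ok(())
}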
@@ -1,120 +1,26 @@
use crate::{ServiceConfig, ServiceManager, ServiceManagerError, ServiceStatus};
use once_cell::sync::Lazy;
use async_trait::async_trait;
use serde_json::json;
use std::sync::Arc;
use std::time::Duration;
use tokio::runtime::Runtime;
use tokio::time::timeout;
use zinit_client::{ServiceStatus as ZinitServiceStatus, ZinitClient, ZinitError};

// Shared runtime for async operations - production-safe initialization
static ASYNC_RUNTIME: Lazy<Option<Runtime>> = Lazy::new(|| Runtime::new().ok());

/// Get the async runtime, creating a temporary one if the static runtime failed
fn get_runtime() -> Result<Runtime, ServiceManagerError> {
    // Try to use the static runtime first
    if let Some(_runtime) = ASYNC_RUNTIME.as_ref() {
        // We can't return a reference to the static runtime because we need ownership
        // for block_on, so we create a new one. This is a reasonable trade-off for safety.
        Runtime::new().map_err(|e| {
            ServiceManagerError::Other(format!("Failed to create async runtime: {}", e))
        })
    } else {
        // Static runtime failed, try to create a new one
        Runtime::new().map_err(|e| {
            ServiceManagerError::Other(format!("Failed to create async runtime: {}", e))
        })
    }
}
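// Illustrative helper, not part of this file: the intended consumption pattern for
// get_runtime() - obtain a fresh runtime, then block on a client future. The name and
// signature below are assumptions made for the example only.
fn block_on_for_example<T>(
    fut: impl std::future::Future<Output = Result<T, ServiceManagerError>>,
) -> Result<T, ServiceManagerError> {
    let rt = get_runtime()?;
    rt.block_on(fut)
}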
use zinit_client::{get_zinit_client, ServiceStatus as ZinitServiceStatus, ZinitClientWrapper};
|
||||
|
||||
pub struct ZinitServiceManager {
|
||||
client: Arc<ZinitClient>,
|
||||
client: Arc<ZinitClientWrapper>,
|
||||
}
|
||||
|
||||
impl ZinitServiceManager {
|
||||
pub fn new(socket_path: &str) -> Result<Self, ServiceManagerError> {
|
||||
// Create the base zinit client directly
|
||||
let client = Arc::new(ZinitClient::new(socket_path));
|
||||
|
||||
// This is a blocking call to get the async client.
|
||||
// We might want to make this async in the future if the constructor can be async.
|
||||
let client = tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(get_zinit_client(socket_path))
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
Ok(ZinitServiceManager { client })
|
||||
}
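    /// Illustrative only - not part of the crate: the async constructor hinted at in the
    /// comment above, assuming get_zinit_client keeps its current async signature.
    pub async fn new_async(socket_path: &str) -> Result<Self, ServiceManagerError> {
        let client = get_zinit_client(socket_path)
            .await
            .map_err(|e| ServiceManagerError::Other(e.to_string()))?;
        Ok(ZinitServiceManager { client })
    }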
|
||||
|
||||
/// Execute an async operation using the shared runtime or current context
|
||||
fn execute_async<F, T>(&self, operation: F) -> Result<T, ServiceManagerError>
|
||||
where
|
||||
F: std::future::Future<Output = Result<T, ZinitError>> + Send + 'static,
|
||||
T: Send + 'static,
|
||||
{
|
||||
// Check if we're already in a tokio runtime context
|
||||
if let Ok(_handle) = tokio::runtime::Handle::try_current() {
|
||||
// We're in an async context: spawn a dedicated OS thread and block on a fresh runtime there, avoiding a nested block_on on the current runtime
|
||||
let result = std::thread::spawn(
|
||||
move || -> Result<Result<T, ZinitError>, ServiceManagerError> {
|
||||
let rt = Runtime::new().map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to create runtime: {}", e))
|
||||
})?;
|
||||
Ok(rt.block_on(operation))
|
||||
},
|
||||
)
|
||||
.join()
|
||||
.map_err(|_| ServiceManagerError::Other("Thread join failed".to_string()))?;
|
||||
result?.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
} else {
|
||||
// No current runtime, use production-safe runtime
|
||||
let runtime = get_runtime()?;
|
||||
runtime
|
||||
.block_on(operation)
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute an async operation with timeout using the shared runtime or current context
|
||||
fn execute_async_with_timeout<F, T>(
|
||||
&self,
|
||||
operation: F,
|
||||
timeout_secs: u64,
|
||||
) -> Result<T, ServiceManagerError>
|
||||
where
|
||||
F: std::future::Future<Output = Result<T, ZinitError>> + Send + 'static,
|
||||
T: Send + 'static,
|
||||
{
|
||||
let timeout_duration = Duration::from_secs(timeout_secs);
|
||||
let timeout_op = timeout(timeout_duration, operation);
|
||||
|
||||
// Check if we're already in a tokio runtime context
|
||||
if let Ok(_handle) = tokio::runtime::Handle::try_current() {
|
||||
// We're in an async context: spawn a dedicated OS thread and block on a fresh runtime there, avoiding a nested block_on on the current runtime
|
||||
let result = std::thread::spawn(move || {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
rt.block_on(timeout_op)
|
||||
})
|
||||
.join()
|
||||
.map_err(|_| ServiceManagerError::Other("Thread join failed".to_string()))?;
|
||||
|
||||
result
|
||||
.map_err(|_| {
|
||||
ServiceManagerError::Other(format!(
|
||||
"Operation timed out after {} seconds",
|
||||
timeout_secs
|
||||
))
|
||||
})?
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
} else {
|
||||
// No current runtime, use production-safe runtime
|
||||
let runtime = get_runtime()?;
|
||||
runtime
|
||||
.block_on(timeout_op)
|
||||
.map_err(|_| {
|
||||
ServiceManagerError::Other(format!(
|
||||
"Operation timed out after {} seconds",
|
||||
timeout_secs
|
||||
))
|
||||
})?
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
}
|
||||
}
|
||||
}
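// Illustrative only: how a caller might sit on top of these helpers. The socket path and
// the synchronous list() call mirror the integration tests further down; nothing beyond
// them is assumed.
//
//     fn example() -> Result<(), ServiceManagerError> {
//         let mgr = ZinitServiceManager::new("/var/run/zinit.sock")?;
//         // Works whether or not the caller is already inside a Tokio runtime:
//         // execute_async() detects an ambient runtime and blocks on a separate
//         // OS thread instead of nesting block_on.
//         let services = mgr.list()?;
//         println!("{} services known to zinit", services.len());
//         Ok(())
//     }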
|
||||
|
||||
#[async_trait]
|
||||
impl ServiceManager for ZinitServiceManager {
|
||||
fn exists(&self, service_name: &str) -> Result<bool, ServiceManagerError> {
|
||||
let status_res = self.status(service_name);
|
||||
@@ -126,254 +32,91 @@ impl ServiceManager for ZinitServiceManager {
|
||||
}
|
||||
|
||||
fn start(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError> {
|
||||
// Build the exec command with args
|
||||
let mut exec_command = config.binary_path.clone();
|
||||
if !config.args.is_empty() {
|
||||
exec_command.push(' ');
|
||||
exec_command.push_str(&config.args.join(" "));
|
||||
}
|
||||
|
||||
// Create zinit-compatible service configuration
|
||||
let mut service_config = json!({
|
||||
"exec": exec_command,
|
||||
"oneshot": !config.auto_restart, // zinit uses oneshot, not restart
|
||||
let service_config = json!({
|
||||
"exec": config.binary_path,
|
||||
"args": config.args,
|
||||
"working_directory": config.working_directory,
|
||||
"env": config.environment,
|
||||
"restart": config.auto_restart,
|
||||
});
|
||||
|
||||
// Add optional fields if present
|
||||
if let Some(ref working_dir) = config.working_directory {
|
||||
// Zinit doesn't support working_directory directly, so we need to modify the exec command
|
||||
let cd_command = format!("cd {} && {}", working_dir, exec_command);
|
||||
service_config["exec"] = json!(cd_command);
|
||||
}
|
||||
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name = config.name.clone();
|
||||
self.execute_async(
|
||||
async move { client.create_service(&service_name, service_config).await },
|
||||
)
|
||||
.map_err(|e| ServiceManagerError::StartFailed(config.name.clone(), e.to_string()))?;
|
||||
tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(self.client.create_service(&config.name, service_config))
|
||||
.map_err(|e| ServiceManagerError::StartFailed(config.name.clone(), e.to_string()))?;
|
||||
|
||||
self.start_existing(&config.name)
|
||||
}
|
||||
|
||||
fn start_existing(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let service_name_for_error = service_name.to_string();
|
||||
self.execute_async(async move { client.start(&service_name_owned).await })
|
||||
.map_err(|e| ServiceManagerError::StartFailed(service_name_for_error, e.to_string()))
|
||||
tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(self.client.start(service_name))
|
||||
.map_err(|e| ServiceManagerError::StartFailed(service_name.to_string(), e.to_string()))
|
||||
}
|
||||
|
||||
fn start_and_confirm(
|
||||
&self,
|
||||
config: &ServiceConfig,
|
||||
timeout_secs: u64,
|
||||
) -> Result<(), ServiceManagerError> {
|
||||
// Start the service first
|
||||
self.start(config)?;
|
||||
|
||||
// Wait for confirmation with timeout using the shared runtime
|
||||
self.execute_async_with_timeout(
|
||||
async move {
|
||||
let start_time = std::time::Instant::now();
|
||||
let timeout_duration = Duration::from_secs(timeout_secs);
|
||||
|
||||
while start_time.elapsed() < timeout_duration {
|
||||
// We can't call the synchronous status() from inside this async block, so this loop
// simply waits out the timeout; the real status check happens after the helper returns
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
// Return a timeout error that will be handled by execute_async_with_timeout
|
||||
// Use a generic error since we don't know the exact ZinitError variants
|
||||
Err(ZinitError::from(std::io::Error::new(
|
||||
std::io::ErrorKind::TimedOut,
|
||||
"Timeout waiting for service confirmation",
|
||||
)))
|
||||
},
|
||||
timeout_secs,
|
||||
)?;
|
||||
|
||||
// Check final status
|
||||
match self.status(&config.name)? {
|
||||
ServiceStatus::Running => Ok(()),
|
||||
ServiceStatus::Failed => Err(ServiceManagerError::StartFailed(
|
||||
config.name.clone(),
|
||||
"Service failed to start".to_string(),
|
||||
)),
|
||||
_ => Err(ServiceManagerError::StartFailed(
|
||||
config.name.clone(),
|
||||
format!("Service did not start within {} seconds", timeout_secs),
|
||||
)),
|
||||
}
|
||||
async fn start_and_confirm(&self, config: &ServiceConfig, _timeout_secs: u64) -> Result<(), ServiceManagerError> {
|
||||
self.start(config)
|
||||
}
|
||||
|
||||
fn start_existing_and_confirm(
|
||||
&self,
|
||||
service_name: &str,
|
||||
timeout_secs: u64,
|
||||
) -> Result<(), ServiceManagerError> {
|
||||
// Start the existing service first
|
||||
self.start_existing(service_name)?;
|
||||
async fn run(&self, config: &ServiceConfig, _timeout_secs: u64) -> Result<(), ServiceManagerError> {
|
||||
self.start(config)
|
||||
}
|
||||
|
||||
// Wait for confirmation with timeout using the shared runtime
|
||||
self.execute_async_with_timeout(
|
||||
async move {
|
||||
let start_time = std::time::Instant::now();
|
||||
let timeout_duration = Duration::from_secs(timeout_secs);
|
||||
|
||||
while start_time.elapsed() < timeout_duration {
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
// Return a timeout error that will be handled by execute_async_with_timeout
|
||||
// Use a generic error since we don't know the exact ZinitError variants
|
||||
Err(ZinitError::from(std::io::Error::new(
|
||||
std::io::ErrorKind::TimedOut,
|
||||
"Timeout waiting for service confirmation",
|
||||
)))
|
||||
},
|
||||
timeout_secs,
|
||||
)?;
|
||||
|
||||
// Check final status
|
||||
match self.status(service_name)? {
|
||||
ServiceStatus::Running => Ok(()),
|
||||
ServiceStatus::Failed => Err(ServiceManagerError::StartFailed(
|
||||
service_name.to_string(),
|
||||
"Service failed to start".to_string(),
|
||||
)),
|
||||
_ => Err(ServiceManagerError::StartFailed(
|
||||
service_name.to_string(),
|
||||
format!("Service did not start within {} seconds", timeout_secs),
|
||||
)),
|
||||
}
|
||||
async fn start_existing_and_confirm(&self, service_name: &str, _timeout_secs: u64) -> Result<(), ServiceManagerError> {
|
||||
self.start_existing(service_name)
|
||||
}
|
||||
|
||||
fn stop(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let service_name_for_error = service_name.to_string();
|
||||
self.execute_async(async move { client.stop(&service_name_owned).await })
|
||||
.map_err(|e| ServiceManagerError::StopFailed(service_name_for_error, e.to_string()))
|
||||
tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(self.client.stop(service_name))
|
||||
.map_err(|e| ServiceManagerError::StopFailed(service_name.to_string(), e.to_string()))
|
||||
}
|
||||
|
||||
fn restart(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let service_name_for_error = service_name.to_string();
|
||||
self.execute_async(async move { client.restart(&service_name_owned).await })
|
||||
.map_err(|e| ServiceManagerError::RestartFailed(service_name_for_error, e.to_string()))
|
||||
tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(self.client.restart(service_name))
|
||||
.map_err(|e| ServiceManagerError::RestartFailed(service_name.to_string(), e.to_string()))
|
||||
}
|
||||
|
||||
fn status(&self, service_name: &str) -> Result<ServiceStatus, ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let service_name_for_error = service_name.to_string();
|
||||
let status: ZinitServiceStatus = self
|
||||
.execute_async(async move { client.status(&service_name_owned).await })
|
||||
.map_err(|e| {
|
||||
// Check if this is a "service not found" error
|
||||
if e.to_string().contains("not found") || e.to_string().contains("does not exist") {
|
||||
ServiceManagerError::ServiceNotFound(service_name_for_error)
|
||||
} else {
|
||||
ServiceManagerError::Other(e.to_string())
|
||||
}
|
||||
})?;
|
||||
let status: ZinitServiceStatus = tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(self.client.status(service_name))
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
|
||||
// ServiceStatus is a struct with fields, not an enum
|
||||
// We need to check the state field to determine the status
|
||||
// Convert ServiceState to string and match on that
|
||||
let state_str = format!("{:?}", status.state).to_lowercase();
|
||||
let service_status = match state_str.as_str() {
|
||||
s if s.contains("running") => crate::ServiceStatus::Running,
|
||||
s if s.contains("stopped") => crate::ServiceStatus::Stopped,
|
||||
s if s.contains("failed") => crate::ServiceStatus::Failed,
|
||||
_ => crate::ServiceStatus::Unknown,
|
||||
let service_status = match status {
|
||||
ZinitServiceStatus::Running(_) => crate::ServiceStatus::Running,
|
||||
ZinitServiceStatus::Stopped => crate::ServiceStatus::Stopped,
|
||||
ZinitServiceStatus::Failed(_) => crate::ServiceStatus::Failed,
|
||||
ZinitServiceStatus::Waiting(_) => crate::ServiceStatus::Unknown,
|
||||
};
|
||||
Ok(service_status)
|
||||
}
|
||||
|
||||
fn logs(
|
||||
&self,
|
||||
service_name: &str,
|
||||
_lines: Option<usize>,
|
||||
) -> Result<String, ServiceManagerError> {
|
||||
// The logs method takes (follow: bool, filter: Option<impl AsRef<str>>)
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let logs = self
|
||||
.execute_async(async move {
|
||||
use futures::StreamExt;
|
||||
use tokio::time::{timeout, Duration};
|
||||
|
||||
let mut log_stream = client
|
||||
.logs(false, Some(service_name_owned.as_str()))
|
||||
.await?;
|
||||
let mut logs = Vec::new();
|
||||
|
||||
// Collect logs from the stream with a reasonable limit
|
||||
let mut count = 0;
|
||||
const MAX_LOGS: usize = 100;
|
||||
const LOG_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
// Use timeout to prevent hanging
|
||||
let result = timeout(LOG_TIMEOUT, async {
|
||||
while let Some(log_result) = log_stream.next().await {
|
||||
match log_result {
|
||||
Ok(log_entry) => {
|
||||
logs.push(format!("{:?}", log_entry));
|
||||
count += 1;
|
||||
if count >= MAX_LOGS {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(_) => break,
|
||||
}
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
// Handle timeout - this is not an error, just means no more logs available
|
||||
if result.is_err() {
|
||||
log::debug!(
|
||||
"Log reading timed out after {} seconds, returning {} logs",
|
||||
LOG_TIMEOUT.as_secs(),
|
||||
logs.len()
|
||||
);
|
||||
}
|
||||
|
||||
Ok::<Vec<String>, ZinitError>(logs)
|
||||
})
|
||||
.map_err(|e| {
|
||||
ServiceManagerError::LogsFailed(service_name.to_string(), e.to_string())
|
||||
})?;
|
||||
fn logs(&self, service_name: &str, _lines: Option<usize>) -> Result<String, ServiceManagerError> {
|
||||
let logs = tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(self.client.logs(Some(service_name.to_string())))
|
||||
.map_err(|e| ServiceManagerError::LogsFailed(service_name.to_string(), e.to_string()))?;
|
||||
Ok(logs.join("\n"))
|
||||
}
|
||||
|
||||
fn list(&self) -> Result<Vec<String>, ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let services = self
|
||||
.execute_async(async move { client.list().await })
|
||||
let services = tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(self.client.list())
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
Ok(services.keys().cloned().collect())
|
||||
}
|
||||
|
||||
fn remove(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
// Try to stop the service first, but don't fail if it's already stopped or doesn't exist
|
||||
if let Err(e) = self.stop(service_name) {
|
||||
// Log the error but continue with removal
|
||||
log::warn!(
|
||||
"Failed to stop service '{}' before removal: {}",
|
||||
service_name,
|
||||
e
|
||||
);
|
||||
}
|
||||
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name = service_name.to_string();
|
||||
self.execute_async(async move { client.delete_service(&service_name).await })
|
||||
let _ = self.stop(service_name); // Best effort to stop before removing
|
||||
tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(self.client.delete_service(service_name))
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,243 +0,0 @@
|
||||
use sal_service_manager::{create_service_manager, ServiceConfig, ServiceManager};
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[test]
|
||||
fn test_create_service_manager() {
|
||||
// Test that the factory function creates the appropriate service manager for the platform
|
||||
let manager = create_service_manager().expect("Failed to create service manager");
|
||||
|
||||
// Test basic functionality - should be able to call methods without panicking
|
||||
let list_result = manager.list();
|
||||
|
||||
// The result might be an error (if no service system is available), but it shouldn't panic
|
||||
match list_result {
|
||||
Ok(services) => {
|
||||
println!(
|
||||
"✓ Service manager created successfully, found {} services",
|
||||
services.len()
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("✓ Service manager created, but got expected error: {}", e);
|
||||
// This is expected on systems without the appropriate service manager
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_config_creation() {
|
||||
// Test creating various service configurations
|
||||
let basic_config = ServiceConfig {
|
||||
name: "test-service".to_string(),
|
||||
binary_path: "/usr/bin/echo".to_string(),
|
||||
args: vec!["hello".to_string(), "world".to_string()],
|
||||
working_directory: None,
|
||||
environment: HashMap::new(),
|
||||
auto_restart: false,
|
||||
};
|
||||
|
||||
assert_eq!(basic_config.name, "test-service");
|
||||
assert_eq!(basic_config.binary_path, "/usr/bin/echo");
|
||||
assert_eq!(basic_config.args.len(), 2);
|
||||
assert_eq!(basic_config.args[0], "hello");
|
||||
assert_eq!(basic_config.args[1], "world");
|
||||
assert!(basic_config.working_directory.is_none());
|
||||
assert!(basic_config.environment.is_empty());
|
||||
assert!(!basic_config.auto_restart);
|
||||
|
||||
println!("✓ Basic service config created successfully");
|
||||
|
||||
// Test config with environment variables
|
||||
let mut env = HashMap::new();
|
||||
env.insert("PATH".to_string(), "/usr/bin:/bin".to_string());
|
||||
env.insert("HOME".to_string(), "/tmp".to_string());
|
||||
|
||||
let env_config = ServiceConfig {
|
||||
name: "env-service".to_string(),
|
||||
binary_path: "/usr/bin/env".to_string(),
|
||||
args: vec![],
|
||||
working_directory: Some("/tmp".to_string()),
|
||||
environment: env.clone(),
|
||||
auto_restart: true,
|
||||
};
|
||||
|
||||
assert_eq!(env_config.name, "env-service");
|
||||
assert_eq!(env_config.binary_path, "/usr/bin/env");
|
||||
assert!(env_config.args.is_empty());
|
||||
assert_eq!(env_config.working_directory, Some("/tmp".to_string()));
|
||||
assert_eq!(env_config.environment.len(), 2);
|
||||
assert_eq!(
|
||||
env_config.environment.get("PATH"),
|
||||
Some(&"/usr/bin:/bin".to_string())
|
||||
);
|
||||
assert_eq!(
|
||||
env_config.environment.get("HOME"),
|
||||
Some(&"/tmp".to_string())
|
||||
);
|
||||
assert!(env_config.auto_restart);
|
||||
|
||||
println!("✓ Environment service config created successfully");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_config_clone() {
|
||||
// Test that ServiceConfig can be cloned
|
||||
let original_config = ServiceConfig {
|
||||
name: "original".to_string(),
|
||||
binary_path: "/bin/sh".to_string(),
|
||||
args: vec!["-c".to_string(), "echo test".to_string()],
|
||||
working_directory: Some("/home".to_string()),
|
||||
environment: {
|
||||
let mut env = HashMap::new();
|
||||
env.insert("TEST".to_string(), "value".to_string());
|
||||
env
|
||||
},
|
||||
auto_restart: true,
|
||||
};
|
||||
|
||||
let cloned_config = original_config.clone();
|
||||
|
||||
assert_eq!(original_config.name, cloned_config.name);
|
||||
assert_eq!(original_config.binary_path, cloned_config.binary_path);
|
||||
assert_eq!(original_config.args, cloned_config.args);
|
||||
assert_eq!(
|
||||
original_config.working_directory,
|
||||
cloned_config.working_directory
|
||||
);
|
||||
assert_eq!(original_config.environment, cloned_config.environment);
|
||||
assert_eq!(original_config.auto_restart, cloned_config.auto_restart);
|
||||
|
||||
println!("✓ Service config cloning works correctly");
|
||||
}
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
#[test]
|
||||
fn test_macos_service_manager() {
|
||||
use sal_service_manager::LaunchctlServiceManager;
|
||||
|
||||
// Test creating macOS-specific service manager
|
||||
let manager = LaunchctlServiceManager::new();
|
||||
|
||||
// Test basic functionality
|
||||
let list_result = manager.list();
|
||||
match list_result {
|
||||
Ok(services) => {
|
||||
println!(
|
||||
"✓ macOS LaunchctlServiceManager created successfully, found {} services",
|
||||
services.len()
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
println!(
|
||||
"✓ macOS LaunchctlServiceManager created, but got expected error: {}",
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
#[test]
|
||||
fn test_linux_service_manager() {
|
||||
use sal_service_manager::SystemdServiceManager;
|
||||
|
||||
// Test creating Linux-specific service manager
|
||||
let manager = SystemdServiceManager::new();
|
||||
|
||||
// Test basic functionality
|
||||
let list_result = manager.list();
|
||||
match list_result {
|
||||
Ok(services) => {
|
||||
println!(
|
||||
"✓ Linux SystemdServiceManager created successfully, found {} services",
|
||||
services.len()
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
println!(
|
||||
"✓ Linux SystemdServiceManager created, but got expected error: {}",
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_status_debug() {
|
||||
use sal_service_manager::ServiceStatus;
|
||||
|
||||
// Test that ServiceStatus can be debugged and cloned
|
||||
let statuses = vec![
|
||||
ServiceStatus::Running,
|
||||
ServiceStatus::Stopped,
|
||||
ServiceStatus::Failed,
|
||||
ServiceStatus::Unknown,
|
||||
];
|
||||
|
||||
for status in &statuses {
|
||||
let cloned = status.clone();
|
||||
let debug_str = format!("{:?}", status);
|
||||
|
||||
assert!(!debug_str.is_empty());
|
||||
assert_eq!(status, &cloned);
|
||||
|
||||
println!(
|
||||
"✓ ServiceStatus::{:?} debug and clone work correctly",
|
||||
status
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_manager_error_debug() {
|
||||
use sal_service_manager::ServiceManagerError;
|
||||
|
||||
// Test that ServiceManagerError can be debugged and displayed
|
||||
let errors = vec![
|
||||
ServiceManagerError::ServiceNotFound("test".to_string()),
|
||||
ServiceManagerError::ServiceAlreadyExists("test".to_string()),
|
||||
ServiceManagerError::StartFailed("test".to_string(), "reason".to_string()),
|
||||
ServiceManagerError::StopFailed("test".to_string(), "reason".to_string()),
|
||||
ServiceManagerError::RestartFailed("test".to_string(), "reason".to_string()),
|
||||
ServiceManagerError::LogsFailed("test".to_string(), "reason".to_string()),
|
||||
ServiceManagerError::Other("generic error".to_string()),
|
||||
];
|
||||
|
||||
for error in &errors {
|
||||
let debug_str = format!("{:?}", error);
|
||||
let display_str = format!("{}", error);
|
||||
|
||||
assert!(!debug_str.is_empty());
|
||||
assert!(!display_str.is_empty());
|
||||
|
||||
println!("✓ Error debug: {:?}", error);
|
||||
println!("✓ Error display: {}", error);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_manager_trait_object() {
|
||||
// Test that we can use ServiceManager as a trait object
|
||||
let manager: Box<dyn ServiceManager> =
|
||||
create_service_manager().expect("Failed to create service manager");
|
||||
|
||||
// Test that we can call methods through the trait object
|
||||
let list_result = manager.list();
|
||||
|
||||
match list_result {
|
||||
Ok(services) => {
|
||||
println!("✓ Trait object works, found {} services", services.len());
|
||||
}
|
||||
Err(e) => {
|
||||
println!("✓ Trait object works, got expected error: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Test exists method
|
||||
let exists_result = manager.exists("non-existent-service");
|
||||
match exists_result {
|
||||
Ok(false) => println!("✓ Trait object exists method works correctly"),
|
||||
Ok(true) => println!("⚠ Unexpectedly found non-existent service"),
|
||||
Err(_) => println!("✓ Trait object exists method works (with error)"),
|
||||
}
|
||||
}
|
||||
@@ -1,177 +0,0 @@
|
||||
// Service lifecycle management test script
|
||||
// This script tests REAL complete service lifecycle scenarios
|
||||
|
||||
print("=== Service Lifecycle Management Test ===");
|
||||
|
||||
// Create service manager
|
||||
let manager = create_service_manager();
|
||||
print("✓ Service manager created");
|
||||
|
||||
// Test configuration - real services for testing
|
||||
let test_services = [
|
||||
#{
|
||||
name: "lifecycle-test-1",
|
||||
binary_path: "/bin/echo",
|
||||
args: ["Lifecycle test 1"],
|
||||
working_directory: "/tmp",
|
||||
environment: #{},
|
||||
auto_restart: false
|
||||
},
|
||||
#{
|
||||
name: "lifecycle-test-2",
|
||||
binary_path: "/bin/echo",
|
||||
args: ["Lifecycle test 2"],
|
||||
working_directory: "/tmp",
|
||||
environment: #{ "TEST_VAR": "test_value" },
|
||||
auto_restart: false
|
||||
}
|
||||
];
|
||||
|
||||
let total_tests = 0;
|
||||
let passed_tests = 0;
|
||||
|
||||
// Test 1: Service Creation and Start
|
||||
print("\n1. Testing service creation and start...");
|
||||
for service_config in test_services {
|
||||
print(`\nStarting service: ${service_config.name}`);
|
||||
try {
|
||||
start(manager, service_config);
|
||||
print(` ✓ Service ${service_config.name} started successfully`);
|
||||
passed_tests += 1;
|
||||
} catch(e) {
|
||||
print(` ✗ Service ${service_config.name} start failed: ${e}`);
|
||||
}
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 2: Service Existence Check
|
||||
print("\n2. Testing service existence checks...");
|
||||
for service_config in test_services {
|
||||
print(`\nChecking existence of: ${service_config.name}`);
|
||||
try {
|
||||
let service_exists = exists(manager, service_config.name);
|
||||
if service_exists {
|
||||
print(` ✓ Service ${service_config.name} exists: ${service_exists}`);
|
||||
passed_tests += 1;
|
||||
} else {
|
||||
print(` ✗ Service ${service_config.name} doesn't exist after start`);
|
||||
}
|
||||
} catch(e) {
|
||||
print(` ✗ Existence check failed for ${service_config.name}: ${e}`);
|
||||
}
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 3: Status Check
|
||||
print("\n3. Testing status checks...");
|
||||
for service_config in test_services {
|
||||
print(`\nChecking status of: ${service_config.name}`);
|
||||
try {
|
||||
let service_status = status(manager, service_config.name);
|
||||
print(` ✓ Service ${service_config.name} status: ${service_status}`);
|
||||
passed_tests += 1;
|
||||
} catch(e) {
|
||||
print(` ✗ Status check failed for ${service_config.name}: ${e}`);
|
||||
}
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 4: Service List Check
|
||||
print("\n4. Testing service list...");
|
||||
try {
|
||||
let services = list(manager);
|
||||
print(` ✓ Service list retrieved (${services.len()} services)`);
|
||||
|
||||
// Check if our test services are in the list
|
||||
for service_config in test_services {
|
||||
let found = false;
|
||||
for service in services {
|
||||
if service.contains(service_config.name) {
|
||||
found = true;
|
||||
print(` ✓ Found ${service_config.name} in list`);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
print(` ⚠ ${service_config.name} not found in service list`);
|
||||
}
|
||||
}
|
||||
passed_tests += 1;
|
||||
} catch(e) {
|
||||
print(` ✗ Service list failed: ${e}`);
|
||||
}
|
||||
total_tests += 1;
|
||||
|
||||
// Test 5: Service Stop
|
||||
print("\n5. Testing service stop...");
|
||||
for service_config in test_services {
|
||||
print(`\nStopping service: ${service_config.name}`);
|
||||
try {
|
||||
stop(manager, service_config.name);
|
||||
print(` ✓ Service ${service_config.name} stopped successfully`);
|
||||
passed_tests += 1;
|
||||
} catch(e) {
|
||||
print(` ✗ Service ${service_config.name} stop failed: ${e}`);
|
||||
}
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 6: Service Removal
|
||||
print("\n6. Testing service removal...");
|
||||
for service_config in test_services {
|
||||
print(`\nRemoving service: ${service_config.name}`);
|
||||
try {
|
||||
remove(manager, service_config.name);
|
||||
print(` ✓ Service ${service_config.name} removed successfully`);
|
||||
passed_tests += 1;
|
||||
} catch(e) {
|
||||
print(` ✗ Service ${service_config.name} removal failed: ${e}`);
|
||||
}
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 7: Cleanup Verification
|
||||
print("\n7. Testing cleanup verification...");
|
||||
for service_config in test_services {
|
||||
print(`\nVerifying removal of: ${service_config.name}`);
|
||||
try {
|
||||
let exists_after_remove = exists(manager, service_config.name);
|
||||
if !exists_after_remove {
|
||||
print(` ✓ Service ${service_config.name} correctly doesn't exist after removal`);
|
||||
passed_tests += 1;
|
||||
} else {
|
||||
print(` ✗ Service ${service_config.name} still exists after removal`);
|
||||
}
|
||||
} catch(e) {
|
||||
print(` ✗ Cleanup verification failed for ${service_config.name}: ${e}`);
|
||||
}
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test Summary
|
||||
print("\n=== Lifecycle Test Summary ===");
|
||||
print(`Services tested: ${test_services.len()}`);
|
||||
print(`Total operations: ${total_tests}`);
|
||||
print(`Successful operations: ${passed_tests}`);
|
||||
print(`Failed operations: ${total_tests - passed_tests}`);
|
||||
print(`Success rate: ${(passed_tests * 100) / total_tests}%`);
|
||||
|
||||
if passed_tests == total_tests {
|
||||
print("\n🎉 All lifecycle tests passed!");
|
||||
print("Service manager is working correctly across all scenarios.");
|
||||
} else {
|
||||
print(`\n⚠ ${total_tests - passed_tests} test(s) failed`);
|
||||
print("Some service manager operations need attention.");
|
||||
}
|
||||
|
||||
print("\n=== Service Lifecycle Test Complete ===");
|
||||
|
||||
// Return test results
|
||||
#{
|
||||
summary: #{
|
||||
total_tests: total_tests,
|
||||
passed_tests: passed_tests,
|
||||
success_rate: (passed_tests * 100) / total_tests,
|
||||
services_tested: test_services.len()
|
||||
}
|
||||
}
|
||||
@@ -1,218 +0,0 @@
|
||||
// Basic service manager functionality test script
|
||||
// This script tests the REAL service manager through Rhai integration
|
||||
|
||||
print("=== Service Manager Basic Functionality Test ===");
|
||||
|
||||
// Test configuration
|
||||
let test_service_name = "rhai-test-service";
|
||||
let test_binary = "/bin/echo";
|
||||
let test_args = ["Hello from Rhai service manager test"];
|
||||
|
||||
print(`Testing service: ${test_service_name}`);
|
||||
print(`Binary: ${test_binary}`);
|
||||
print(`Args: ${test_args}`);
|
||||
|
||||
// Test results tracking
|
||||
let test_results = #{
|
||||
creation: "NOT_RUN",
|
||||
exists_before: "NOT_RUN",
|
||||
start: "NOT_RUN",
|
||||
exists_after: "NOT_RUN",
|
||||
status: "NOT_RUN",
|
||||
list: "NOT_RUN",
|
||||
stop: "NOT_RUN",
|
||||
remove: "NOT_RUN",
|
||||
cleanup: "NOT_RUN"
|
||||
};
|
||||
|
||||
let passed_tests = 0;
|
||||
let total_tests = 0;
|
||||
|
||||
// Test 1: Service Manager Creation
|
||||
print("\n1. Testing service manager creation...");
|
||||
try {
|
||||
let manager = create_service_manager();
|
||||
print("✓ Service manager created successfully");
|
||||
test_results["creation"] = "PASS";
|
||||
passed_tests += 1;
|
||||
total_tests += 1;
|
||||
} catch(e) {
|
||||
print(`✗ Service manager creation failed: ${e}`);
|
||||
test_results["creation"] = "FAIL";
|
||||
total_tests += 1;
|
||||
// Return early if we can't create the manager
|
||||
return test_results;
|
||||
}
|
||||
|
||||
// Create the service manager for all subsequent tests
|
||||
let manager = create_service_manager();
|
||||
|
||||
// Test 2: Check if service exists before creation
|
||||
print("\n2. Testing service existence check (before creation)...");
|
||||
try {
|
||||
let exists_before = exists(manager, test_service_name);
|
||||
print(`✓ Service existence check: ${exists_before}`);
|
||||
|
||||
if !exists_before {
|
||||
print("✓ Service correctly doesn't exist before creation");
|
||||
test_results["exists_before"] = "PASS";
|
||||
passed_tests += 1;
|
||||
} else {
|
||||
print("⚠ Service unexpectedly exists before creation");
|
||||
test_results["exists_before"] = "WARN";
|
||||
}
|
||||
total_tests += 1;
|
||||
} catch(e) {
|
||||
print(`✗ Service existence check failed: ${e}`);
|
||||
test_results["exists_before"] = "FAIL";
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 3: Start the service
|
||||
print("\n3. Testing service start...");
|
||||
try {
|
||||
// Create a service configuration object
|
||||
let service_config = #{
|
||||
name: test_service_name,
|
||||
binary_path: test_binary,
|
||||
args: test_args,
|
||||
working_directory: "/tmp",
|
||||
environment: #{},
|
||||
auto_restart: false
|
||||
};
|
||||
|
||||
start(manager, service_config);
|
||||
print("✓ Service started successfully");
|
||||
test_results["start"] = "PASS";
|
||||
passed_tests += 1;
|
||||
total_tests += 1;
|
||||
} catch(e) {
|
||||
print(`✗ Service start failed: ${e}`);
|
||||
test_results["start"] = "FAIL";
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 4: Check if service exists after creation
|
||||
print("\n4. Testing service existence check (after creation)...");
|
||||
try {
|
||||
let exists_after = exists(manager, test_service_name);
|
||||
print(`✓ Service existence check: ${exists_after}`);
|
||||
|
||||
if exists_after {
|
||||
print("✓ Service correctly exists after creation");
|
||||
test_results["exists_after"] = "PASS";
|
||||
passed_tests += 1;
|
||||
} else {
|
||||
print("✗ Service doesn't exist after creation");
|
||||
test_results["exists_after"] = "FAIL";
|
||||
}
|
||||
total_tests += 1;
|
||||
} catch(e) {
|
||||
print(`✗ Service existence check failed: ${e}`);
|
||||
test_results["exists_after"] = "FAIL";
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 5: Check service status
|
||||
print("\n5. Testing service status...");
|
||||
try {
|
||||
let service_status = status(manager, test_service_name);
|
||||
print(`✓ Service status: ${service_status}`);
|
||||
test_results["status"] = "PASS";
|
||||
passed_tests += 1;
|
||||
total_tests += 1;
|
||||
} catch(e) {
|
||||
print(`✗ Service status check failed: ${e}`);
|
||||
test_results["status"] = "FAIL";
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 6: List services
|
||||
print("\n6. Testing service list...");
|
||||
try {
|
||||
let services = list(manager);
|
||||
print("✓ Service list retrieved");
|
||||
|
||||
// Skip service search due to Rhai type constraints with Vec iteration
|
||||
print(" ⚠️ Skipping service search due to Rhai type constraints");
|
||||
|
||||
test_results["list"] = "PASS";
|
||||
passed_tests += 1;
|
||||
total_tests += 1;
|
||||
} catch(e) {
|
||||
print(`✗ Service list failed: ${e}`);
|
||||
test_results["list"] = "FAIL";
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 7: Stop the service
|
||||
print("\n7. Testing service stop...");
|
||||
try {
|
||||
stop(manager, test_service_name);
|
||||
print(`✓ Service stopped: ${test_service_name}`);
|
||||
test_results["stop"] = "PASS";
|
||||
passed_tests += 1;
|
||||
total_tests += 1;
|
||||
} catch(e) {
|
||||
print(`✗ Service stop failed: ${e}`);
|
||||
test_results["stop"] = "FAIL";
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 8: Remove the service
|
||||
print("\n8. Testing service remove...");
|
||||
try {
|
||||
remove(manager, test_service_name);
|
||||
print(`✓ Service removed: ${test_service_name}`);
|
||||
test_results["remove"] = "PASS";
|
||||
passed_tests += 1;
|
||||
total_tests += 1;
|
||||
} catch(e) {
|
||||
print(`✗ Service remove failed: ${e}`);
|
||||
test_results["remove"] = "FAIL";
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test 9: Verify cleanup
|
||||
print("\n9. Testing cleanup verification...");
|
||||
try {
|
||||
let exists_after_remove = exists(manager, test_service_name);
|
||||
if !exists_after_remove {
|
||||
print("✓ Service correctly doesn't exist after removal");
|
||||
test_results["cleanup"] = "PASS";
|
||||
passed_tests += 1;
|
||||
} else {
|
||||
print("✗ Service still exists after removal");
|
||||
test_results["cleanup"] = "FAIL";
|
||||
}
|
||||
total_tests += 1;
|
||||
} catch(e) {
|
||||
print(`✗ Cleanup verification failed: ${e}`);
|
||||
test_results["cleanup"] = "FAIL";
|
||||
total_tests += 1;
|
||||
}
|
||||
|
||||
// Test Summary
|
||||
print("\n=== Test Summary ===");
|
||||
print(`Total tests: ${total_tests}`);
|
||||
print(`Passed: ${passed_tests}`);
|
||||
print(`Failed: ${total_tests - passed_tests}`);
|
||||
print(`Success rate: ${(passed_tests * 100) / total_tests}%`);
|
||||
|
||||
print("\nDetailed Results:");
|
||||
for test_name in test_results.keys() {
|
||||
let result = test_results[test_name];
|
||||
let status_icon = if result == "PASS" { "✓" } else if result == "FAIL" { "✗" } else { "⚠" };
|
||||
print(` ${status_icon} ${test_name}: ${result}`);
|
||||
}
|
||||
|
||||
if passed_tests == total_tests {
|
||||
print("\n🎉 All tests passed!");
|
||||
} else {
|
||||
print(`\n⚠ ${total_tests - passed_tests} test(s) failed`);
|
||||
}
|
||||
|
||||
print("\n=== Service Manager Basic Test Complete ===");
|
||||
|
||||
// Return test results for potential use by calling code
|
||||
test_results
|
||||
@@ -1,252 +0,0 @@
|
||||
use rhai::{Engine, EvalAltResult};
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
/// Helper function to create a Rhai engine for service manager testing
|
||||
fn create_service_manager_engine() -> Result<Engine, Box<EvalAltResult>> {
|
||||
#[cfg(feature = "rhai")]
|
||||
{
|
||||
let mut engine = Engine::new();
|
||||
// Register the service manager module for real testing
|
||||
sal_service_manager::rhai::register_service_manager_module(&mut engine)?;
|
||||
Ok(engine)
|
||||
}
|
||||
#[cfg(not(feature = "rhai"))]
|
||||
{
|
||||
Ok(Engine::new())
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function to run a Rhai script file
|
||||
fn run_rhai_script(script_path: &str) -> Result<rhai::Dynamic, Box<EvalAltResult>> {
|
||||
let engine = create_service_manager_engine()?;
|
||||
|
||||
// Read the script file
|
||||
let script_content = fs::read_to_string(script_path)
|
||||
.map_err(|e| format!("Failed to read script file {}: {}", script_path, e))?;
|
||||
|
||||
// Execute the script
|
||||
engine.eval::<rhai::Dynamic>(&script_content)
|
||||
}
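// Illustrative only: the helper above is what each test below boils down to, e.g.
//
//     let result = run_rhai_script("tests/rhai/service_manager_basic.rhai")?;
//     let map = result.try_cast::<rhai::Map>();
//
// where the returned rhai::Map carries the per-test PASS/FAIL strings set by the script.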
|
||||
|
||||
#[test]
|
||||
fn test_rhai_service_manager_basic() {
|
||||
let script_path = "tests/rhai/service_manager_basic.rhai";
|
||||
|
||||
if !Path::new(script_path).exists() {
|
||||
println!("⚠ Skipping test: Rhai script not found at {}", script_path);
|
||||
return;
|
||||
}
|
||||
|
||||
println!("Running Rhai service manager basic test...");
|
||||
|
||||
match run_rhai_script(script_path) {
|
||||
Ok(result) => {
|
||||
println!("✓ Rhai basic test completed successfully");
|
||||
|
||||
// Try to extract test results if the script returns them
|
||||
if let Some(map) = result.try_cast::<rhai::Map>() {
|
||||
println!("Test results received from Rhai script:");
|
||||
for (key, value) in map.iter() {
|
||||
println!(" {}: {:?}", key, value);
|
||||
}
|
||||
|
||||
// Check if all tests passed
|
||||
let all_passed = map.values().all(|v| {
|
||||
if let Some(s) = v.clone().try_cast::<String>() {
|
||||
s == "PASS"
|
||||
} else {
|
||||
false
|
||||
}
|
||||
});
|
||||
|
||||
if all_passed {
|
||||
println!("✓ All Rhai tests reported as PASS");
|
||||
} else {
|
||||
println!("⚠ Some Rhai tests did not pass");
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("✗ Rhai basic test failed: {}", e);
|
||||
assert!(false, "Rhai script execution failed: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_service_lifecycle() {
|
||||
let script_path = "tests/rhai/service_lifecycle.rhai";
|
||||
|
||||
if !Path::new(script_path).exists() {
|
||||
println!("⚠ Skipping test: Rhai script not found at {}", script_path);
|
||||
return;
|
||||
}
|
||||
|
||||
println!("Running Rhai service lifecycle test...");
|
||||
|
||||
match run_rhai_script(script_path) {
|
||||
Ok(result) => {
|
||||
println!("✓ Rhai lifecycle test completed successfully");
|
||||
|
||||
// Try to extract test results if the script returns them
|
||||
if let Some(map) = result.try_cast::<rhai::Map>() {
|
||||
println!("Lifecycle test results received from Rhai script:");
|
||||
|
||||
// Extract summary if available
|
||||
if let Some(summary) = map.get("summary") {
|
||||
if let Some(summary_map) = summary.clone().try_cast::<rhai::Map>() {
|
||||
println!("Summary:");
|
||||
for (key, value) in summary_map.iter() {
|
||||
println!(" {}: {:?}", key, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract performance metrics if available
|
||||
if let Some(performance) = map.get("performance") {
|
||||
if let Some(perf_map) = performance.clone().try_cast::<rhai::Map>() {
|
||||
println!("Performance:");
|
||||
for (key, value) in perf_map.iter() {
|
||||
println!(" {}: {:?}", key, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("✗ Rhai lifecycle test failed: {}", e);
|
||||
assert!(false, "Rhai script execution failed: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_engine_functionality() {
|
||||
println!("Testing basic Rhai engine functionality...");
|
||||
|
||||
let engine = create_service_manager_engine().expect("Failed to create Rhai engine");
|
||||
|
||||
// Test basic Rhai functionality
|
||||
let test_script = r#"
|
||||
let test_results = #{
|
||||
basic_math: 2 + 2 == 4,
|
||||
string_ops: "hello".len() == 5,
|
||||
array_ops: [1, 2, 3].len() == 3,
|
||||
map_ops: #{ a: 1, b: 2 }.len() == 2
|
||||
};
|
||||
|
||||
let all_passed = true;
|
||||
for result in test_results.values() {
|
||||
if !result {
|
||||
all_passed = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#{
|
||||
results: test_results,
|
||||
all_passed: all_passed
|
||||
}
|
||||
"#;
|
||||
|
||||
match engine.eval::<rhai::Dynamic>(test_script) {
|
||||
Ok(result) => {
|
||||
if let Some(map) = result.try_cast::<rhai::Map>() {
|
||||
if let Some(all_passed) = map.get("all_passed") {
|
||||
if let Some(passed) = all_passed.clone().try_cast::<bool>() {
|
||||
if passed {
|
||||
println!("✓ All basic Rhai functionality tests passed");
|
||||
} else {
|
||||
println!("✗ Some basic Rhai functionality tests failed");
|
||||
assert!(false, "Basic Rhai tests failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(results) = map.get("results") {
|
||||
if let Some(results_map) = results.clone().try_cast::<rhai::Map>() {
|
||||
println!("Detailed results:");
|
||||
for (test_name, result) in results_map.iter() {
|
||||
let status = if let Some(passed) = result.clone().try_cast::<bool>() {
|
||||
if passed {
|
||||
"✓"
|
||||
} else {
|
||||
"✗"
|
||||
}
|
||||
} else {
|
||||
"?"
|
||||
};
|
||||
println!(" {} {}: {:?}", status, test_name, result);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("✗ Basic Rhai functionality test failed: {}", e);
|
||||
assert!(false, "Basic Rhai test failed: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_script_error_handling() {
|
||||
println!("Testing Rhai error handling...");
|
||||
|
||||
let engine = create_service_manager_engine().expect("Failed to create Rhai engine");
|
||||
|
||||
// Test script with intentional error
|
||||
let error_script = r#"
|
||||
let result = "test";
|
||||
result.non_existent_method(); // This should cause an error
|
||||
"#;
|
||||
|
||||
match engine.eval::<rhai::Dynamic>(error_script) {
|
||||
Ok(_) => {
|
||||
println!("⚠ Expected error but script succeeded");
|
||||
assert!(
|
||||
false,
|
||||
"Error handling test failed - expected error but got success"
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("✓ Error correctly caught: {}", e);
|
||||
// Verify it's the expected type of error
|
||||
assert!(e.to_string().contains("method") || e.to_string().contains("function"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rhai_script_files_exist() {
|
||||
println!("Checking that Rhai test scripts exist...");
|
||||
|
||||
let script_files = [
|
||||
"tests/rhai/service_manager_basic.rhai",
|
||||
"tests/rhai/service_lifecycle.rhai",
|
||||
];
|
||||
|
||||
for script_file in &script_files {
|
||||
if Path::new(script_file).exists() {
|
||||
println!("✓ Found script: {}", script_file);
|
||||
|
||||
// Verify the file is readable and not empty
|
||||
match fs::read_to_string(script_file) {
|
||||
Ok(content) => {
|
||||
if content.trim().is_empty() {
|
||||
assert!(false, "Script file {} is empty", script_file);
|
||||
}
|
||||
println!(" Content length: {} characters", content.len());
|
||||
}
|
||||
Err(e) => {
|
||||
assert!(false, "Failed to read script file {}: {}", script_file, e);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
assert!(false, "Required script file not found: {}", script_file);
|
||||
}
|
||||
}
|
||||
|
||||
println!("✓ All required Rhai script files exist and are readable");
|
||||
}
|
||||
@@ -1,317 +0,0 @@
|
||||
use sal_service_manager::{
|
||||
ServiceConfig, ServiceManager, ServiceManagerError, ServiceStatus, ZinitServiceManager,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
/// Helper function to find an available Zinit socket path
|
||||
async fn get_available_socket_path() -> Option<String> {
|
||||
let socket_paths = [
|
||||
"/var/run/zinit.sock",
|
||||
"/tmp/zinit.sock",
|
||||
"/run/zinit.sock",
|
||||
"./zinit.sock",
|
||||
];
|
||||
|
||||
for path in &socket_paths {
|
||||
// Try to create a ZinitServiceManager to test connectivity
|
||||
if let Ok(manager) = ZinitServiceManager::new(path) {
|
||||
// Test if we can list services (basic connectivity test)
|
||||
if manager.list().is_ok() {
|
||||
println!("✓ Found working Zinit socket at: {}", path);
|
||||
return Some(path.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Helper function to clean up test services
|
||||
async fn cleanup_test_service(manager: &dyn ServiceManager, service_name: &str) {
|
||||
let _ = manager.stop(service_name);
|
||||
let _ = manager.remove(service_name);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_zinit_service_manager_creation() {
|
||||
if let Some(socket_path) = get_available_socket_path().await {
|
||||
let manager = ZinitServiceManager::new(&socket_path);
|
||||
assert!(
|
||||
manager.is_ok(),
|
||||
"Should be able to create ZinitServiceManager"
|
||||
);
|
||||
|
||||
let manager = manager.unwrap();
|
||||
|
||||
// Test basic connectivity by listing services
|
||||
let list_result = manager.list();
|
||||
assert!(list_result.is_ok(), "Should be able to list services");
|
||||
|
||||
println!("✓ ZinitServiceManager created successfully");
|
||||
} else {
|
||||
println!("⚠ Skipping test_zinit_service_manager_creation: No Zinit socket available");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_service_lifecycle() {
|
||||
if let Some(socket_path) = get_available_socket_path().await {
|
||||
let manager = ZinitServiceManager::new(&socket_path).expect("Failed to create manager");
|
||||
let service_name = "test-lifecycle-service";
|
||||
|
||||
// Clean up any existing service first
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
|
||||
let config = ServiceConfig {
|
||||
name: service_name.to_string(),
|
||||
binary_path: "echo".to_string(),
|
||||
args: vec!["Hello from lifecycle test".to_string()],
|
||||
working_directory: Some("/tmp".to_string()),
|
||||
environment: HashMap::new(),
|
||||
auto_restart: false,
|
||||
};
|
||||
|
||||
// Test service creation and start
|
||||
println!("Testing service creation and start...");
|
||||
let start_result = manager.start(&config);
|
||||
match start_result {
|
||||
Ok(_) => {
|
||||
println!("✓ Service started successfully");
|
||||
|
||||
// Wait a bit for the service to run
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
|
||||
// Test service exists
|
||||
let exists = manager.exists(service_name);
|
||||
assert!(exists.is_ok(), "Should be able to check if service exists");
|
||||
|
||||
if let Ok(true) = exists {
|
||||
println!("✓ Service exists check passed");
|
||||
|
||||
// Test service status
|
||||
let status_result = manager.status(service_name);
|
||||
match status_result {
|
||||
Ok(status) => {
|
||||
println!("✓ Service status: {:?}", status);
|
||||
assert!(
|
||||
matches!(status, ServiceStatus::Running | ServiceStatus::Stopped),
|
||||
"Service should be running or stopped (for oneshot)"
|
||||
);
|
||||
}
|
||||
Err(e) => println!("⚠ Status check failed: {}", e),
|
||||
}
|
||||
|
||||
// Test service logs
|
||||
let logs_result = manager.logs(service_name, None);
|
||||
match logs_result {
|
||||
Ok(logs) => {
|
||||
println!("✓ Retrieved logs: {}", logs.len());
|
||||
// For echo command, we should have some output
|
||||
assert!(
|
||||
!logs.is_empty() || logs.contains("Hello"),
|
||||
"Should have log output"
|
||||
);
|
||||
}
|
||||
Err(e) => println!("⚠ Logs retrieval failed: {}", e),
|
||||
}
|
||||
|
||||
// Test service list
|
||||
let list_result = manager.list();
|
||||
match list_result {
|
||||
Ok(services) => {
|
||||
println!("✓ Listed {} services", services.len());
|
||||
assert!(
|
||||
services.contains(&service_name.to_string()),
|
||||
"Service should appear in list"
|
||||
);
|
||||
}
|
||||
Err(e) => println!("⚠ List services failed: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
// Test service stop
|
||||
println!("Testing service stop...");
|
||||
let stop_result = manager.stop(service_name);
|
||||
match stop_result {
|
||||
Ok(_) => println!("✓ Service stopped successfully"),
|
||||
Err(e) => println!("⚠ Stop failed: {}", e),
|
||||
}
|
||||
|
||||
// Test service removal
|
||||
println!("Testing service removal...");
|
||||
let remove_result = manager.remove(service_name);
|
||||
match remove_result {
|
||||
Ok(_) => println!("✓ Service removed successfully"),
|
||||
Err(e) => println!("⚠ Remove failed: {}", e),
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("⚠ Service creation/start failed: {}", e);
|
||||
// This might be expected if zinit doesn't allow service creation
|
||||
}
|
||||
}
|
||||
|
||||
// Final cleanup
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
} else {
|
||||
println!("⚠ Skipping test_service_lifecycle: No Zinit socket available");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_service_start_and_confirm() {
|
||||
if let Some(socket_path) = get_available_socket_path().await {
|
||||
let manager = ZinitServiceManager::new(&socket_path).expect("Failed to create manager");
|
||||
let service_name = "test-start-confirm-service";
|
||||
|
||||
// Clean up any existing service first
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
|
||||
let config = ServiceConfig {
|
||||
name: service_name.to_string(),
|
||||
binary_path: "sleep".to_string(),
|
||||
args: vec!["5".to_string()], // Sleep for 5 seconds
|
||||
working_directory: Some("/tmp".to_string()),
|
||||
environment: HashMap::new(),
|
||||
auto_restart: false,
|
||||
};
|
||||
|
||||
// Test start_and_confirm with timeout
|
||||
println!("Testing start_and_confirm with timeout...");
|
||||
let start_result = manager.start_and_confirm(&config, 10);
|
||||
match start_result {
|
||||
Ok(_) => {
|
||||
println!("✓ Service started and confirmed successfully");
|
||||
|
||||
// Verify it's actually running
|
||||
let status = manager.status(service_name);
|
||||
match status {
|
||||
Ok(ServiceStatus::Running) => println!("✓ Service confirmed running"),
|
||||
Ok(other_status) => {
|
||||
println!("⚠ Service in unexpected state: {:?}", other_status)
|
||||
}
|
||||
Err(e) => println!("⚠ Status check failed: {}", e),
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("⚠ start_and_confirm failed: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Test start_existing_and_confirm
|
||||
println!("Testing start_existing_and_confirm...");
|
||||
let start_existing_result = manager.start_existing_and_confirm(service_name, 5);
|
||||
match start_existing_result {
|
||||
Ok(_) => println!("✓ start_existing_and_confirm succeeded"),
|
||||
Err(e) => println!("⚠ start_existing_and_confirm failed: {}", e),
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
} else {
|
||||
println!("⚠ Skipping test_service_start_and_confirm: No Zinit socket available");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
async fn test_service_restart() {
    if let Some(socket_path) = get_available_socket_path().await {
        let manager = ZinitServiceManager::new(&socket_path).expect("Failed to create manager");
        let service_name = "test-restart-service";

        // Clean up any existing service first
        cleanup_test_service(&manager, service_name).await;

        let config = ServiceConfig {
            name: service_name.to_string(),
            binary_path: "echo".to_string(),
            args: vec!["Restart test".to_string()],
            working_directory: Some("/tmp".to_string()),
            environment: HashMap::new(),
            auto_restart: true, // Enable auto-restart for this test
        };

        // Start the service first
        let start_result = manager.start(&config);
        if start_result.is_ok() {
            // Wait for service to be established
            sleep(Duration::from_millis(1000)).await;

            // Test restart
            println!("Testing service restart...");
            let restart_result = manager.restart(service_name);
            match restart_result {
                Ok(_) => {
                    println!("✓ Service restarted successfully");

                    // Wait and check status
                    sleep(Duration::from_millis(500)).await;

                    let status_result = manager.status(service_name);
                    match status_result {
                        Ok(status) => {
                            println!("✓ Service state after restart: {:?}", status);
                        }
                        Err(e) => println!("⚠ Status check after restart failed: {}", e),
                    }
                }
                Err(e) => {
                    println!("⚠ Restart failed: {}", e);
                }
            }
        }

        // Cleanup
        cleanup_test_service(&manager, service_name).await;
    } else {
        println!("⚠ Skipping test_service_restart: No Zinit socket available");
    }
}

#[tokio::test]
async fn test_error_handling() {
    if let Some(socket_path) = get_available_socket_path().await {
        let manager = ZinitServiceManager::new(&socket_path).expect("Failed to create manager");

        // Test operations on non-existent service
        let non_existent_service = "non-existent-service-12345";

        // Test status of non-existent service
        let status_result = manager.status(non_existent_service);
        match status_result {
            Err(ServiceManagerError::ServiceNotFound(_)) => {
                println!("✓ Correctly returned ServiceNotFound for non-existent service");
            }
            Err(other_error) => {
                println!(
                    "⚠ Got different error for non-existent service: {}",
                    other_error
                );
            }
            Ok(_) => {
                println!("⚠ Unexpectedly found non-existent service");
            }
        }

        // Test exists for non-existent service
        let exists_result = manager.exists(non_existent_service);
        match exists_result {
            Ok(false) => println!("✓ Correctly reported non-existent service as not existing"),
            Ok(true) => println!("⚠ Incorrectly reported non-existent service as existing"),
            Err(e) => println!("⚠ Error checking existence: {}", e),
        }

        // Test stop of non-existent service
        let stop_result = manager.stop(non_existent_service);
        match stop_result {
            Err(_) => println!("✓ Correctly failed to stop non-existent service"),
            Ok(_) => println!("⚠ Unexpectedly succeeded in stopping non-existent service"),
        }

        println!("✓ Error handling tests completed");
    } else {
        println!("⚠ Skipping test_error_handling: No Zinit socket available");
    }
}

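Every test above delegates setup and teardown to a `cleanup_test_service` helper that is defined elsewhere in this test file and not shown in this diff. A minimal sketch of what such a helper could look like, using only the `exists`/`stop`/`remove` methods already exercised above, is given below; the repository's real helper may differ.

```rust
/// Best-effort teardown used by the tests above.
/// Illustrative sketch only: the actual helper lives elsewhere in this file,
/// and `ZinitServiceManager` is assumed to come from the same imports the tests use.
async fn cleanup_test_service(manager: &ZinitServiceManager, service_name: &str) {
    // Ignore errors: the service may not exist or may already be stopped.
    if let Ok(true) = manager.exists(service_name) {
        let _ = manager.stop(service_name);
        let _ = manager.remove(service_name);
    }
}
```
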
32
src/lib.rs
@@ -36,47 +36,17 @@ pub enum Error {
/// Result type for SAL operations
pub type Result<T> = std::result::Result<T, Error>;

// Re-export modules conditionally based on features
#[cfg(feature = "git")]
pub use sal_git as git;

#[cfg(feature = "kubernetes")]
pub use sal_kubernetes as kubernetes;

#[cfg(feature = "mycelium")]
// Re-export modules
pub use sal_mycelium as mycelium;

#[cfg(feature = "net")]
pub use sal_net as net;

#[cfg(feature = "os")]
pub use sal_os as os;

#[cfg(feature = "postgresclient")]
pub use sal_postgresclient as postgresclient;

#[cfg(feature = "process")]
pub use sal_process as process;

#[cfg(feature = "redisclient")]
pub use sal_redisclient as redisclient;

#[cfg(feature = "rhai")]
pub use sal_rhai as rhai;

#[cfg(feature = "service_manager")]
pub use sal_service_manager as service_manager;

#[cfg(feature = "text")]
pub use sal_text as text;

#[cfg(feature = "vault")]
pub use sal_vault as vault;

#[cfg(feature = "virt")]
pub use sal_virt as virt;

#[cfg(feature = "zinit_client")]
pub use sal_zinit_client as zinit_client;

// Version information

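The re-exports above are gated on Cargo features, so a downstream crate only pulls in the sub-crates it actually enables. The sketch below shows how a consumer might rely on that; the facade package name `sal` and the exact dependency line are assumptions for illustration, and no sal-text API is called because that API is not part of this diff.

```rust
// Assumes the facade crate is published as `sal` and that this consumer's
// Cargo.toml enables only what it needs, e.g.:
//
//   [dependencies]
//   sal = { version = "0.1.0", features = ["text"] }

// This import only resolves when the "text" feature is enabled, because the
// corresponding `pub use sal_text as text;` above is behind #[cfg(feature = "text")].
#[allow(unused_imports)]
use sal::text;

fn main() {
    // Nothing from sal-text's API is exercised here; the point is that
    // disabled features keep whole sub-crates out of the dependency graph.
    println!("sal::text re-export is available");
}
```
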
@@ -1,16 +1,7 @@
# SAL Text - Text Processing and Manipulation Utilities (`sal-text`)
# SAL Text - Text Processing and Manipulation Utilities

SAL Text provides a comprehensive collection of text processing utilities for both Rust applications and Rhai scripting environments.

## Installation

Add this to your `Cargo.toml`:

```toml
[dependencies]
sal-text = "0.1.0"
```

## Features

- **Text Indentation**: Remove common leading whitespace (`dedent`) and add prefixes (`prefix`)

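The indentation helpers named in the feature list above can be exercised with a short sketch. The exact signatures used here (`dedent(&str) -> String` and `prefix(&str, &str) -> String`) are assumed from the feature description rather than taken from the crate's documentation.

```rust
// Assumed signatures: dedent(&str) -> String, prefix(&str, &str) -> String.
use sal_text::{dedent, prefix};

fn main() {
    let indented = "    fn main() {\n        println!(\"hi\");\n    }";

    // Assumed behaviour: dedent strips the whitespace shared by all lines.
    let flattened = dedent(indented);
    println!("{}", flattened);

    // Assumed behaviour: prefix prepends the given string to every line.
    let quoted = prefix(&flattened, "> ");
    println!("{}", quoted);
}
```
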
Some files were not shown because too many files have changed in this diff.