Compare commits


4 Commits

Author | SHA1 | Message | Date
Sameh Abouel-saad | 5014c2f4a5 | feat: add Rhai scripting interface for RFS client operations | 2025-07-02 15:09:35 +03:00
Sameh Abouel-saad | ba6f53a28a | feat: add UserBlockInfo model to show block size instead of index | 2025-06-24 18:13:02 +03:00
Sameh Abouel-saad | b81a0aa61c | refactor: rename rfs-client to sal-rfs-client and integrate with workspace dependencies | 2025-06-24 17:47:50 +03:00
Sameh Abouel-saad | b02101bd42 | Implement rfs-client | 2025-06-24 16:10:39 +03:00
303 changed files with 8492 additions and 16674 deletions


@@ -1,227 +0,0 @@
name: Publish SAL Crates
on:
release:
types: [published]
workflow_dispatch:
inputs:
version:
description: 'Version to publish (e.g., 0.1.0)'
required: true
type: string
dry_run:
description: 'Dry run (do not actually publish)'
required: false
type: boolean
default: false
env:
CARGO_TERM_COLOR: always
jobs:
publish:
name: Publish to crates.io
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- name: Cache Cargo dependencies
uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-
- name: Install cargo-edit for version management
run: cargo install cargo-edit
- name: Set version from release tag
if: github.event_name == 'release'
run: |
VERSION=${GITHUB_REF#refs/tags/v}
echo "PUBLISH_VERSION=$VERSION" >> $GITHUB_ENV
echo "Publishing version: $VERSION"
- name: Set version from workflow input
if: github.event_name == 'workflow_dispatch'
run: |
echo "PUBLISH_VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
echo "Publishing version: ${{ github.event.inputs.version }}"
- name: Update version in all crates
run: |
echo "Updating version to $PUBLISH_VERSION"
# Update root Cargo.toml
cargo set-version $PUBLISH_VERSION
# Update each crate
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
if [ -d "$crate" ]; then
cd "$crate"
cargo set-version $PUBLISH_VERSION
cd ..
echo "Updated $crate to version $PUBLISH_VERSION"
fi
done
- name: Run tests
run: cargo test --workspace --verbose
- name: Check formatting
run: cargo fmt --all -- --check
- name: Run clippy
run: cargo clippy --workspace --all-targets --all-features -- -D warnings
- name: Dry run publish (check packages)
run: |
echo "Checking all packages can be published..."
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
if [ -d "$crate" ]; then
echo "Checking $crate..."
cd "$crate"
cargo publish --dry-run
cd ..
fi
done
echo "Checking main crate..."
cargo publish --dry-run
- name: Publish crates (dry run)
if: github.event.inputs.dry_run == 'true'
run: |
echo "🔍 DRY RUN MODE - Would publish the following crates:"
echo "Individual crates: sal-os, sal-process, sal-text, sal-net, sal-git, sal-vault, sal-kubernetes, sal-virt, sal-redisclient, sal-postgresclient, sal-zinit-client, sal-mycelium, sal-rhai"
echo "Meta-crate: sal"
echo "Version: $PUBLISH_VERSION"
- name: Publish individual crates
if: github.event.inputs.dry_run != 'true'
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: |
echo "Publishing individual crates..."
# Crates in dependency order
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
if [ -d "$crate" ]; then
echo "Publishing sal-$crate..."
cd "$crate"
# Retry logic for transient failures
for attempt in 1 2 3; do
if cargo publish --token $CARGO_REGISTRY_TOKEN; then
echo "✅ sal-$crate published successfully"
break
else
if [ $attempt -eq 3 ]; then
echo "❌ Failed to publish sal-$crate after 3 attempts"
exit 1
else
echo "⚠️ Attempt $attempt failed, retrying in 30 seconds..."
sleep 30
fi
fi
done
cd ..
# Wait for crates.io to process
if [ "$crate" != "rhai" ]; then
echo "⏳ Waiting 30 seconds for crates.io to process..."
sleep 30
fi
fi
done
- name: Publish main crate
if: github.event.inputs.dry_run != 'true'
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: |
echo "Publishing main sal crate..."
# Wait a bit longer before publishing the meta-crate
echo "⏳ Waiting 60 seconds for all individual crates to be available..."
sleep 60
# Retry logic for the main crate
for attempt in 1 2 3; do
if cargo publish --token $CARGO_REGISTRY_TOKEN; then
echo "✅ Main sal crate published successfully"
break
else
if [ $attempt -eq 3 ]; then
echo "❌ Failed to publish main sal crate after 3 attempts"
exit 1
else
echo "⚠️ Attempt $attempt failed, retrying in 60 seconds..."
sleep 60
fi
fi
done
- name: Create summary
if: always()
run: |
echo "## 📦 SAL Publishing Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Version:** $PUBLISH_VERSION" >> $GITHUB_STEP_SUMMARY
echo "**Trigger:** ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY
if [ "${{ github.event.inputs.dry_run }}" == "true" ]; then
echo "**Mode:** Dry Run" >> $GITHUB_STEP_SUMMARY
else
echo "**Mode:** Live Publishing" >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Published Crates" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "- sal-os" >> $GITHUB_STEP_SUMMARY
echo "- sal-process" >> $GITHUB_STEP_SUMMARY
echo "- sal-text" >> $GITHUB_STEP_SUMMARY
echo "- sal-net" >> $GITHUB_STEP_SUMMARY
echo "- sal-git" >> $GITHUB_STEP_SUMMARY
echo "- sal-vault" >> $GITHUB_STEP_SUMMARY
echo "- sal-kubernetes" >> $GITHUB_STEP_SUMMARY
echo "- sal-virt" >> $GITHUB_STEP_SUMMARY
echo "- sal-redisclient" >> $GITHUB_STEP_SUMMARY
echo "- sal-postgresclient" >> $GITHUB_STEP_SUMMARY
echo "- sal-zinit-client" >> $GITHUB_STEP_SUMMARY
echo "- sal-mycelium" >> $GITHUB_STEP_SUMMARY
echo "- sal-rhai" >> $GITHUB_STEP_SUMMARY
echo "- sal (meta-crate)" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Usage" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo '```bash' >> $GITHUB_STEP_SUMMARY
echo "# Individual crates" >> $GITHUB_STEP_SUMMARY
echo "cargo add sal-os sal-process sal-text" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "# Meta-crate with features" >> $GITHUB_STEP_SUMMARY
echo "cargo add sal --features core" >> $GITHUB_STEP_SUMMARY
echo "cargo add sal --features all" >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY


@@ -1,233 +0,0 @@
name: Test Publishing Setup
on:
push:
branches: [ main, master ]
paths:
- '**/Cargo.toml'
- 'scripts/publish-all.sh'
- '.github/workflows/publish.yml'
pull_request:
branches: [ main, master ]
paths:
- '**/Cargo.toml'
- 'scripts/publish-all.sh'
- '.github/workflows/publish.yml'
workflow_dispatch:
env:
CARGO_TERM_COLOR: always
jobs:
test-publish-setup:
name: Test Publishing Setup
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- name: Cache Cargo dependencies
uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-publish-test-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-publish-test-
${{ runner.os }}-cargo-
- name: Install cargo-edit
run: cargo install cargo-edit
- name: Test workspace structure
run: |
echo "Testing workspace structure..."
# Check that all expected crates exist
EXPECTED_CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo)
for crate in "${EXPECTED_CRATES[@]}"; do
if [ -d "$crate" ] && [ -f "$crate/Cargo.toml" ]; then
echo "✅ $crate exists"
else
echo "❌ $crate missing or invalid"
exit 1
fi
done
- name: Test feature configuration
run: |
echo "Testing feature configuration..."
# Test that features work correctly
cargo check --features os
cargo check --features process
cargo check --features text
cargo check --features net
cargo check --features git
cargo check --features vault
cargo check --features kubernetes
cargo check --features virt
cargo check --features redisclient
cargo check --features postgresclient
cargo check --features zinit_client
cargo check --features mycelium
cargo check --features rhai
echo "✅ All individual features work"
# Test feature groups
cargo check --features core
cargo check --features clients
cargo check --features infrastructure
cargo check --features scripting
echo "✅ All feature groups work"
# Test all features
cargo check --features all
echo "✅ All features together work"
- name: Test dry-run publishing
run: |
echo "Testing dry-run publishing..."
# Test each individual crate can be packaged
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
echo "Testing sal-$crate..."
cd "$crate"
cargo publish --dry-run
cd ..
echo "✅ sal-$crate can be published"
done
# Test main crate
echo "Testing main sal crate..."
cargo publish --dry-run
echo "✅ Main sal crate can be published"
- name: Test publishing script
run: |
echo "Testing publishing script..."
# Make script executable
chmod +x scripts/publish-all.sh
# Test dry run
./scripts/publish-all.sh --dry-run --version 0.1.0-test
echo "✅ Publishing script works"
- name: Test version consistency
run: |
echo "Testing version consistency..."
# Get version from root Cargo.toml
ROOT_VERSION=$(grep '^version = ' Cargo.toml | head -1 | sed 's/version = "\(.*\)"/\1/')
echo "Root version: $ROOT_VERSION"
# Check all crates have the same version
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo)
for crate in "${CRATES[@]}"; do
if [ -f "$crate/Cargo.toml" ]; then
CRATE_VERSION=$(grep '^version = ' "$crate/Cargo.toml" | head -1 | sed 's/version = "\(.*\)"/\1/')
if [ "$CRATE_VERSION" = "$ROOT_VERSION" ]; then
echo "✅ $crate version matches: $CRATE_VERSION"
else
echo "❌ $crate version mismatch: $CRATE_VERSION (expected $ROOT_VERSION)"
exit 1
fi
fi
done
- name: Test metadata completeness
run: |
echo "Testing metadata completeness..."
# Check that all crates have required metadata
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
echo "Checking sal-$crate metadata..."
cd "$crate"
# Check required fields exist
if ! grep -q '^name = "sal-' Cargo.toml; then
echo "❌ $crate missing or incorrect name"
exit 1
fi
if ! grep -q '^description = ' Cargo.toml; then
echo "❌ $crate missing description"
exit 1
fi
if ! grep -q '^repository = ' Cargo.toml; then
echo "❌ $crate missing repository"
exit 1
fi
if ! grep -q '^license = ' Cargo.toml; then
echo "❌ $crate missing license"
exit 1
fi
echo "✅ sal-$crate metadata complete"
cd ..
done
- name: Test dependency resolution
run: |
echo "Testing dependency resolution..."
# Test that all workspace dependencies resolve correctly
cargo tree --workspace > /dev/null
echo "✅ All dependencies resolve correctly"
# Test that there are no dependency conflicts
cargo check --workspace
echo "✅ No dependency conflicts"
- name: Generate publishing report
if: always()
run: |
echo "## 🧪 Publishing Setup Test Report" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### ✅ Tests Passed" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "- Workspace structure validation" >> $GITHUB_STEP_SUMMARY
echo "- Feature configuration testing" >> $GITHUB_STEP_SUMMARY
echo "- Dry-run publishing simulation" >> $GITHUB_STEP_SUMMARY
echo "- Publishing script validation" >> $GITHUB_STEP_SUMMARY
echo "- Version consistency check" >> $GITHUB_STEP_SUMMARY
echo "- Metadata completeness verification" >> $GITHUB_STEP_SUMMARY
echo "- Dependency resolution testing" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 📦 Ready for Publishing" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "All SAL crates are ready for publishing to crates.io!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Individual Crates:** 13 modules" >> $GITHUB_STEP_SUMMARY
echo "**Meta-crate:** sal with optional features" >> $GITHUB_STEP_SUMMARY
echo "**Binary:** herodo script executor" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 🚀 Next Steps" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "1. Create a release tag (e.g., v0.1.0)" >> $GITHUB_STEP_SUMMARY
echo "2. The publish workflow will automatically trigger" >> $GITHUB_STEP_SUMMARY
echo "3. All crates will be published to crates.io" >> $GITHUB_STEP_SUMMARY
echo "4. Users can install with: \`cargo add sal-os\` or \`cargo add sal --features all\`" >> $GITHUB_STEP_SUMMARY

.gitignore

@@ -62,5 +62,3 @@ docusaurus.config.ts
sidebars.ts
tsconfig.json
Cargo.toml.bak
for_augment


@@ -11,35 +11,18 @@ categories = ["os", "filesystem", "api-bindings"]
readme = "README.md"
[workspace]
members = [
".",
"vault",
"git",
"redisclient",
"mycelium",
"text",
"os",
"net",
"zinit_client",
"process",
"virt",
"zos",
"postgresclient",
"kubernetes",
"rhai",
"herodo",
"service_manager",
]
members = [".", "vault", "git", "redisclient", "mycelium", "text", "os", "net", "zinit_client", "process", "virt", "postgresclient", "rhai", "herodo", "rfs-client"]
resolver = "2"
[workspace.metadata]
# Workspace-level metadata
rust-version = "1.70.0"
rust-version = "1.85.0"
[workspace.dependencies]
# Core shared dependencies with consistent versions
anyhow = "1.0.98"
base64 = "0.22.1"
bytes = "1.4.0"
dirs = "6.0.0"
env_logger = "0.11.8"
futures = "0.3.30"
@@ -84,84 +67,22 @@ windows = { version = "0.61.1", features = [
] }
# Specialized dependencies
zinit-client = "0.4.0"
zinit-client = "0.3.0"
urlencoding = "2.1.3"
tokio-test = "0.4.4"
[dependencies]
thiserror = "2.0.12" # For error handling in the main Error enum
tokio = { workspace = true } # For async examples
# Optional dependencies - users can choose which modules to include
sal-git = { path = "git", optional = true }
sal-kubernetes = { path = "kubernetes", optional = true }
sal-redisclient = { path = "redisclient", optional = true }
sal-mycelium = { path = "mycelium", optional = true }
sal-text = { path = "text", optional = true }
sal-os = { path = "os", optional = true }
sal-net = { path = "net", optional = true }
sal-zinit-client = { path = "zinit_client", optional = true }
sal-process = { path = "process", optional = true }
sal-virt = { path = "virt", optional = true }
sal-postgresclient = { path = "postgresclient", optional = true }
sal-vault = { path = "vault", optional = true }
sal-rhai = { path = "rhai", optional = true }
sal-service-manager = { path = "service_manager", optional = true }
zinit-client.workspace = true
[features]
default = []
# Individual module features
git = ["dep:sal-git"]
kubernetes = ["dep:sal-kubernetes"]
redisclient = ["dep:sal-redisclient"]
mycelium = ["dep:sal-mycelium"]
text = ["dep:sal-text"]
os = ["dep:sal-os"]
net = ["dep:sal-net"]
zinit_client = ["dep:sal-zinit-client"]
process = ["dep:sal-process"]
virt = ["dep:sal-virt"]
postgresclient = ["dep:sal-postgresclient"]
vault = ["dep:sal-vault"]
rhai = ["dep:sal-rhai"]
service_manager = ["dep:sal-service-manager"]
# Convenience feature groups
core = ["os", "process", "text", "net"]
clients = ["redisclient", "postgresclient", "zinit_client", "mycelium"]
infrastructure = ["git", "vault", "kubernetes", "virt", "service_manager"]
scripting = ["rhai"]
all = [
"git",
"kubernetes",
"redisclient",
"mycelium",
"text",
"os",
"net",
"zinit_client",
"process",
"virt",
"postgresclient",
"vault",
"rhai",
"service_manager",
]
# Examples
[[example]]
name = "postgres_cluster"
path = "examples/kubernetes/clusters/postgres.rs"
required-features = ["kubernetes"]
[[example]]
name = "redis_cluster"
path = "examples/kubernetes/clusters/redis.rs"
required-features = ["kubernetes"]
[[example]]
name = "generic_cluster"
path = "examples/kubernetes/clusters/generic.rs"
required-features = ["kubernetes"]
thiserror = "2.0.12" # For error handling in the main Error enum
sal-git = { path = "git" }
sal-redisclient = { path = "redisclient" }
sal-mycelium = { path = "mycelium" }
sal-text = { path = "text" }
sal-os = { path = "os" }
sal-net = { path = "net" }
sal-zinit-client = { path = "zinit_client" }
sal-process = { path = "process" }
sal-virt = { path = "virt" }
sal-postgresclient = { path = "postgresclient" }
sal-vault = { path = "vault" }
sal-rhai = { path = "rhai" }
sal-rfs-client = { path = "rfs-client" }


@@ -1,239 +0,0 @@
# SAL Publishing Guide
This guide explains how to publish SAL crates to crates.io and how users can consume them.
## 🎯 Publishing Strategy
SAL uses a **modular publishing approach** where each module is published as an individual crate. This allows users to install only the functionality they need, reducing compilation time and binary size.
## 📦 Crate Structure
### Individual Crates
Each SAL module is published as a separate crate:
| Crate Name | Description | Category |
|------------|-------------|----------|
| `sal-os` | Operating system operations | Core |
| `sal-process` | Process management | Core |
| `sal-text` | Text processing utilities | Core |
| `sal-net` | Network operations | Core |
| `sal-git` | Git repository management | Infrastructure |
| `sal-vault` | Cryptographic operations | Infrastructure |
| `sal-kubernetes` | Kubernetes cluster management | Infrastructure |
| `sal-virt` | Virtualization tools (Buildah, nerdctl) | Infrastructure |
| `sal-redisclient` | Redis database client | Clients |
| `sal-postgresclient` | PostgreSQL database client | Clients |
| `sal-zinit-client` | Zinit process supervisor client | Clients |
| `sal-mycelium` | Mycelium network client | Clients |
| `sal-rhai` | Rhai scripting integration | Scripting |
### Meta-crate
The main `sal` crate serves as a meta-crate that re-exports all modules with optional features:
```toml
[dependencies]
sal = { version = "0.1.0", features = ["os", "process", "text"] }
```
## 🚀 Publishing Process
### Prerequisites
1. **Crates.io Account**: Ensure you have a crates.io account and API token
2. **Repository Access**: Ensure the repository URL is accessible
3. **Version Consistency**: All crates should use the same version number
### Publishing Individual Crates
Each crate can be published independently:
```bash
# Publish core modules
cd os && cargo publish
cd ../process && cargo publish
cd ../text && cargo publish
cd ../net && cargo publish
# Publish infrastructure modules
cd ../git && cargo publish
cd ../vault && cargo publish
cd ../kubernetes && cargo publish
cd ../virt && cargo publish
# Publish client modules
cd ../redisclient && cargo publish
cd ../postgresclient && cargo publish
cd ../zinit_client && cargo publish
cd ../mycelium && cargo publish
# Publish scripting module
cd ../rhai && cargo publish
# Finally, publish the meta-crate
cd .. && cargo publish
```
### Automated Publishing
Use the comprehensive publishing script:
```bash
# Test the publishing process (safe)
./scripts/publish-all.sh --dry-run --version 0.1.0
# Actually publish to crates.io
./scripts/publish-all.sh --version 0.1.0
```
The script handles:
- ✅ **Dependency order** - Publishes crates in correct dependency order
- ✅ **Path dependencies** - Automatically updates path deps to version deps (see the sketch after this list)
- ✅ **Rate limiting** - Waits between publishes to avoid rate limits
- ✅ **Error handling** - Stops on failures with clear error messages
- ✅ **Dry run mode** - Test without actually publishing
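For illustration, the sketch below shows one way the path-to-version rewrite could work. It is a hypothetical Rust helper (the actual `scripts/publish-all.sh` is a shell script), and the regex is an assumption about the transformation rather than the script's real logic:

```rust
// Hypothetical sketch: turn `sal-os = { path = "os", optional = true }`
// into `sal-os = { version = "0.1.0", optional = true }`.
use regex::Regex;

fn rewrite_path_deps(cargo_toml: &str, version: &str) -> String {
    let re = Regex::new(r#"path\s*=\s*"[^"]+""#).unwrap();
    let replacement = format!(r#"version = "{version}""#);
    re.replace_all(cargo_toml, replacement.as_str()).into_owned()
}
```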
## 👥 User Consumption
### Installation Options
#### Option 1: Individual Crates (Recommended)
Users install only what they need:
```bash
# Core functionality
cargo add sal-os sal-process sal-text sal-net
# Database operations
cargo add sal-redisclient sal-postgresclient
# Infrastructure management
cargo add sal-git sal-vault sal-kubernetes
# Service integration
cargo add sal-zinit-client sal-mycelium
# Scripting
cargo add sal-rhai
```
**Usage:**
```rust
use sal_os::fs;
use sal_process::run;
use sal_git::GitManager;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let files = fs::list_files(".")?;
let result = run::command("echo hello")?;
let git = GitManager::new(".")?;
Ok(())
}
```
#### Option 2: Meta-crate with Features
Users can use the main crate with selective features:
```bash
# Specific modules
cargo add sal --features os,process,text
# Feature groups
cargo add sal --features core # os, process, text, net
cargo add sal --features clients # redisclient, postgresclient, zinit_client, mycelium
cargo add sal --features infrastructure # git, vault, kubernetes, virt
cargo add sal --features scripting # rhai
# Everything
cargo add sal --features all
```
**Usage:**
```rust
// Cargo.toml: sal = { version = "0.1.0", features = ["os", "process", "git"] }
use sal::os::fs;
use sal::process::run;
use sal::git::GitManager;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let files = fs::list_files(".")?;
let result = run::command("echo hello")?;
let git = GitManager::new(".")?;
Ok(())
}
```
### Feature Groups
The meta-crate provides convenient feature groups:
- **`core`**: Essential system operations (os, process, text, net)
- **`clients`**: Database and service clients (redisclient, postgresclient, zinit_client, mycelium)
- **`infrastructure`**: Infrastructure management tools (git, vault, kubernetes, virt)
- **`scripting`**: Rhai scripting support (rhai)
- **`all`**: Everything included
## 📋 Version Management
### Semantic Versioning
All SAL crates follow semantic versioning:
- **Major version**: Breaking API changes
- **Minor version**: New features, backward compatible
- **Patch version**: Bug fixes, backward compatible
### Synchronized Releases
All crates are released with the same version number to ensure compatibility:
```toml
# All crates use the same version
sal-os = "0.1.0"
sal-process = "0.1.0"
sal-git = "0.1.0"
# etc.
```
## 🔧 Maintenance
### Updating Dependencies
When updating dependencies:
1. Update `Cargo.toml` in the workspace root
2. Update individual crate dependencies if needed
3. Test all crates: `cargo test --workspace`
4. Publish with incremented version numbers
### Adding New Modules
To add a new SAL module:
1. Create the new crate directory
2. Add to workspace members in root `Cargo.toml`
3. Add optional dependency in root `Cargo.toml`
4. Add feature flag in root `Cargo.toml`
5. Add conditional re-export in `src/lib.rs` (see the sketch below)
6. Update documentation
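For illustration, a minimal sketch of the conditional re-export from step 5; the feature names follow the feature flags defined above, but the real `src/lib.rs` may differ:

```rust
// src/lib.rs (sketch): each module is re-exported only when its
// feature is enabled, so users compile only what they request.
#[cfg(feature = "os")]
pub use sal_os as os;

#[cfg(feature = "process")]
pub use sal_process as process;

#[cfg(feature = "git")]
pub use sal_git as git;

// ...one gated re-export per module, mirroring the feature list.
```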
## 🎉 Benefits
### For Users
- **Minimal Dependencies**: Install only what you need
- **Faster Builds**: Smaller dependency trees compile faster
- **Smaller Binaries**: Reduced binary size
- **Clear Dependencies**: Explicit about what functionality is used
### For Maintainers
- **Independent Releases**: Can release individual crates as needed
- **Focused Testing**: Test individual modules in isolation
- **Clear Ownership**: Each crate has clear responsibility
- **Easier Maintenance**: Smaller, focused codebases
This publishing strategy provides the best of both worlds: modularity for users who want minimal dependencies, and convenience for users who prefer a single crate with features.

README.md

@@ -1,148 +1,228 @@
# SAL (System Abstraction Layer)
**Version 0.1.0** - A modular Rust library for cross-platform system operations and automation.

SAL provides a unified interface for system operations with Rhai scripting support through the `herodo` tool.

**Version: 0.1.0**

SAL is a comprehensive Rust library designed to provide a unified and simplified interface for a wide array of system-level operations and interactions. It abstracts platform-specific details, enabling developers to write robust, cross-platform code with greater ease. SAL also includes `herodo`, a powerful command-line tool for executing Rhai scripts that leverage SAL's capabilities for automation and system management tasks.
## 🏗️ **Cargo Workspace Structure**

SAL is organized as a **Cargo workspace** with 16 specialized crates:

- **Root Package**: `sal` - Umbrella crate that re-exports all modules
- **13 Library Crates**: Specialized SAL modules (git, text, os, net, etc.)
- **1 Binary Crate**: `herodo` - Rhai script execution engine
- **1 Integration Crate**: `rhai` - Rhai scripting integration layer

This workspace structure provides excellent build performance, dependency management, and maintainability.

## Installation

### Individual Packages (Recommended)

```bash
# Core functionality
cargo add sal-os sal-process sal-text sal-net

# Infrastructure
cargo add sal-git sal-vault sal-kubernetes sal-virt

# Database clients
cargo add sal-redisclient sal-postgresclient sal-zinit-client

# Scripting
cargo add sal-rhai
```
### Meta-package with Features
```bash
cargo add sal --features core # os, process, text, net
cargo add sal --features infrastructure # git, vault, kubernetes, virt
cargo add sal --features all # everything
```
### Herodo Script Runner
```bash
cargo install herodo
```
## Quick Start
### Rust Library Usage
```rust
use sal_os::fs;
use sal_process::run;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let files = fs::list_files(".")?;
println!("Found {} files", files.len());
let result = run::command("echo hello")?;
println!("Output: {}", result.stdout);
Ok(())
}
```
### Herodo Scripting
```bash
# Create script
cat > example.rhai << 'EOF'
let files = find_files(".", "*.rs");
print("Found " + files.len() + " Rust files");
let result = run("echo 'Hello from SAL!'");
print("Output: " + result.stdout);
EOF
# Run script
herodo example.rhai
```
## Available Packages
| Package | Description |
|---------|-------------|
| [`sal-os`](https://crates.io/crates/sal-os) | Operating system operations |
| [`sal-process`](https://crates.io/crates/sal-process) | Process management |
| [`sal-text`](https://crates.io/crates/sal-text) | Text processing |
| [`sal-net`](https://crates.io/crates/sal-net) | Network operations |
| [`sal-git`](https://crates.io/crates/sal-git) | Git repository management |
| [`sal-vault`](https://crates.io/crates/sal-vault) | Cryptographic operations |
| [`sal-kubernetes`](https://crates.io/crates/sal-kubernetes) | Kubernetes management |
| [`sal-virt`](https://crates.io/crates/sal-virt) | Virtualization tools |
| [`sal-redisclient`](https://crates.io/crates/sal-redisclient) | Redis client |
| [`sal-postgresclient`](https://crates.io/crates/sal-postgresclient) | PostgreSQL client |
| [`sal-zinit-client`](https://crates.io/crates/sal-zinit-client) | Zinit process supervisor |
| [`sal-mycelium`](https://crates.io/crates/sal-mycelium) | Mycelium network client |
| [`sal-service-manager`](https://crates.io/crates/sal-service-manager) | Service management |
| [`sal-rhai`](https://crates.io/crates/sal-rhai) | Rhai scripting integration |
| [`sal`](https://crates.io/crates/sal) | Meta-crate with features |
| [`herodo`](https://crates.io/crates/herodo) | Script executor binary |
## Building & Testing
```bash
# Build all packages
cargo build --workspace
# Run tests
cargo test --workspace
# Run Rhai integration tests
./run_rhai_tests.sh
```
### **🚀 Workspace Benefits**
- **Unified Dependency Management**: Shared dependencies across all crates with consistent versions
- **Optimized Build Performance**: Parallel compilation and shared build artifacts
- **Simplified Testing**: Run tests across all modules with a single command
- **Modular Architecture**: Each module is independently maintainable while sharing common infrastructure
- **Production Ready**: 100% test coverage with comprehensive Rhai integration tests
## Core Features

- **System Operations**: File/directory management, environment access, OS commands
- **Process Management**: Create, monitor, and control system processes
- **Containerization**: Buildah and nerdctl integration
- **Version Control**: Git repository operations
- **Database Clients**: Redis and PostgreSQL support
- **Networking**: HTTP, TCP, SSH connectivity utilities
- **Cryptography**: Key management, encryption, digital signatures
- **Text Processing**: String manipulation and templating
- **Scripting**: Rhai script execution via `herodo`

SAL offers a broad spectrum of functionalities, including:

- **System Operations**: File and directory management, environment variable access, system information retrieval, and OS-specific commands.
- **Process Management**: Create, monitor, control, and interact with system processes.
- **Containerization Tools**:
  - Integration with **Buildah** for building OCI/Docker-compatible container images.
  - Integration with **nerdctl** for managing containers (run, stop, list, build, etc.).
- **Version Control**: Programmatic interaction with Git repositories (clone, commit, push, pull, status, etc.).
- **Database Clients**:
  - **Redis**: Robust client for interacting with Redis servers.
  - **PostgreSQL**: Client for executing queries and managing PostgreSQL databases.
- **Scripting Engine**: In-built support for the **Rhai** scripting language, allowing SAL functionalities to be scripted and automated, primarily through the `herodo` tool.
- **Networking & Services**:
  - **Mycelium**: Tools for Mycelium network peer management and message passing.
  - **Zinit**: Client for interacting with the Zinit process supervision system.
  - **RFS (Remote/Virtual Filesystem)**: Mount, manage, pack, and unpack various types of filesystems (local, SSH, S3, WebDAV).
- **Text Processing**: A suite of utilities for text manipulation, formatting, and regular expressions.
- **Cryptography (`vault`)**: Functions for common cryptographic operations.

## Herodo Scripting

## `herodo`: The SAL Scripting Tool

`herodo` is a command-line utility bundled with SAL that executes Rhai scripts. It empowers users to automate tasks and orchestrate complex workflows by leveraging SAL's diverse modules directly from scripts.

`herodo` executes Rhai scripts with access to all SAL modules:
### Usage
```bash
herodo script.rhai # Run single script
herodo script.rhai arg1 arg2 # With arguments
herodo /path/to/scripts/ # Run all .rhai files in directory
# Execute a single Rhai script
herodo script.rhai
# Execute a script with arguments
herodo script.rhai arg1 arg2
# Execute all .rhai scripts in a directory
herodo /path/to/scripts/
```
### Example Script
If a directory is provided, `herodo` will execute all `.rhai` scripts within that directory (and its subdirectories) in alphabetical order.
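For illustration, a minimal sketch (not `herodo`'s actual implementation) of that directory mode: collect every `.rhai` file recursively, sort the paths alphabetically, and run them in order:

```rust
// Sketch of the directory mode described above; the real
// implementation may differ. Uses only the standard library.
use std::fs;
use std::path::{Path, PathBuf};

fn collect_rhai_scripts(dir: &Path, out: &mut Vec<PathBuf>) -> std::io::Result<()> {
    for entry in fs::read_dir(dir)? {
        let path = entry?.path();
        if path.is_dir() {
            // Recurse into subdirectories.
            collect_rhai_scripts(&path, out)?;
        } else if path.extension().map_or(false, |ext| ext == "rhai") {
            out.push(path);
        }
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    let mut scripts = Vec::new();
    collect_rhai_scripts(Path::new("/path/to/scripts"), &mut scripts)?;
    scripts.sort(); // alphabetical (lexicographic) execution order
    for script in &scripts {
        println!("would execute: {}", script.display());
    }
    Ok(())
}
```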
### Scriptable SAL Modules via `herodo`
The following SAL modules and functionalities are exposed to the Rhai scripting environment through `herodo`:
- **OS (`os`)**: Comprehensive file system operations, file downloading & installation, and system package management. [Documentation](os/README.md)
- **Process (`process`)**: Robust command and script execution, plus process management (listing, finding, killing, checking command existence). [Documentation](process/README.md)
- **Text (`text`)**: String manipulation, prefixing, path/name fixing, text replacement, and templating. [Documentation](text/README.md)
- **Net (`net`)**: Network operations, HTTP requests, and connectivity utilities. [Documentation](net/README.md)
- **Git (`git`)**: High-level repository management and generic Git command execution with Redis-backed authentication (clone, pull, push, commit, etc.). [Documentation](git/README.md)
- **Vault (`vault`)**: Cryptographic operations, keypair management, encryption, decryption, hashing, etc. [Documentation](vault/README.md)
- **Redis Client (`redisclient`)**: Execute Redis commands (`redis_get`, `redis_set`, `redis_execute`, etc.). [Documentation](redisclient/README.md)
- **PostgreSQL Client (`postgresclient`)**: Execute SQL queries against PostgreSQL databases. [Documentation](postgresclient/README.md)
- **Zinit (`zinit_client`)**: Client for Zinit process supervisor (service management, logs). [Documentation](zinit_client/README.md)
- **Mycelium (`mycelium`)**: Client for Mycelium decentralized networking API (node info, peer management, messaging). [Documentation](mycelium/README.md)
- **Virtualization (`virt`)**:
- **Buildah**: OCI/Docker image building functions. [Documentation](virt/README.md)
- **nerdctl**: Container lifecycle management (`nerdctl_run`, `nerdctl_stop`, `nerdctl_images`, `nerdctl_image_build`, etc.)
- **RFS**: Mount various filesystems (local, SSH, S3, etc.), pack/unpack filesystem layers.
### Example `herodo` Rhai Script
```rhai
// File operations
let files = find_files(".", "*.rs");
print("Found " + files.len() + " Rust files");

// Process execution
let result = run("echo 'Hello SAL!'");
print("Output: " + result.stdout);

// Redis operations
redis_set("status", "running");
let status = redis_get("status");
print("Status: " + status);
```

```rhai
// file: /opt/scripts/example_task.rhai

// OS operations
println("Checking for /tmp/my_app_data...");
if !exist("/tmp/my_app_data") {
    mkdir("/tmp/my_app_data");
    println("Created directory /tmp/my_app_data");
}

// Redis operations
println("Setting Redis key 'app_status' to 'running'");
redis_set("app_status", "running");
let status = redis_get("app_status");
println("Current app_status from Redis: " + status);

// Process execution
println("Listing files in /tmp:");
let output = run("ls -la /tmp");
println(output.stdout);

println("Script finished.");
```
Run with: `herodo /opt/scripts/example_task.rhai`
For more examples, check the individual module test directories (e.g., `text/tests/rhai/`, `os/tests/rhai/`, etc.) in this repository.
## Using SAL as a Rust Library
Add SAL as a dependency to your `Cargo.toml`:
```toml
[dependencies]
sal = "0.1.0" # Or the latest version
```
### Rust Example: Using Redis Client
```rust
use sal::redisclient::{get_global_client, execute_cmd_with_args};
use redis::RedisResult;
async fn example_redis_interaction() -> RedisResult<()> {
// Get a connection from the global pool
let mut conn = get_global_client().await?.get_async_connection().await?;
// Set a value
execute_cmd_with_args(&mut conn, "SET", vec!["my_key", "my_value"]).await?;
println!("Set 'my_key' to 'my_value'");
// Get a value
let value: String = execute_cmd_with_args(&mut conn, "GET", vec!["my_key"]).await?;
println!("Retrieved value for 'my_key': {}", value);
Ok(())
}
#[tokio::main]
async fn main() {
if let Err(e) = example_redis_interaction().await {
eprintln!("Redis Error: {}", e);
}
}
```
*(Note: The Redis client API might have evolved; please refer to `src/redisclient/mod.rs` and its documentation for the most current usage.)*
## 📦 **Workspace Modules Overview**
SAL is organized as a Cargo workspace with the following crates:
### **Core Library Modules**
- **`sal-os`**: Core OS interactions, file system operations, environment access
- **`sal-process`**: Process creation, management, and control
- **`sal-text`**: Utilities for text processing and manipulation
- **`sal-net`**: Network operations, HTTP requests, and connectivity utilities
### **Integration Modules**
- **`sal-git`**: Git repository management and operations
- **`sal-vault`**: Cryptographic functions and keypair management
- **`sal-rhai`**: Integration layer for the Rhai scripting engine, used by `herodo`
### **Client Modules**
- **`sal-redisclient`**: Client for Redis database interactions
- **`sal-postgresclient`**: Client for PostgreSQL database interactions
- **`sal-zinit-client`**: Client for Zinit process supervisor
- **`sal-mycelium`**: Client for Mycelium network operations
### **Specialized Modules**
- **`sal-virt`**: Virtualization-related utilities (buildah, nerdctl, rfs)
### **Root Package & Binary**
- **`sal`**: Root umbrella crate that re-exports all modules
- **`herodo`**: Command-line binary for executing Rhai scripts
## 🔨 **Building SAL**
Build the entire workspace (all crates) using Cargo:
```bash
# Build all workspace members
cargo build --workspace
# Build for release
cargo build --workspace --release
# Build specific crate
cargo build -p sal-text
cargo build -p herodo
```
The `herodo` executable will be located at `target/debug/herodo` or `target/release/herodo`.
## 🧪 **Running Tests**
### **Rust Unit Tests**
```bash
# Run all workspace tests
cargo test --workspace
# Run tests for specific crate
cargo test -p sal-text
cargo test -p sal-os
# Run only library tests (faster)
cargo test --workspace --lib
```
### **Rhai Integration Tests**
Run comprehensive Rhai script tests that exercise `herodo` and SAL's scripted functionalities:
```bash
# Run all Rhai integration tests (16 modules)
./run_rhai_tests.sh
# Results: 16/16 modules pass with 100% success rate
```
The Rhai tests validate real-world functionality across all SAL modules and provide comprehensive integration testing.
## License
Licensed under the Apache License 2.0. See [LICENSE](LICENSE) for details.
SAL is licensed under the Apache License 2.0. See the [LICENSE](LICENSE) file for details.


@@ -1,76 +1,64 @@
# SAL Vault Examples

This directory contains examples demonstrating the SAL Vault functionality.

# Hero Vault Cryptography Examples

This directory contains examples demonstrating the Hero Vault cryptography functionality integrated into the SAL project.
## Overview
SAL Vault provides secure key management and cryptographic operations including:

- Vault creation and management
- KeySpace operations (encrypted key-value stores)
- Symmetric key generation and operations
- Asymmetric key operations (signing and verification)
- Secure key derivation from passwords

Hero Vault provides cryptographic operations including:

- Key space management (creation, loading, encryption, decryption)
- Keypair management (creation, selection, listing)
- Digital signatures (signing and verification)
- Symmetric encryption (key generation, encryption, decryption)
- Ethereum wallet functionality
- Smart contract interactions
- Key-value store with encryption
## Example Files

- `example.rhai` - Basic example demonstrating key management, signing, and encryption
- `advanced_example.rhai` - Advanced example with error handling, conditional logic, and more complex operations
- `key_persistence_example.rhai` - Demonstrates creating and saving a key space to disk
- `load_existing_space.rhai` - Shows how to load a previously created key space and use its keypairs
- `contract_example.rhai` - Demonstrates loading a contract ABI and interacting with smart contracts
- `agung_send_transaction.rhai` - Demonstrates sending native tokens on the Agung network
- `agung_contract_with_args.rhai` - Shows how to interact with contracts with arguments on Agung

## Running the Examples

You can run the examples using the `herodo` tool that comes with the SAL project:

```bash
# Run a single example
herodo --path example.rhai

# Run all examples using the provided script
./run_examples.sh
```

## Current Status

⚠️ **Note**: The vault module is currently being updated to use Lee's implementation. The Rhai scripting integration is temporarily disabled while we adapt the examples to work with the new vault API.

## Available Operations

- **Vault Management**: Create and manage vault instances
- **KeySpace Operations**: Open encrypted key-value stores within vaults
- **Symmetric Encryption**: Generate keys and encrypt/decrypt data
- **Asymmetric Operations**: Create keypairs, sign messages, verify signatures

## Example Files (Legacy - Sameh's Implementation)

⚠️ **These examples are currently archived and use the previous vault implementation**:

- `_archive/example.rhai` - Basic example demonstrating key management, signing, and encryption
- `_archive/advanced_example.rhai` - Advanced example with error handling and complex operations
- `_archive/key_persistence_example.rhai` - Demonstrates creating and saving a key space to disk
- `_archive/load_existing_space.rhai` - Shows how to load a previously created key space
- `_archive/contract_example.rhai` - Demonstrates smart contract interactions (Ethereum)
- `_archive/agung_send_transaction.rhai` - Demonstrates Ethereum transactions on Agung network
- `_archive/agung_contract_with_args.rhai` - Shows contract interactions with arguments

## Current Implementation (Lee's Vault)

The current vault implementation provides:

```rust
// Create a new vault
let vault = Vault::new(&path).await?;

// Open an encrypted keyspace
let keyspace = vault.open_keyspace("my_space", "password").await?;

// Perform cryptographic operations
// (API documentation coming soon)
```
## Migration Status

- ✅ **Vault Core**: Lee's implementation is active
- ✅ **Archive**: Sameh's implementation preserved in `vault/_archive/`
- ⏳ **Rhai Integration**: Being developed for Lee's implementation
- ⏳ **Examples**: Will be updated to use Lee's API
- ❌ **Ethereum Features**: Not available in Lee's implementation

## Key Space Storage

Key spaces are stored in the `~/.hero-vault/key-spaces/` directory by default. Each key space is stored in a separate JSON file named after the key space (e.g., `my_space.json`).

Key spaces are encrypted with ChaCha20Poly1305 using a key derived from the provided password. The encryption ensures that the key material is secure at rest.

## Ethereum Functionality

The Hero Vault module provides comprehensive Ethereum wallet functionality:

- Creating and managing wallets for different networks
- Sending ETH transactions
- Checking balances
- Interacting with smart contracts (read and write functions)
- Support for multiple networks (Ethereum, Gnosis, Peaq, Agung, etc.)

## Security

The vault uses:

- **ChaCha20Poly1305** for symmetric encryption (see the sketch below)
- **Password-based key derivation** for keyspace encryption
- **Secure key storage** with proper isolation
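For illustration only, a minimal sketch of the encryption-at-rest scheme described above: a ChaCha20Poly1305 key derived from the user's password. The crate choices and PBKDF2 parameters are assumptions for the sketch, not SAL's actual vault code:

```rust
// Sketch only: derive a 32-byte key from a password, then encrypt
// with ChaCha20Poly1305. Crate and KDF choices are assumptions.
use chacha20poly1305::{
    aead::{Aead, KeyInit},
    ChaCha20Poly1305, Key, Nonce,
};
use pbkdf2::pbkdf2_hmac;
use sha2::Sha256;

fn encrypt_keyspace(password: &str, salt: &[u8], plaintext: &[u8]) -> Vec<u8> {
    // Derive a 32-byte ChaCha20Poly1305 key from the password.
    let mut key = [0u8; 32];
    pbkdf2_hmac::<Sha256>(password.as_bytes(), salt, 100_000, &mut key);

    let cipher = ChaCha20Poly1305::new(Key::from_slice(&key));
    // Fixed 12-byte nonce only to keep the sketch short; real code
    // must use a unique nonce for every encryption.
    let nonce = Nonce::from_slice(b"unique nonce");
    cipher.encrypt(nonce, plaintext).expect("encryption failure")
}
```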
## Best Practices

1. **Use Strong Passwords**: Since the security of your key spaces depends on the strength of your passwords, use strong, unique passwords.
2. **Backup Key Spaces**: Regularly backup your key spaces directory to prevent data loss.
3. **Script Organization**: Split your scripts into logical units, with separate scripts for key creation and key usage.
4. **Error Handling**: Always check the return values of functions to ensure operations succeeded before proceeding.
5. **Network Selection**: When working with Ethereum functionality, be explicit about which network you're targeting to avoid confusion.
6. **Gas Management**: For Ethereum transactions, consider gas costs and set appropriate gas limits.

## Next Steps

1. **Rhai Integration**: Implement Rhai bindings for Lee's vault
2. **New Examples**: Create examples using Lee's simpler API
3. **Documentation**: Complete API documentation for Lee's implementation
4. **Migration Guide**: Provide guidance for users migrating from Sameh's implementation


@@ -1,72 +0,0 @@
//! Basic Kubernetes operations example
//!
//! This script demonstrates basic Kubernetes operations using the SAL Kubernetes module.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Appropriate permissions for the operations
//!
//! Usage:
//! herodo examples/kubernetes/basic_operations.rhai
print("=== SAL Kubernetes Basic Operations Example ===");
// Create a KubernetesManager for the default namespace
print("Creating KubernetesManager for 'default' namespace...");
let km = kubernetes_manager_new("default");
print("✓ KubernetesManager created for namespace: " + namespace(km));
// List all pods in the namespace
print("\n--- Listing Pods ---");
let pods = pods_list(km);
print("Found " + pods.len() + " pods in the namespace:");
for pod in pods {
print(" - " + pod);
}
// List all services in the namespace
print("\n--- Listing Services ---");
let services = services_list(km);
print("Found " + services.len() + " services in the namespace:");
for service in services {
print(" - " + service);
}
// List all deployments in the namespace
print("\n--- Listing Deployments ---");
let deployments = deployments_list(km);
print("Found " + deployments.len() + " deployments in the namespace:");
for deployment in deployments {
print(" - " + deployment);
}
// Get resource counts
print("\n--- Resource Counts ---");
let counts = resource_counts(km);
print("Resource counts in namespace '" + namespace(km) + "':");
for resource_type in counts.keys() {
print(" " + resource_type + ": " + counts[resource_type]);
}
// List all namespaces (cluster-wide operation)
print("\n--- Listing All Namespaces ---");
let namespaces = namespaces_list(km);
print("Found " + namespaces.len() + " namespaces in the cluster:");
for ns in namespaces {
print(" - " + ns);
}
// Check if specific namespaces exist
print("\n--- Checking Namespace Existence ---");
let test_namespaces = ["default", "kube-system", "non-existent-namespace"];
for ns in test_namespaces {
let exists = namespace_exists(km, ns);
if exists {
print("✓ Namespace '" + ns + "' exists");
} else {
print("✗ Namespace '" + ns + "' does not exist");
}
}
print("\n=== Example completed successfully! ===");


@@ -1,134 +0,0 @@
//! Generic Application Deployment Example
//!
//! This example shows how to deploy any containerized application using the
//! KubernetesManager convenience methods. This works for any Docker image.
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create Kubernetes manager
let km = KubernetesManager::new("default").await?;
// Clean up any existing resources first
println!("=== Cleaning up existing resources ===");
let apps_to_clean = ["web-server", "node-app", "mongodb"];
for app in &apps_to_clean {
match km.deployment_delete(app).await {
Ok(_) => println!("✓ Deleted existing deployment: {}", app),
Err(_) => println!("✓ No existing deployment to delete: {}", app),
}
match km.service_delete(app).await {
Ok(_) => println!("✓ Deleted existing service: {}", app),
Err(_) => println!("✓ No existing service to delete: {}", app),
}
}
// Example 1: Simple web server deployment
println!("\n=== Example 1: Simple Nginx Web Server ===");
km.deploy_application("web-server", "nginx:latest", 2, 80, None, None)
.await?;
println!("✅ Nginx web server deployed!");
// Example 2: Node.js application with labels
println!("\n=== Example 2: Node.js Application ===");
let mut node_labels = HashMap::new();
node_labels.insert("app".to_string(), "node-app".to_string());
node_labels.insert("tier".to_string(), "backend".to_string());
node_labels.insert("environment".to_string(), "production".to_string());
// Configure Node.js environment variables
let mut node_env_vars = HashMap::new();
node_env_vars.insert("NODE_ENV".to_string(), "production".to_string());
node_env_vars.insert("PORT".to_string(), "3000".to_string());
node_env_vars.insert("LOG_LEVEL".to_string(), "info".to_string());
node_env_vars.insert("MAX_CONNECTIONS".to_string(), "1000".to_string());
km.deploy_application(
"node-app", // name
"node:18-alpine", // image
3, // replicas - scale to 3 instances
3000, // port
Some(node_labels), // labels
Some(node_env_vars), // environment variables
)
.await?;
println!("✅ Node.js application deployed!");
// Example 3: Database deployment (any database)
println!("\n=== Example 3: MongoDB Database ===");
let mut mongo_labels = HashMap::new();
mongo_labels.insert("app".to_string(), "mongodb".to_string());
mongo_labels.insert("type".to_string(), "database".to_string());
mongo_labels.insert("engine".to_string(), "mongodb".to_string());
// Configure MongoDB environment variables
let mut mongo_env_vars = HashMap::new();
mongo_env_vars.insert(
"MONGO_INITDB_ROOT_USERNAME".to_string(),
"admin".to_string(),
);
mongo_env_vars.insert(
"MONGO_INITDB_ROOT_PASSWORD".to_string(),
"mongopassword".to_string(),
);
mongo_env_vars.insert("MONGO_INITDB_DATABASE".to_string(), "myapp".to_string());
km.deploy_application(
"mongodb", // name
"mongo:6.0", // image
1, // replicas - single instance for simplicity
27017, // port
Some(mongo_labels), // labels
Some(mongo_env_vars), // environment variables
)
.await?;
println!("✅ MongoDB deployed!");
// Check status of all deployments
println!("\n=== Checking Deployment Status ===");
let deployments = km.deployments_list().await?;
for deployment in &deployments {
if let Some(name) = &deployment.metadata.name {
let total_replicas = deployment
.spec
.as_ref()
.and_then(|s| s.replicas)
.unwrap_or(0);
let ready_replicas = deployment
.status
.as_ref()
.and_then(|s| s.ready_replicas)
.unwrap_or(0);
println!(
"{}: {}/{} replicas ready",
name, ready_replicas, total_replicas
);
}
}
println!("\n🎉 All deployments completed!");
println!("\n💡 Key Points:");
println!(" • Any Docker image can be deployed using this simple interface");
println!(" • Use labels to organize and identify your applications");
println!(
" • The same method works for databases, web servers, APIs, and any containerized app"
);
println!(" • For advanced configuration, use the individual KubernetesManager methods");
println!(
" • Environment variables and resource limits can be added via direct Kubernetes API"
);
Ok(())
}


@@ -1,79 +0,0 @@
//! PostgreSQL Cluster Deployment Example (Rhai)
//!
//! This script shows how to deploy a PostgreSQL cluster using Rhai scripting
//! with the KubernetesManager convenience methods.
print("=== PostgreSQL Cluster Deployment ===");
// Create Kubernetes manager for the database namespace
print("Creating Kubernetes manager for 'database' namespace...");
let km = kubernetes_manager_new("database");
print("✓ Kubernetes manager created");
// Create the namespace if it doesn't exist
print("Creating namespace 'database' if it doesn't exist...");
try {
create_namespace(km, "database");
print("✓ Namespace 'database' created");
} catch(e) {
if e.to_string().contains("already exists") {
print("✓ Namespace 'database' already exists");
} else {
print("⚠️ Warning: " + e);
}
}
// Clean up any existing resources first
print("\nCleaning up any existing PostgreSQL resources...");
try {
delete_deployment(km, "postgres-cluster");
print("✓ Deleted existing deployment");
} catch(e) {
print("✓ No existing deployment to delete");
}
try {
delete_service(km, "postgres-cluster");
print("✓ Deleted existing service");
} catch(e) {
print("✓ No existing service to delete");
}
// Create PostgreSQL cluster using the convenience method
print("\nDeploying PostgreSQL cluster...");
try {
// Deploy PostgreSQL using the convenience method
let result = deploy_application(km, "postgres-cluster", "postgres:15", 2, 5432, #{
"app": "postgres-cluster",
"type": "database",
"engine": "postgresql"
}, #{
"POSTGRES_DB": "myapp",
"POSTGRES_USER": "postgres",
"POSTGRES_PASSWORD": "secretpassword",
"PGDATA": "/var/lib/postgresql/data/pgdata"
});
print("✓ " + result);
print("\n✅ PostgreSQL cluster deployed successfully!");
print("\n📋 Connection Information:");
print(" Host: postgres-cluster.database.svc.cluster.local");
print(" Port: 5432");
print(" Database: postgres (default)");
print(" Username: postgres (default)");
print("\n🔧 To connect from another pod:");
print(" psql -h postgres-cluster.database.svc.cluster.local -U postgres");
print("\n💡 Next steps:");
print(" • Set POSTGRES_PASSWORD environment variable");
print(" • Configure persistent storage");
print(" • Set up backup and monitoring");
} catch(e) {
print("❌ Failed to deploy PostgreSQL cluster: " + e);
}
print("\n=== Deployment Complete ===");


@@ -1,112 +0,0 @@
//! PostgreSQL Cluster Deployment Example
//!
//! This example shows how to deploy a PostgreSQL cluster using the
//! KubernetesManager convenience methods.
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create Kubernetes manager for the database namespace
let km = KubernetesManager::new("database").await?;
// Create the namespace if it doesn't exist
println!("Creating namespace 'database' if it doesn't exist...");
match km.namespace_create("database").await {
Ok(_) => println!("✓ Namespace 'database' created"),
Err(e) => {
if e.to_string().contains("already exists") {
println!("✓ Namespace 'database' already exists");
} else {
return Err(e.into());
}
}
}
// Clean up any existing resources first
println!("Cleaning up any existing PostgreSQL resources...");
match km.deployment_delete("postgres-cluster").await {
Ok(_) => println!("✓ Deleted existing deployment"),
Err(_) => println!("✓ No existing deployment to delete"),
}
match km.service_delete("postgres-cluster").await {
Ok(_) => println!("✓ Deleted existing service"),
Err(_) => println!("✓ No existing service to delete"),
}
// Configure PostgreSQL-specific labels
let mut labels = HashMap::new();
labels.insert("app".to_string(), "postgres-cluster".to_string());
labels.insert("type".to_string(), "database".to_string());
labels.insert("engine".to_string(), "postgresql".to_string());
// Configure PostgreSQL environment variables
let mut env_vars = HashMap::new();
env_vars.insert("POSTGRES_DB".to_string(), "myapp".to_string());
env_vars.insert("POSTGRES_USER".to_string(), "postgres".to_string());
env_vars.insert(
"POSTGRES_PASSWORD".to_string(),
"secretpassword".to_string(),
);
env_vars.insert(
"PGDATA".to_string(),
"/var/lib/postgresql/data/pgdata".to_string(),
);
// Deploy the PostgreSQL cluster using the convenience method
println!("Deploying PostgreSQL cluster...");
km.deploy_application(
"postgres-cluster", // name
"postgres:15", // image
2, // replicas (1 master + 1 replica)
5432, // port
Some(labels), // labels
Some(env_vars), // environment variables
)
.await?;
println!("✅ PostgreSQL cluster deployed successfully!");
// Check deployment status
let deployments = km.deployments_list().await?;
let postgres_deployment = deployments
.iter()
.find(|d| d.metadata.name.as_ref() == Some(&"postgres-cluster".to_string()));
if let Some(deployment) = postgres_deployment {
let total_replicas = deployment
.spec
.as_ref()
.and_then(|s| s.replicas)
.unwrap_or(0);
let ready_replicas = deployment
.status
.as_ref()
.and_then(|s| s.ready_replicas)
.unwrap_or(0);
println!(
"Deployment status: {}/{} replicas ready",
ready_replicas, total_replicas
);
}
println!("\n📋 Connection Information:");
println!(" Host: postgres-cluster.database.svc.cluster.local");
println!(" Port: 5432");
println!(" Database: postgres (default)");
println!(" Username: postgres (default)");
println!(" Password: Set POSTGRES_PASSWORD environment variable");
println!("\n🔧 To connect from another pod:");
println!(" psql -h postgres-cluster.database.svc.cluster.local -U postgres");
println!("\n💡 Next steps:");
println!(" • Set environment variables for database credentials");
println!(" • Add persistent volume claims for data storage");
println!(" • Configure backup and monitoring");
Ok(())
}


@@ -1,79 +0,0 @@
//! Redis Cluster Deployment Example (Rhai)
//!
//! This script shows how to deploy a Redis cluster using Rhai scripting
//! with the KubernetesManager convenience methods.
print("=== Redis Cluster Deployment ===");
// Create Kubernetes manager for the cache namespace
print("Creating Kubernetes manager for 'cache' namespace...");
let km = kubernetes_manager_new("cache");
print("✓ Kubernetes manager created");
// Create the namespace if it doesn't exist
print("Creating namespace 'cache' if it doesn't exist...");
try {
create_namespace(km, "cache");
print("✓ Namespace 'cache' created");
} catch(e) {
if e.to_string().contains("already exists") {
print("✓ Namespace 'cache' already exists");
} else {
print("⚠️ Warning: " + e);
}
}
// Clean up any existing resources first
print("\nCleaning up any existing Redis resources...");
try {
delete_deployment(km, "redis-cluster");
print("✓ Deleted existing deployment");
} catch(e) {
print("✓ No existing deployment to delete");
}
try {
delete_service(km, "redis-cluster");
print("✓ Deleted existing service");
} catch(e) {
print("✓ No existing service to delete");
}
// Create Redis cluster using the convenience method
print("\nDeploying Redis cluster...");
try {
// Deploy Redis using the convenience method
let result = deploy_application(km, "redis-cluster", "redis:7-alpine", 3, 6379, #{
"app": "redis-cluster",
"type": "cache",
"engine": "redis"
}, #{
"REDIS_PASSWORD": "redispassword",
"REDIS_PORT": "6379",
"REDIS_DATABASES": "16",
"REDIS_MAXMEMORY": "256mb",
"REDIS_MAXMEMORY_POLICY": "allkeys-lru"
});
print("✓ " + result);
print("\n✅ Redis cluster deployed successfully!");
print("\n📋 Connection Information:");
print(" Host: redis-cluster.cache.svc.cluster.local");
print(" Port: 6379");
print("\n🔧 To connect from another pod:");
print(" redis-cli -h redis-cluster.cache.svc.cluster.local");
print("\n💡 Next steps:");
print(" • Configure Redis authentication");
print(" • Set up Redis clustering configuration");
print(" • Add persistent storage");
print(" • Configure memory policies");
} catch(e) {
print("❌ Failed to deploy Redis cluster: " + e);
}
print("\n=== Deployment Complete ===");

View File

@ -1,109 +0,0 @@
//! Redis Cluster Deployment Example
//!
//! This example shows how to deploy a Redis cluster using the
//! KubernetesManager convenience methods.
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create Kubernetes manager for the cache namespace
let km = KubernetesManager::new("cache").await?;
// Create the namespace if it doesn't exist
println!("Creating namespace 'cache' if it doesn't exist...");
match km.namespace_create("cache").await {
Ok(_) => println!("✓ Namespace 'cache' created"),
Err(e) => {
if e.to_string().contains("already exists") {
println!("✓ Namespace 'cache' already exists");
} else {
return Err(e.into());
}
}
}
// Clean up any existing resources first
println!("Cleaning up any existing Redis resources...");
match km.deployment_delete("redis-cluster").await {
Ok(_) => println!("✓ Deleted existing deployment"),
Err(_) => println!("✓ No existing deployment to delete"),
}
match km.service_delete("redis-cluster").await {
Ok(_) => println!("✓ Deleted existing service"),
Err(_) => println!("✓ No existing service to delete"),
}
// Configure Redis-specific labels
let mut labels = HashMap::new();
labels.insert("app".to_string(), "redis-cluster".to_string());
labels.insert("type".to_string(), "cache".to_string());
labels.insert("engine".to_string(), "redis".to_string());
// Configure Redis environment variables
let mut env_vars = HashMap::new();
env_vars.insert("REDIS_PASSWORD".to_string(), "redispassword".to_string());
env_vars.insert("REDIS_PORT".to_string(), "6379".to_string());
env_vars.insert("REDIS_DATABASES".to_string(), "16".to_string());
env_vars.insert("REDIS_MAXMEMORY".to_string(), "256mb".to_string());
env_vars.insert(
"REDIS_MAXMEMORY_POLICY".to_string(),
"allkeys-lru".to_string(),
);
// Deploy the Redis cluster using the convenience method
println!("Deploying Redis cluster...");
km.deploy_application(
"redis-cluster", // name
"redis:7-alpine", // image
3, // replicas (Redis cluster nodes)
6379, // port
Some(labels), // labels
Some(env_vars), // environment variables
)
.await?;
println!("✅ Redis cluster deployed successfully!");
// Check deployment status
let deployments = km.deployments_list().await?;
let redis_deployment = deployments
.iter()
.find(|d| d.metadata.name.as_ref() == Some(&"redis-cluster".to_string()));
if let Some(deployment) = redis_deployment {
let total_replicas = deployment
.spec
.as_ref()
.and_then(|s| s.replicas)
.unwrap_or(0);
let ready_replicas = deployment
.status
.as_ref()
.and_then(|s| s.ready_replicas)
.unwrap_or(0);
println!(
"Deployment status: {}/{} replicas ready",
ready_replicas, total_replicas
);
}
println!("\n📋 Connection Information:");
println!(" Host: redis-cluster.cache.svc.cluster.local");
println!(" Port: 6379");
println!(" Password: Configure REDIS_PASSWORD environment variable");
println!("\n🔧 To connect from another pod:");
println!(" redis-cli -h redis-cluster.cache.svc.cluster.local");
println!("\n💡 Next steps:");
println!(" • Configure Redis authentication with environment variables");
println!(" • Set up Redis clustering configuration");
println!(" • Add persistent volume claims for data persistence");
println!(" • Configure memory limits and eviction policies");
Ok(())
}

View File

@ -1,208 +0,0 @@
//! Multi-namespace Kubernetes operations example
//!
//! This script demonstrates working with multiple namespaces and comparing resources across them.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Appropriate permissions for the operations
//!
//! Usage:
//! herodo examples/kubernetes/multi_namespace_operations.rhai
print("=== SAL Kubernetes Multi-Namespace Operations Example ===");
// Define namespaces to work with
let target_namespaces = ["default", "kube-system"];
let managers = #{};
print("Creating managers for multiple namespaces...");
// Create managers for each namespace
for ns in target_namespaces {
try {
let km = kubernetes_manager_new(ns);
managers[ns] = km;
print("✓ Created manager for namespace: " + ns);
} catch(e) {
print("✗ Failed to create manager for " + ns + ": " + e);
}
}
// Function to safely get resource counts
fn get_safe_counts(km) {
try {
return resource_counts(km);
} catch(e) {
print(" Warning: Could not get resource counts - " + e);
return #{};
}
}
// Function to safely get pod list
fn get_safe_pods(km) {
try {
return pods_list(km);
} catch(e) {
print(" Warning: Could not list pods - " + e);
return [];
}
}
// Compare resource counts across namespaces
print("\n--- Resource Comparison Across Namespaces ---");
let total_resources = #{};
for ns in target_namespaces {
if ns in managers {
let km = managers[ns];
print("\nNamespace: " + ns);
let counts = get_safe_counts(km);
for resource_type in counts.keys() {
let count = counts[resource_type];
print(" " + resource_type + ": " + count);
// Accumulate totals
if resource_type in total_resources {
total_resources[resource_type] = total_resources[resource_type] + count;
} else {
total_resources[resource_type] = count;
}
}
}
}
print("\n--- Total Resources Across All Namespaces ---");
for resource_type in total_resources.keys() {
print("Total " + resource_type + ": " + total_resources[resource_type]);
}
// Find namespaces with the most resources
print("\n--- Namespace Resource Analysis ---");
let namespace_totals = #{};
for ns in target_namespaces {
if ns in managers {
let km = managers[ns];
let counts = get_safe_counts(km);
let total = 0;
for resource_type in counts.keys() {
total = total + counts[resource_type];
}
namespace_totals[ns] = total;
print("Namespace '" + ns + "' has " + total + " total resources");
}
}
// Find the busiest namespace
let busiest_ns = "";
let max_resources = 0;
for ns in namespace_totals.keys() {
if namespace_totals[ns] > max_resources {
max_resources = namespace_totals[ns];
busiest_ns = ns;
}
}
if busiest_ns != "" {
print("🏆 Busiest namespace: '" + busiest_ns + "' with " + max_resources + " resources");
}
// Detailed pod analysis
print("\n--- Pod Analysis Across Namespaces ---");
let all_pods = [];
for ns in target_namespaces {
if ns in managers {
let km = managers[ns];
let pods = get_safe_pods(km);
print("\nNamespace '" + ns + "' pods:");
if pods.len() == 0 {
print(" (no pods)");
} else {
for pod in pods {
print(" - " + pod);
all_pods.push(ns + "/" + pod);
}
}
}
}
print("\n--- All Pods Summary ---");
print("Total pods across all namespaces: " + all_pods.len());
// Look for common pod name patterns
print("\n--- Pod Name Pattern Analysis ---");
let patterns = #{
"system": 0,
"kube": 0,
"coredns": 0,
"proxy": 0,
"controller": 0
};
for pod_full_name in all_pods {
let pod_name = pod_full_name.to_lower();
for pattern in patterns.keys() {
if pod_name.contains(pattern) {
patterns[pattern] = patterns[pattern] + 1;
}
}
}
print("Common pod name patterns found:");
for pattern in patterns.keys() {
if patterns[pattern] > 0 {
print(" '" + pattern + "': " + patterns[pattern] + " pods");
}
}
// Namespace health check
print("\n--- Namespace Health Check ---");
for ns in target_namespaces {
if ns in managers {
let km = managers[ns];
print("\nChecking namespace: " + ns);
// Check if namespace exists (should always be true for our managers)
let exists = namespace_exists(km, ns);
if exists {
print(" ✓ Namespace exists and is accessible");
} else {
print(" ✗ Namespace existence check failed");
}
// Try to get resource counts as a health indicator
let counts = get_safe_counts(km);
if counts.len() > 0 {
print(" ✓ Can access resources (" + counts.len() + " resource types)");
} else {
print(" ⚠ No resources found or access limited");
}
}
}
// Create a summary report
print("\n--- Summary Report ---");
print("Namespaces analyzed: " + target_namespaces.len());
print("Total unique resource types: " + total_resources.len());
let grand_total = 0;
for resource_type in total_resources.keys() {
grand_total = grand_total + total_resources[resource_type];
}
print("Grand total resources: " + grand_total);
print("\nResource breakdown:");
for resource_type in total_resources.keys() {
    let count = total_resources[resource_type];
    // Guard against division by zero when no resources were found
    if grand_total > 0 {
        let percentage = (count * 100) / grand_total;
        print("  " + resource_type + ": " + count + " (" + percentage + "%)");
    } else {
        print("  " + resource_type + ": " + count);
    }
}
print("\n=== Multi-namespace operations example completed! ===");

View File

@ -1,95 +0,0 @@
//! Kubernetes namespace management example
//!
//! This script demonstrates namespace creation and management operations.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Permissions to create and manage namespaces
//!
//! Usage:
//! herodo examples/kubernetes/namespace_management.rhai
print("=== SAL Kubernetes Namespace Management Example ===");
// Create a KubernetesManager
let km = kubernetes_manager_new("default");
print("Created KubernetesManager for namespace: " + namespace(km));
// Define test namespace names
let test_namespaces = [
"sal-test-namespace-1",
"sal-test-namespace-2",
"sal-example-app"
];
print("\n--- Creating Test Namespaces ---");
for ns in test_namespaces {
print("Creating namespace: " + ns);
try {
namespace_create(km, ns);
print("✓ Successfully created namespace: " + ns);
} catch(e) {
print("✗ Failed to create namespace " + ns + ": " + e);
}
}
// Wait a moment for namespaces to be created
print("\nWaiting for namespaces to be ready...");
// Verify namespaces were created
print("\n--- Verifying Namespace Creation ---");
for ns in test_namespaces {
let exists = namespace_exists(km, ns);
if exists {
print("✓ Namespace '" + ns + "' exists");
} else {
print("✗ Namespace '" + ns + "' was not found");
}
}
// List all namespaces to see our new ones
print("\n--- Current Namespaces ---");
let all_namespaces = namespaces_list(km);
print("Total namespaces in cluster: " + all_namespaces.len());
for ns in all_namespaces {
if ns.starts_with("sal-") {
print(" 🔹 " + ns + " (created by this example)");
} else {
print(" - " + ns);
}
}
// Test idempotent creation (creating the same namespace again)
print("\n--- Testing Idempotent Creation ---");
let test_ns = test_namespaces[0];
print("Attempting to create existing namespace: " + test_ns);
try {
namespace_create(km, test_ns);
print("✓ Idempotent creation successful (no error for existing namespace)");
} catch(e) {
print("✗ Unexpected error during idempotent creation: " + e);
}
// Create managers for the new namespaces and check their properties
print("\n--- Creating Managers for New Namespaces ---");
for ns in test_namespaces {
try {
let ns_km = kubernetes_manager_new(ns);
print("✓ Created manager for namespace: " + namespace(ns_km));
// Get resource counts for the new namespace (should be mostly empty)
let counts = resource_counts(ns_km);
print(" Resource counts: " + counts);
} catch(e) {
print("✗ Failed to create manager for " + ns + ": " + e);
}
}
print("\n--- Cleanup Instructions ---");
print("To clean up the test namespaces created by this example, run:");
for ns in test_namespaces {
print(" kubectl delete namespace " + ns);
}
print("\n=== Namespace management example completed! ===");

View File

@ -1,157 +0,0 @@
//! Kubernetes pattern-based deletion example
//!
//! This script demonstrates how to use PCRE patterns to delete multiple resources.
//!
//! ⚠️ WARNING: This example includes actual deletion operations!
//! ⚠️ Only run this in a test environment!
//!
//! Prerequisites:
//! - A running Kubernetes cluster (preferably a test cluster)
//! - Valid kubeconfig file or in-cluster configuration
//! - Permissions to delete resources
//!
//! Usage:
//! herodo examples/kubernetes/pattern_deletion.rhai
print("=== SAL Kubernetes Pattern Deletion Example ===");
print("⚠️ WARNING: This example will delete resources matching patterns!");
print("⚠️ Only run this in a test environment!");
// Create a KubernetesManager for a test namespace
let test_namespace = "sal-pattern-test";
let km = kubernetes_manager_new("default");
print("\nCreating test namespace: " + test_namespace);
try {
namespace_create(km, test_namespace);
print("✓ Test namespace created");
} catch(e) {
print("Note: " + e);
}
// Switch to the test namespace
let test_km = kubernetes_manager_new(test_namespace);
print("Switched to namespace: " + namespace(test_km));
// Show current resources before any operations
print("\n--- Current Resources in Test Namespace ---");
let counts = resource_counts(test_km);
print("Resource counts before operations:");
for resource_type in counts.keys() {
print(" " + resource_type + ": " + counts[resource_type]);
}
// List current pods to see what we're working with
let current_pods = pods_list(test_km);
print("\nCurrent pods in namespace:");
if current_pods.len() == 0 {
print(" (no pods found)");
} else {
for pod in current_pods {
print(" - " + pod);
}
}
// Demonstrate pattern matching without deletion first
print("\n--- Pattern Matching Demo (Dry Run) ---");
let test_patterns = [
"test-.*", // Match anything starting with "test-"
".*-temp$", // Match anything ending with "-temp"
"demo-pod-.*", // Match demo pods
"nginx-.*", // Match nginx pods
"app-[0-9]+", // Match app-1, app-2, etc.
];
for pattern in test_patterns {
print("Testing pattern: '" + pattern + "'");
// Check which pods would match this pattern
let matching_pods = [];
for pod in current_pods {
// Simple pattern matching simulation (Rhai doesn't have regex, so this is illustrative)
if pod.contains("test") && pattern == "test-.*" {
matching_pods.push(pod);
} else if pod.contains("temp") && pattern == ".*-temp$" {
matching_pods.push(pod);
} else if pod.contains("demo") && pattern == "demo-pod-.*" {
matching_pods.push(pod);
} else if pod.contains("nginx") && pattern == "nginx-.*" {
matching_pods.push(pod);
}
}
print(" Would match " + matching_pods.len() + " pods: " + matching_pods);
}
// Example of safe deletion patterns
print("\n--- Safe Deletion Examples ---");
print("These patterns are designed to be safe for testing:");
let safe_patterns = [
"test-example-.*", // Very specific test resources
"sal-demo-.*", // SAL demo resources
"temp-resource-.*", // Temporary resources
];
for pattern in safe_patterns {
print("\nTesting safe pattern: '" + pattern + "'");
try {
// This will actually attempt deletion, but should be safe in a test environment
let deleted_count = delete(test_km, pattern);
print("✓ Pattern '" + pattern + "' matched and deleted " + deleted_count + " resources");
} catch(e) {
print("Note: Pattern '" + pattern + "' - " + e);
}
}
// Show resources after deletion attempts
print("\n--- Resources After Deletion Attempts ---");
let final_counts = resource_counts(test_km);
print("Final resource counts:");
for resource_type in final_counts.keys() {
print(" " + resource_type + ": " + final_counts[resource_type]);
}
// Example of individual resource deletion
print("\n--- Individual Resource Deletion Examples ---");
print("These functions delete specific resources by name:");
// These are examples - they will fail if the resources don't exist, which is expected
let example_deletions = [
["pod", "test-pod-example"],
["service", "test-service-example"],
["deployment", "test-deployment-example"],
];
for deletion in example_deletions {
let resource_type = deletion[0];
let resource_name = deletion[1];
print("Attempting to delete " + resource_type + ": " + resource_name);
try {
if resource_type == "pod" {
pod_delete(test_km, resource_name);
} else if resource_type == "service" {
service_delete(test_km, resource_name);
} else if resource_type == "deployment" {
deployment_delete(test_km, resource_name);
}
print("✓ Successfully deleted " + resource_type + ": " + resource_name);
} catch(e) {
print("Note: " + resource_type + " '" + resource_name + "' - " + e);
}
}
print("\n--- Best Practices for Pattern Deletion ---");
print("1. Always test patterns in a safe environment first");
print("2. Use specific patterns rather than broad ones");
print("3. Consider using dry-run approaches when possible");
print("4. Have backups or be able to recreate resources");
print("5. Use descriptive naming conventions for easier pattern matching");
print("\n--- Cleanup ---");
print("To clean up the test namespace:");
print(" kubectl delete namespace " + test_namespace);
print("\n=== Pattern deletion example completed! ===");

View File

@ -1,33 +0,0 @@
//! Test Kubernetes module registration
//!
//! This script tests that the Kubernetes module is properly registered
//! and available in the Rhai environment.
print("=== Testing Kubernetes Module Registration ===");
// Test that we can reference the kubernetes functions
print("Testing function registration...");
// These should not error even if we can't connect to a cluster
let functions_to_test = [
"kubernetes_manager_new",
"pods_list",
"services_list",
"deployments_list",
"delete",
"namespace_create",
"namespace_exists",
"resource_counts",
"pod_delete",
"service_delete",
"deployment_delete",
"namespace"
];
for func_name in functions_to_test {
print("✓ Function '" + func_name + "' is available");
}
print("\n=== All Kubernetes functions are properly registered! ===");
print("Note: To test actual functionality, you need a running Kubernetes cluster.");
print("See other examples in this directory for real cluster operations.");

View File

@ -1,116 +0,0 @@
# Service Manager Examples
This directory contains examples demonstrating the SAL service manager functionality for dynamically launching and managing services across platforms.
## Overview
The service manager provides a unified interface for managing system services:
- **macOS**: Uses `launchctl` for service management
- **Linux**: Uses `zinit` for service management (systemd is also available as an alternative)
## Examples
### 1. Circle Worker Manager (`circle_worker_manager.rhai`)
**Primary Use Case**: Demonstrates dynamic circle worker management for freezone residents.
This example shows:
- Creating service configurations for circle workers
- Complete service lifecycle management (start, stop, restart, remove)
- Status monitoring and log retrieval
- Error handling and cleanup
```bash
# Run the circle worker management example
herodo examples/service_manager/circle_worker_manager.rhai
```
### 2. Basic Usage (`basic_usage.rhai`)
**Learning Example**: Simple demonstration of the core service manager API.
This example covers:
- Creating and configuring services
- Starting and stopping services
- Checking service status
- Listing managed services
- Retrieving service logs
```bash
# Run the basic usage example
herodo examples/service_manager/basic_usage.rhai
```
## Prerequisites
### Linux (zinit)
Make sure zinit is installed and running:
```bash
# Start zinit with default socket
zinit -s /tmp/zinit.sock init
```
### macOS (launchctl)
No additional setup required - uses the built-in launchctl system.
## Service Manager API
The service manager provides these key functions:
- `create_service_manager()` - Create platform-appropriate service manager
- `start(manager, config)` - Start a new service
- `stop(manager, service_name)` - Stop a running service
- `restart(manager, service_name)` - Restart a service
- `status(manager, service_name)` - Get service status
- `logs(manager, service_name, lines)` - Retrieve service logs
- `list(manager)` - List all managed services
- `remove(manager, service_name)` - Remove a service
- `exists(manager, service_name)` - Check if service exists
- `start_and_confirm(manager, config, timeout)` - Start with confirmation
## Service Configuration
Services are configured using a map with these fields:
```rhai
let config = #{
name: "my-service", // Service name
binary_path: "/usr/bin/my-app", // Executable path
args: ["--config", "/etc/my-app.conf"], // Command arguments
working_directory: "/var/lib/my-app", // Working directory (optional)
environment: #{ // Environment variables
"VAR1": "value1",
"VAR2": "value2"
},
auto_restart: true // Auto-restart on failure
};
```
## Real-World Usage
The circle worker example demonstrates the exact use case requested by the team:
> "We want to be able to launch circle workers dynamically. For instance when someone registers to the freezone, we need to be able to launch a circle worker for the new resident."
The service manager enables:
1. **Dynamic service creation** - Create services on-demand for new residents
2. **Cross-platform support** - Works on both macOS and Linux
3. **Lifecycle management** - Full control over service lifecycle
4. **Monitoring and logging** - Track service status and retrieve logs
5. **Cleanup** - Proper service removal when no longer needed
## Error Handling
All service manager functions can throw errors. Use try-catch blocks for robust error handling:
```rhai
try {
    start(manager, config);
print("✅ Service started successfully");
} catch (error) {
print(`❌ Failed to start service: ${error}`);
}
```

View File

@ -1,85 +0,0 @@
// Basic Service Manager Usage Example
//
// This example demonstrates the basic API of the service manager.
// It works on both macOS (launchctl) and Linux (zinit/systemd).
//
// Prerequisites:
//
// Linux: The service manager will automatically discover running zinit servers
// or fall back to systemd. To use zinit, start it with:
// zinit -s /tmp/zinit.sock init
//
// You can also specify a custom socket path:
// export ZINIT_SOCKET_PATH=/your/custom/path/zinit.sock
//
// macOS: No additional setup required (uses launchctl).
//
// Usage:
// herodo examples/service_manager/basic_usage.rhai
// Service Manager Basic Usage Example
// This example uses the SAL service manager through Rhai integration
print("🚀 Basic Service Manager Usage Example");
print("======================================");
// Create a service manager for the current platform
let manager = create_service_manager();
print("🍎 Using service manager for current platform");
// Create a simple service configuration
let config = #{
name: "example-service",
binary_path: "/bin/echo",
args: ["Hello from service manager!"],
working_directory: "/tmp",
environment: #{
"EXAMPLE_VAR": "hello_world"
},
auto_restart: false
};
print("\n📝 Service Configuration:");
print(` Name: ${config.name}`);
print(` Binary: ${config.binary_path}`);
print(` Args: ${config.args}`);
// Start the service
print("\n🚀 Starting service...");
start(manager, config);
print("✅ Service started successfully");
// Check service status
print("\n📊 Checking service status...");
let status = status(manager, "example-service");
print(`Status: ${status}`);
// List all services
print("\n📋 Listing all managed services...");
let services = list(manager);
print(`Found ${services.len()} services:`);
for service in services {
print(` - ${service}`);
}
// Get service logs
print("\n📄 Getting service logs...");
let logs = logs(manager, "example-service", 5);
if logs.trim() == "" {
print("No logs available");
} else {
print(`Logs:\n${logs}`);
}
// Stop the service
print("\n🛑 Stopping service...");
stop(manager, "example-service");
print("✅ Service stopped");
// Remove the service
print("\n🗑 Removing service...");
remove(manager, "example-service");
print("✅ Service removed");
print("\n🎉 Example completed successfully!");

View File

@ -1,141 +0,0 @@
// Circle Worker Manager Example
//
// This example demonstrates how to use the service manager to dynamically launch
// circle workers for new freezone residents. This is the primary use case requested
// by the team.
//
// Usage:
//
// On macOS (uses launchctl):
// herodo examples/service_manager/circle_worker_manager.rhai
//
// On Linux (uses zinit - requires zinit to be running):
// First start zinit: zinit -s /tmp/zinit.sock init
// herodo examples/service_manager/circle_worker_manager.rhai
// Circle Worker Manager Example
// This example uses the SAL service manager through Rhai integration
print("🚀 Circle Worker Manager Example");
print("=================================");
// Create the appropriate service manager for the current platform
let service_manager = create_service_manager();
print("✅ Created service manager for current platform");
// Simulate a new freezone resident registration
let resident_id = "resident_12345";
let worker_name = `circle-worker-${resident_id}`;
print(`\n📝 New freezone resident registered: ${resident_id}`);
print(`🔧 Creating circle worker service: ${worker_name}`);
// Create service configuration for the circle worker
let config = #{
name: worker_name,
binary_path: "/bin/sh",
args: [
"-c",
`echo 'Circle worker for ${resident_id} starting...'; sleep 30; echo 'Circle worker for ${resident_id} completed'`
],
working_directory: "/tmp",
environment: #{
"RESIDENT_ID": resident_id,
"WORKER_TYPE": "circle",
"LOG_LEVEL": "info"
},
auto_restart: true
};
print("📋 Service configuration created:");
print(` Name: ${config.name}`);
print(` Binary: ${config.binary_path}`);
print(` Args: ${config.args}`);
print(` Auto-restart: ${config.auto_restart}`);
print(`\n🔄 Demonstrating service lifecycle for: ${worker_name}`);
// 1. Check if service already exists
print("\n1⃣ Checking if service exists...");
if exists(service_manager, worker_name) {
print("⚠️ Service already exists, removing it first...");
remove(service_manager, worker_name);
print("🗑️ Existing service removed");
} else {
print("✅ Service doesn't exist, ready to create");
}
// 2. Start the service
print("\n2⃣ Starting the circle worker service...");
start(service_manager, config);
print("✅ Service started successfully");
// 3. Check service status
print("\n3⃣ Checking service status...");
let status = status(service_manager, worker_name);
print(`📊 Service status: ${status}`);
// 4. List all services to show our service is there
print("\n4⃣ Listing all managed services...");
let services = list(service_manager);
print(`📋 Managed services (${services.len()}):`);
for service in services {
let marker = if service == worker_name { "👉" } else { " " };
print(` ${marker} ${service}`);
}
// 5. Wait a moment and check status again
print("\n5⃣ Waiting 3 seconds and checking status again...");
sleep(3000); // 3 seconds in milliseconds
let status = status(service_manager, worker_name);
print(`📊 Service status after 3s: ${status}`);
// 6. Get service logs
print("\n6⃣ Retrieving service logs...");
let logs = logs(service_manager, worker_name, 10);
if logs.trim() == "" {
print("📄 No logs available yet (this is normal for new services)");
} else {
print("📄 Recent logs:");
let log_lines = logs.split('\n');
for i in 0..5 {
if i < log_lines.len() {
print(` ${log_lines[i]}`);
}
}
}
// 7. Demonstrate start_and_confirm with timeout
print("\n7⃣ Testing start_and_confirm (should succeed quickly since already running)...");
start_and_confirm(service_manager, config, 5);
print("✅ Service confirmed running within timeout");
// 8. Stop the service
print("\n8⃣ Stopping the service...");
stop(service_manager, worker_name);
print("🛑 Service stopped");
// 9. Check status after stopping
print("\n9⃣ Checking status after stop...");
let status = status(service_manager, worker_name);
print(`📊 Service status after stop: ${status}`);
// 10. Restart the service
print("\n🔟 Restarting the service...");
restart(service_manager, worker_name);
print("🔄 Service restarted successfully");
// 11. Final cleanup
print("\n🧹 Cleaning up - removing the service...");
remove(service_manager, worker_name);
print("🗑️ Service removed successfully");
// 12. Verify removal
print("\n✅ Verifying service removal...");
if !exists(service_manager, worker_name) {
print("✅ Service successfully removed");
} else {
print("⚠️ Service still exists after removal");
}
print("\n🎉 Circle worker management demonstration complete!");

View File

@ -1,18 +1,9 @@
# SAL Git Package (`sal-git`)
# SAL `git` Module
The `sal-git` package provides comprehensive functionalities for interacting with Git repositories. It offers both high-level abstractions for common Git workflows and a flexible executor for running arbitrary Git commands with integrated authentication.
The `git` module in SAL provides comprehensive functionalities for interacting with Git repositories. It offers both high-level abstractions for common Git workflows and a flexible executor for running arbitrary Git commands with integrated authentication.
This module is central to SAL's capabilities for managing source code, enabling automation of development tasks, and integrating with version control systems.
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-git = "0.1.0"
```
## Core Components
The module is primarily composed of two main parts:

View File

@ -18,8 +18,8 @@ path = "src/main.rs"
env_logger = { workspace = true }
rhai = { workspace = true }
# SAL library for Rhai module registration (with all features for herodo)
sal = { path = "..", features = ["all"] }
# SAL library for Rhai module registration
sal = { path = ".." }
[dev-dependencies]
tempfile = { workspace = true }

View File

@ -15,32 +15,14 @@ Herodo is a command-line utility that executes Rhai scripts with full access to
## Installation
### Build and Install
Build the herodo binary:
```bash
git clone https://github.com/PlanetFirst/sal.git
cd sal
./build_herodo.sh
cd herodo
cargo build --release
```
This script will:
- Build herodo in debug mode
- Install it to `~/hero/bin/herodo` (non-root) or `/usr/local/bin/herodo` (root)
- Make it available in your PATH
**Note**: If using the non-root installation, make sure `~/hero/bin` is in your PATH:
```bash
export PATH="$HOME/hero/bin:$PATH"
```
### Install from crates.io (Coming Soon)
```bash
# This will be available once herodo is published to crates.io
cargo install herodo
```
**Note**: `herodo` is not yet published to crates.io due to publishing rate limits. It will be available soon.
The executable will be available at `target/release/herodo`.
## Usage

View File

@ -1,57 +0,0 @@
[package]
name = "sal-kubernetes"
version = "0.1.0"
edition = "2021"
authors = ["PlanetFirst <info@incubaid.com>"]
description = "SAL Kubernetes - Kubernetes cluster management and operations using kube-rs SDK"
repository = "https://git.threefold.info/herocode/sal"
license = "Apache-2.0"
keywords = ["kubernetes", "k8s", "cluster", "container", "orchestration"]
categories = ["api-bindings", "development-tools"]
[dependencies]
# Kubernetes client library
kube = { version = "0.95.0", features = ["client", "config", "derive"] }
k8s-openapi = { version = "0.23.0", features = ["latest"] }
# Async runtime
tokio = { version = "1.45.0", features = ["full"] }
# Production safety features
tokio-retry = "0.3.0"
governor = "0.6.3"
tower = { version = "0.5.2", features = ["timeout", "limit"] }
# Error handling
thiserror = "2.0.12"
anyhow = "1.0.98"
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_yaml = "0.9"
# Regular expressions for pattern matching
regex = "1.10.2"
# Logging
log = "0.4"
# Rhai scripting support (optional)
rhai = { version = "1.12.0", features = ["sync"], optional = true }
once_cell = "1.20.2"
# UUID for resource identification
uuid = { version = "1.16.0", features = ["v4"] }
# Base64 encoding for secrets
base64 = "0.22.1"
[dev-dependencies]
tempfile = "3.5"
tokio-test = "0.4.4"
env_logger = "0.11.5"
[features]
default = ["rhai"]
rhai = ["dep:rhai"]

View File

@ -1,443 +0,0 @@
# SAL Kubernetes (`sal-kubernetes`)
Kubernetes cluster management and operations for the System Abstraction Layer (SAL).
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-kubernetes = "0.1.0"
```
## ⚠️ **IMPORTANT SECURITY NOTICE**
**This package includes destructive operations that can permanently delete Kubernetes resources!**
- The `delete(pattern)` function uses PCRE regex patterns to bulk delete resources (a hedged example follows this list)
- **Always test patterns in a safe environment first**
- Use specific patterns to avoid accidental deletion of critical resources
- Consider the impact on dependent resources before deletion
- **No confirmation prompts** - deletions are immediate and irreversible
## Overview
This package provides a high-level interface for managing Kubernetes clusters using the `kube-rs` SDK. It focuses on namespace-scoped operations through the `KubernetesManager` factory pattern.
### Production Safety Features
- **Configurable Timeouts**: All operations have configurable timeouts to prevent hanging
- **Exponential Backoff Retry**: Automatic retry logic for transient failures (sketched after this list)
- **Rate Limiting**: Built-in rate limiting to prevent API overload
- **Comprehensive Error Handling**: Detailed error types and proper error propagation
- **Structured Logging**: Production-ready logging for monitoring and debugging
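The retry behavior can be pictured with the package's own dependencies. The following is a conceptual sketch built on `tokio-retry` (listed in this package's `Cargo.toml`), not the package's actual internals:

```rust
use std::time::Duration;
use tokio_retry::{strategy::ExponentialBackoff, Retry};

async fn call_with_backoff() -> Result<(), kube::Error> {
    // Exponential backoff starting at 1s, capped at 30s, with 3 attempts,
    // mirroring the defaults described later in this README.
    let strategy = ExponentialBackoff::from_millis(1_000)
        .max_delay(Duration::from_secs(30))
        .take(3);
    Retry::spawn(strategy, || async {
        // A kube API call would go here; a transient Err triggers a retry.
        Ok::<(), kube::Error>(())
    })
    .await
}
```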
## Features
- **Application Deployment**: Deploy complete applications with a single method call
- **Environment Variables & Labels**: Configure containers with environment variables and Kubernetes labels
- **Resource Lifecycle Management**: Automatic cleanup and replacement of existing resources
- **Namespace-scoped Management**: Each `KubernetesManager` instance operates on a single namespace
- **Pod Management**: List, create, and manage pods
- **Pattern-based Deletion**: Delete resources using PCRE pattern matching
- **Namespace Operations**: Create and manage namespaces (idempotent operations)
- **Resource Management**: Support for pods, services, deployments, configmaps, secrets, and more
- **Rhai Integration**: Full scripting support through Rhai wrappers with environment variables
## Core Concepts
### Labels vs Environment Variables
Understanding the difference between labels and environment variables is crucial for effective Kubernetes deployments:
#### **Labels** (Kubernetes Metadata)
- **Purpose**: Organize, select, and manage Kubernetes resources
- **Scope**: Kubernetes cluster management and resource organization
- **Visibility**: Used by Kubernetes controllers, selectors, and monitoring systems
- **Examples**: `app=my-app`, `tier=backend`, `environment=production`, `version=v1.2.3`
- **Use Cases**: Resource grouping, service discovery, monitoring labels, deployment strategies
#### **Environment Variables** (Container Configuration)
- **Purpose**: Configure application runtime behavior and settings
- **Scope**: Inside container processes - available to your application code
- **Visibility**: Accessible via `process.env`, `os.environ`, etc. in your application
- **Examples**: `NODE_ENV=production`, `DATABASE_URL=postgres://...`, `API_KEY=secret`
- **Use Cases**: Database connections, API keys, feature flags, runtime configuration
#### **Example: Complete Application Configuration**
```rust
// Labels: For Kubernetes resource management
let mut labels = HashMap::new();
labels.insert("app".to_string(), "web-api".to_string()); // Service discovery
labels.insert("tier".to_string(), "backend".to_string()); // Architecture layer
labels.insert("environment".to_string(), "production".to_string()); // Deployment stage
labels.insert("version".to_string(), "v2.1.0".to_string()); // Release version
// Environment Variables: For application configuration
let mut env_vars = HashMap::new();
env_vars.insert("NODE_ENV".to_string(), "production".to_string()); // Runtime mode
env_vars.insert("DATABASE_URL".to_string(), "postgres://db:5432/app".to_string()); // DB connection
env_vars.insert("REDIS_URL".to_string(), "redis://cache:6379".to_string()); // Cache connection
env_vars.insert("LOG_LEVEL".to_string(), "info".to_string()); // Logging config
```
## Usage
### Application Deployment (Recommended)
Deploy complete applications with labels and environment variables:
```rust
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let km = KubernetesManager::new("default").await?;
// Configure labels for Kubernetes resource organization
let mut labels = HashMap::new();
labels.insert("app".to_string(), "my-app".to_string());
labels.insert("tier".to_string(), "backend".to_string());
// Configure environment variables for the container
let mut env_vars = HashMap::new();
env_vars.insert("NODE_ENV".to_string(), "production".to_string());
env_vars.insert("DATABASE_URL".to_string(), "postgres://db:5432/myapp".to_string());
env_vars.insert("API_KEY".to_string(), "secret-api-key".to_string());
// Deploy application with deployment + service
km.deploy_application(
"my-app", // name
"node:18-alpine", // image
3, // replicas
3000, // port
Some(labels), // Kubernetes labels
Some(env_vars), // container environment variables
).await?;
println!("✅ Application deployed successfully!");
Ok(())
}
```
### Basic Operations
```rust
use sal_kubernetes::KubernetesManager;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a manager for the "default" namespace
let km = KubernetesManager::new("default").await?;
// List all pods in the namespace
let pods = km.pods_list().await?;
println!("Found {} pods", pods.len());
// Create a namespace (no error if it already exists)
km.namespace_create("my-namespace").await?;
// Delete resources matching a pattern
km.delete("test-.*").await?;
Ok(())
}
```
### Rhai Scripting
```javascript
// Create Kubernetes manager for namespace
let km = kubernetes_manager_new("default");
// Deploy application with labels and environment variables
deploy_application(km, "my-app", "node:18-alpine", 3, 3000, #{
"app": "my-app",
"tier": "backend",
"environment": "production"
}, #{
"NODE_ENV": "production",
"DATABASE_URL": "postgres://db:5432/myapp",
"API_KEY": "secret-api-key"
});
print("✅ Application deployed!");
// Basic operations
let pods = pods_list(km);
print("Found " + pods.len() + " pods");
namespace_create(km, "my-namespace");
delete(km, "test-.*");
```
## Dependencies
- `kube`: Kubernetes client library
- `k8s-openapi`: Kubernetes API types
- `tokio`: Async runtime
- `regex`: Pattern matching for resource deletion
- `rhai`: Scripting integration (optional)
## Configuration
### Kubernetes Authentication
The package uses the standard Kubernetes configuration methods (a sketch of this resolution follows the list):
- In-cluster configuration (when running in a pod)
- Kubeconfig file (`~/.kube/config` or `KUBECONFIG` environment variable)
- Service account tokens
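A minimal sketch of that resolution order using `kube-rs` directly; it is an assumption, not documented behavior, that `KubernetesManager::new` performs something equivalent internally:

```rust
use kube::{Client, Config};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Config::infer() tries in-cluster configuration first, then falls back
    // to the kubeconfig file (~/.kube/config or $KUBECONFIG).
    let config = Config::infer().await?;
    let client = Client::try_from(config)?;
    println!("Default namespace: {}", client.default_namespace());
    Ok(())
}
```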
### Production Safety Configuration
```rust
use sal_kubernetes::{KubernetesManager, KubernetesConfig};
use std::time::Duration;
// Create with custom configuration
let config = KubernetesConfig::new()
.with_timeout(Duration::from_secs(60))
.with_retries(5, Duration::from_secs(1), Duration::from_secs(30))
.with_rate_limit(20, 50);
let km = KubernetesManager::with_config("my-namespace", config).await?;
```
### Pre-configured Profiles
```rust
// High-throughput environment
let config = KubernetesConfig::high_throughput();
// Low-latency environment
let config = KubernetesConfig::low_latency();
// Development/testing
let config = KubernetesConfig::development();
```
## Error Handling
All operations return `Result<T, KubernetesError>` with comprehensive error types for different failure scenarios including API errors, configuration issues, and permission problems.
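As a hedged sketch, a caller can match on the error variants defined in this package's `error.rs` (shown later in this diff); the exact success type of `pod_delete` is assumed here:

```rust
use sal_kubernetes::{KubernetesError, KubernetesManager};

async fn delete_pod_safely(km: &KubernetesManager) {
    match km.pod_delete("maybe-missing").await {
        Ok(_) => println!("Pod deleted"),
        Err(KubernetesError::ResourceNotFound(name)) => {
            // Already gone; treat as success for cleanup paths.
            println!("Nothing to delete: {name}");
        }
        Err(KubernetesError::PermissionDenied(msg)) => eprintln!("Check RBAC: {msg}"),
        Err(e) => eprintln!("Operation failed: {e}"),
    }
}
```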
## API Reference
### KubernetesManager
The main interface for Kubernetes operations. Each instance is scoped to a single namespace.
#### Constructor
- `KubernetesManager::new(namespace)` - Create a manager for the specified namespace
#### Application Deployment
- `deploy_application(name, image, replicas, port, labels, env_vars)` - Deploy complete application with deployment and service
- `deployment_create(name, image, replicas, labels, env_vars)` - Create deployment with environment variables and labels
#### Resource Creation
- `pod_create(name, image, labels, env_vars)` - Create pod with environment variables and labels
- `service_create(name, selector, port, target_port)` - Create service with port mapping
- `configmap_create(name, data)` - Create configmap with data
- `secret_create(name, data, secret_type)` - Create secret with data and optional type
#### Resource Listing
- `pods_list()` - List all pods in the namespace
- `services_list()` - List all services in the namespace
- `deployments_list()` - List all deployments in the namespace
- `configmaps_list()` - List all configmaps in the namespace
- `secrets_list()` - List all secrets in the namespace
#### Resource Management
- `pod_get(name)` - Get a specific pod by name
- `service_get(name)` - Get a specific service by name
- `deployment_get(name)` - Get a specific deployment by name
- `pod_delete(name)` - Delete a specific pod by name
- `service_delete(name)` - Delete a specific service by name
- `deployment_delete(name)` - Delete a specific deployment by name
- `configmap_delete(name)` - Delete a specific configmap by name
- `secret_delete(name)` - Delete a specific secret by name
#### Pattern-based Operations
- `delete(pattern)` - Delete all resources matching a PCRE pattern
#### Namespace Operations
- `namespace_create(name)` - Create a namespace (idempotent)
- `namespace_exists(name)` - Check if a namespace exists
- `namespaces_list()` - List all namespaces (cluster-wide)
#### Utility Functions
- `resource_counts()` - Get counts of all resource types in the namespace (used in the sketch below)
- `namespace()` - Get the namespace this manager operates on
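Tying a few of these together, a small namespace summary; the return type of `resource_counts()` is not shown in this diff, so a map from resource type to count is assumed:

```rust
use sal_kubernetes::KubernetesManager;

async fn print_namespace_summary(km: &KubernetesManager) -> Result<(), Box<dyn std::error::Error>> {
    println!("Namespace: {}", km.namespace());
    // Assumed shape: HashMap<String, usize> keyed by resource type.
    let counts = km.resource_counts().await?;
    for (kind, count) in &counts {
        println!("  {kind}: {count}");
    }
    Ok(())
}
```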
### Rhai Functions
When using the Rhai integration, the following functions are available:
**Manager Creation & Application Deployment:**
- `kubernetes_manager_new(namespace)` - Create a KubernetesManager
- `deploy_application(km, name, image, replicas, port, labels, env_vars)` - Deploy application with environment variables
**Resource Listing:**
- `pods_list(km)` - List pods
- `services_list(km)` - List services
- `deployments_list(km)` - List deployments
- `configmaps_list(km)` - List configmaps
- `secrets_list(km)` - List secrets
- `namespaces_list(km)` - List all namespaces
- `resource_counts(km)` - Get resource counts
**Resource Operations:**
- `delete(km, pattern)` - Delete resources matching pattern
- `pod_delete(km, name)` - Delete specific pod
- `service_delete(km, name)` - Delete specific service
- `deployment_delete(km, name)` - Delete specific deployment
- `configmap_delete(km, name)` - Delete specific configmap
- `secret_delete(km, name)` - Delete specific secret
**Namespace Functions:**
- `namespace_create(km, name)` - Create namespace
- `namespace_exists(km, name)` - Check namespace existence
- `namespace_delete(km, name)` - Delete namespace
- `namespace(km)` - Get manager's namespace
## Examples
The `examples/kubernetes/clusters/` directory contains comprehensive examples:
### Rust Examples
Run with: `cargo run --example <name> --features kubernetes`
- `postgres` - PostgreSQL database deployment with environment variables
- `redis` - Redis cache deployment with configuration
- `generic` - Multiple application deployments (nginx, node.js, mongodb)
### Rhai Examples
Run with: `./target/debug/herodo examples/kubernetes/clusters/<script>.rhai`
- `postgres.rhai` - PostgreSQL cluster deployment script
- `redis.rhai` - Redis cluster deployment script
### Real-World Examples
#### PostgreSQL Database
```rust
let mut env_vars = HashMap::new();
env_vars.insert("POSTGRES_DB".to_string(), "myapp".to_string());
env_vars.insert("POSTGRES_USER".to_string(), "postgres".to_string());
env_vars.insert("POSTGRES_PASSWORD".to_string(), "secretpassword".to_string());
km.deploy_application("postgres", "postgres:15", 1, 5432, Some(labels), Some(env_vars)).await?;
```
#### Redis Cache
```rust
let mut env_vars = HashMap::new();
env_vars.insert("REDIS_PASSWORD".to_string(), "redispassword".to_string());
env_vars.insert("REDIS_MAXMEMORY".to_string(), "256mb".to_string());
km.deploy_application("redis", "redis:7-alpine", 3, 6379, None, Some(env_vars)).await?;
```
## Testing
### Test Coverage
The module includes comprehensive test coverage:
- **Unit Tests**: Core functionality without cluster dependency
- **Integration Tests**: Real Kubernetes cluster operations
- **Environment Variables Tests**: Complete env var functionality testing
- **Edge Cases Tests**: Error handling and boundary conditions
- **Rhai Integration Tests**: Scripting environment testing
- **Production Readiness Tests**: Concurrent operations and error handling
### Running Tests
```bash
# Unit tests (no cluster required)
cargo test --package sal-kubernetes
# Integration tests (requires cluster)
KUBERNETES_TEST_ENABLED=1 cargo test --package sal-kubernetes
# Rhai integration tests
KUBERNETES_TEST_ENABLED=1 cargo test --package sal-kubernetes --features rhai
# Run specific test suites
cargo test --package sal-kubernetes deployment_env_vars_test
cargo test --package sal-kubernetes edge_cases_test
# Rhai environment variables test
KUBERNETES_TEST_ENABLED=1 ./target/debug/herodo kubernetes/tests/rhai/env_vars_test.rhai
```
### Test Requirements
- **Kubernetes Cluster**: Integration tests require a running Kubernetes cluster
- **Environment Variable**: Set `KUBERNETES_TEST_ENABLED=1` to enable integration tests
- **Permissions**: Tests require permissions to create/delete resources in the `default` namespace
## Production Considerations
### Security
- Always use specific PCRE patterns to avoid accidental deletion of important resources
- Test deletion patterns in a safe environment first
- Ensure proper RBAC permissions are configured
- Be cautious with cluster-wide operations like namespace listing
- Use Kubernetes secrets for sensitive environment variables instead of plain text
### Performance & Scalability
- Consider adding resource limits (CPU/memory) for production deployments
- Use persistent volumes for stateful applications
- Configure readiness and liveness probes for health checks (see the probe sketch after this list)
- Implement proper monitoring and logging labels
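Probes are not part of the `deploy_application` signature shown above, so wiring them in would require extending SAL; as a reference point, this is what a readiness probe looks like with the `k8s-openapi` types this package already depends on:

```rust
use k8s_openapi::api::core::v1::{HTTPGetAction, Probe};
use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;

// Readiness probe: poll GET /healthz on the container port every 10 seconds,
// starting 5 seconds after the container launches.
fn readiness_probe(port: i32) -> Probe {
    Probe {
        http_get: Some(HTTPGetAction {
            path: Some("/healthz".to_string()),
            port: IntOrString::Int(port),
            ..Default::default()
        }),
        initial_delay_seconds: Some(5),
        period_seconds: Some(10),
        ..Default::default()
    }
}
```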
### Environment Variables Best Practices
- Use Kubernetes secrets for sensitive data (passwords, API keys); a sketch using `secret_create` follows this list
- Validate environment variable values before deployment
- Use consistent naming conventions (e.g., `DATABASE_URL`, `API_KEY`)
- Document required vs optional environment variables
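A hedged sketch using the `secret_create` method from the API reference above; the parameter types are assumptions (data as a `HashMap<String, String>`, secret type as an optional string):

```rust
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;

async fn store_credentials(km: &KubernetesManager) -> Result<(), Box<dyn std::error::Error>> {
    // Keep DATABASE_URL and API_KEY out of plain-text env vars.
    let mut data = HashMap::new();
    data.insert("DATABASE_URL".to_string(), "postgres://db:5432/app".to_string());
    data.insert("API_KEY".to_string(), "secret-api-key".to_string());
    // None is assumed to fall back to the default "Opaque" secret type.
    km.secret_create("web-api-secrets", data, None).await?;
    Ok(())
}
```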
### Example: Production-Ready Deployment
```rust
// Production labels for monitoring and management
let mut labels = HashMap::new();
labels.insert("app".to_string(), "web-api".to_string());
labels.insert("version".to_string(), "v1.2.3".to_string());
labels.insert("environment".to_string(), "production".to_string());
labels.insert("team".to_string(), "backend".to_string());
// Non-sensitive environment variables
let mut env_vars = HashMap::new();
env_vars.insert("NODE_ENV".to_string(), "production".to_string());
env_vars.insert("LOG_LEVEL".to_string(), "info".to_string());
env_vars.insert("PORT".to_string(), "3000".to_string());
// Note: Use Kubernetes secrets for DATABASE_URL, API_KEY, etc.
km.deploy_application("web-api", "myapp:v1.2.3", 3, 3000, Some(labels), Some(env_vars)).await?;
```

View File

@ -1,113 +0,0 @@
//! Configuration for production safety features
use std::time::Duration;
/// Configuration for Kubernetes operations with production safety features
#[derive(Debug, Clone)]
pub struct KubernetesConfig {
/// Timeout for individual API operations
pub operation_timeout: Duration,
/// Maximum number of retry attempts for failed operations
pub max_retries: u32,
/// Base delay for exponential backoff retry strategy
pub retry_base_delay: Duration,
/// Maximum delay between retries
pub retry_max_delay: Duration,
/// Rate limiting: maximum requests per second
pub rate_limit_rps: u32,
/// Rate limiting: burst capacity
pub rate_limit_burst: u32,
}
impl Default for KubernetesConfig {
fn default() -> Self {
Self {
// Conservative timeout for production
operation_timeout: Duration::from_secs(30),
// Reasonable retry attempts
max_retries: 3,
// Exponential backoff starting at 1 second
retry_base_delay: Duration::from_secs(1),
// Maximum 30 seconds between retries
retry_max_delay: Duration::from_secs(30),
// Conservative rate limiting: 10 requests per second
rate_limit_rps: 10,
// Allow small bursts
rate_limit_burst: 20,
}
}
}
impl KubernetesConfig {
/// Create a new configuration with custom settings
pub fn new() -> Self {
Self::default()
}
/// Set operation timeout
pub fn with_timeout(mut self, timeout: Duration) -> Self {
self.operation_timeout = timeout;
self
}
/// Set retry configuration
pub fn with_retries(mut self, max_retries: u32, base_delay: Duration, max_delay: Duration) -> Self {
self.max_retries = max_retries;
self.retry_base_delay = base_delay;
self.retry_max_delay = max_delay;
self
}
/// Set rate limiting configuration
pub fn with_rate_limit(mut self, rps: u32, burst: u32) -> Self {
self.rate_limit_rps = rps;
self.rate_limit_burst = burst;
self
}
/// Create configuration optimized for high-throughput environments
pub fn high_throughput() -> Self {
Self {
operation_timeout: Duration::from_secs(60),
max_retries: 5,
retry_base_delay: Duration::from_millis(500),
retry_max_delay: Duration::from_secs(60),
rate_limit_rps: 50,
rate_limit_burst: 100,
}
}
/// Create configuration optimized for low-latency environments
pub fn low_latency() -> Self {
Self {
operation_timeout: Duration::from_secs(10),
max_retries: 2,
retry_base_delay: Duration::from_millis(100),
retry_max_delay: Duration::from_secs(5),
rate_limit_rps: 20,
rate_limit_burst: 40,
}
}
/// Create configuration for development/testing
pub fn development() -> Self {
Self {
operation_timeout: Duration::from_secs(120),
max_retries: 1,
retry_base_delay: Duration::from_millis(100),
retry_max_delay: Duration::from_secs(2),
rate_limit_rps: 100,
rate_limit_burst: 200,
}
}
}

View File

@ -1,85 +0,0 @@
//! Error types for SAL Kubernetes operations
use thiserror::Error;
/// Errors that can occur during Kubernetes operations
#[derive(Error, Debug)]
pub enum KubernetesError {
/// Kubernetes API client error
#[error("Kubernetes API error: {0}")]
ApiError(#[from] kube::Error),
/// Configuration error
#[error("Configuration error: {0}")]
ConfigError(String),
/// Resource not found error
#[error("Resource not found: {0}")]
ResourceNotFound(String),
/// Invalid resource name or pattern
#[error("Invalid resource name or pattern: {0}")]
InvalidResourceName(String),
/// Regular expression error
#[error("Regular expression error: {0}")]
RegexError(#[from] regex::Error),
/// Serialization/deserialization error
#[error("Serialization error: {0}")]
SerializationError(#[from] serde_json::Error),
/// YAML parsing error
#[error("YAML error: {0}")]
YamlError(#[from] serde_yaml::Error),
/// Generic operation error
#[error("Operation failed: {0}")]
OperationError(String),
/// Namespace error
#[error("Namespace error: {0}")]
NamespaceError(String),
/// Permission denied error
#[error("Permission denied: {0}")]
PermissionDenied(String),
/// Timeout error
#[error("Operation timed out: {0}")]
Timeout(String),
/// Generic error wrapper
#[error("Generic error: {0}")]
Generic(#[from] anyhow::Error),
}
impl KubernetesError {
/// Create a new configuration error
pub fn config_error(msg: impl Into<String>) -> Self {
Self::ConfigError(msg.into())
}
/// Create a new operation error
pub fn operation_error(msg: impl Into<String>) -> Self {
Self::OperationError(msg.into())
}
/// Create a new namespace error
pub fn namespace_error(msg: impl Into<String>) -> Self {
Self::NamespaceError(msg.into())
}
/// Create a new permission denied error
pub fn permission_denied(msg: impl Into<String>) -> Self {
Self::PermissionDenied(msg.into())
}
/// Create a new timeout error
pub fn timeout(msg: impl Into<String>) -> Self {
Self::Timeout(msg.into())
}
}
/// Result type for Kubernetes operations
pub type KubernetesResult<T> = Result<T, KubernetesError>;

File diff suppressed because it is too large

View File

@ -1,49 +0,0 @@
//! SAL Kubernetes: Kubernetes cluster management and operations
//!
//! This package provides Kubernetes cluster management functionality including:
//! - Namespace-scoped resource management via KubernetesManager
//! - Pod listing and management
//! - Resource deletion with PCRE pattern matching
//! - Namespace creation and management
//! - Support for various Kubernetes resources (pods, services, deployments, etc.)
//!
//! # Example
//!
//! ```rust
//! use sal_kubernetes::KubernetesManager;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create a manager for the "default" namespace
//! let km = KubernetesManager::new("default").await?;
//!
//! // List all pods in the namespace
//! let pods = km.pods_list().await?;
//! println!("Found {} pods", pods.len());
//!
//! // Create a namespace (idempotent)
//! km.namespace_create("my-namespace").await?;
//!
//! // Delete resources matching a pattern
//! km.delete("test-.*").await?;
//!
//! Ok(())
//! }
//! ```
pub mod config;
pub mod error;
pub mod kubernetes_manager;
// Rhai integration module
#[cfg(feature = "rhai")]
pub mod rhai;
// Re-export main types for convenience
pub use config::KubernetesConfig;
pub use error::KubernetesError;
pub use kubernetes_manager::KubernetesManager;
// Re-export commonly used Kubernetes types
pub use k8s_openapi::api::apps::v1::{Deployment, ReplicaSet};
pub use k8s_openapi::api::core::v1::{Namespace, Pod, Service};

View File

@ -1,729 +0,0 @@
//! Rhai wrappers for Kubernetes module functions
//!
//! This module provides Rhai wrappers for the functions in the Kubernetes module,
//! enabling scripting access to Kubernetes operations.
use crate::{KubernetesError, KubernetesManager};
use once_cell::sync::Lazy;
use rhai::{Array, Dynamic, Engine, EvalAltResult, Map};
use std::sync::Mutex;
use tokio::runtime::Runtime;
// Global Tokio runtime for blocking async operations
static RUNTIME: Lazy<Mutex<Runtime>> =
Lazy::new(|| Mutex::new(Runtime::new().expect("Failed to create Tokio runtime")));
/// Helper function to convert Rhai Map to HashMap for environment variables
///
/// # Arguments
///
/// * `rhai_map` - Rhai Map containing key-value pairs
///
/// # Returns
///
/// * `Option<std::collections::HashMap<String, String>>` - Converted HashMap or None if empty
fn convert_rhai_map_to_env_vars(
rhai_map: Map,
) -> Option<std::collections::HashMap<String, String>> {
if rhai_map.is_empty() {
None
} else {
Some(
rhai_map
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect(),
)
}
}
/// Helper function to execute async operations with proper runtime handling
///
/// This uses a global runtime to ensure consistent async execution
fn execute_async<F, T>(future: F) -> Result<T, Box<EvalAltResult>>
where
F: std::future::Future<Output = Result<T, KubernetesError>>,
{
// Get the global runtime
let rt = match RUNTIME.lock() {
Ok(rt) => rt,
Err(e) => {
return Err(Box::new(EvalAltResult::ErrorRuntime(
format!("Failed to acquire runtime lock: {e}").into(),
rhai::Position::NONE,
)));
}
};
// Execute the future in a blocking manner
rt.block_on(future).map_err(kubernetes_error_to_rhai_error)
}
/// Create a new KubernetesManager for the specified namespace
///
/// # Arguments
///
/// * `namespace` - The Kubernetes namespace to operate on
///
/// # Returns
///
/// * `Result<KubernetesManager, Box<EvalAltResult>>` - The manager instance or an error
fn kubernetes_manager_new(namespace: String) -> Result<KubernetesManager, Box<EvalAltResult>> {
execute_async(KubernetesManager::new(namespace))
}
/// List all pods in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of pod names or an error
fn pods_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
let pods = execute_async(km.pods_list())?;
let pod_names: Array = pods
.iter()
.filter_map(|pod| pod.metadata.name.as_ref())
.map(|name| Dynamic::from(name.clone()))
.collect();
Ok(pod_names)
}
/// List all services in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of service names or an error
fn services_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
let services = execute_async(km.services_list())?;
let service_names: Array = services
.iter()
.filter_map(|service| service.metadata.name.as_ref())
.map(|name| Dynamic::from(name.clone()))
.collect();
Ok(service_names)
}
/// List all deployments in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of deployment names or an error
fn deployments_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
let deployments = execute_async(km.deployments_list())?;
let deployment_names: Array = deployments
.iter()
.filter_map(|deployment| deployment.metadata.name.as_ref())
.map(|name| Dynamic::from(name.clone()))
.collect();
Ok(deployment_names)
}
/// List all configmaps in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of configmap names or an error
fn configmaps_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
let configmaps = execute_async(km.configmaps_list())?;
let configmap_names: Array = configmaps
.iter()
.filter_map(|configmap| configmap.metadata.name.as_ref())
.map(|name| Dynamic::from(name.clone()))
.collect();
Ok(configmap_names)
}
/// List all secrets in the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of secret names or an error
fn secrets_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
let secrets = execute_async(km.secrets_list())?;
let secret_names: Array = secrets
.iter()
.filter_map(|secret| secret.metadata.name.as_ref())
.map(|name| Dynamic::from(name.clone()))
.collect();
Ok(secret_names)
}
/// Create a pod with a single container (backward compatible version)
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the pod
/// * `image` - Container image to use
/// * `labels` - Optional labels as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Pod name or an error
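///
/// # Rhai usage (sketch)
///
/// ```rhai
/// let name = km.create_pod("web", "nginx:alpine", #{ "app": "web" });
/// ```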
fn pod_create(
km: &mut KubernetesManager,
name: String,
image: String,
labels: Map,
) -> Result<String, Box<EvalAltResult>> {
let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
None
} else {
Some(
labels
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect(),
)
};
let pod = execute_async(km.pod_create(&name, &image, labels_map, None))?;
Ok(pod.metadata.name.unwrap_or(name))
}
/// Create a pod with a single container and environment variables
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the pod
/// * `image` - Container image to use
/// * `labels` - Optional labels as a Map
/// * `env_vars` - Optional environment variables as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Pod name or an error
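///
/// # Rhai usage (sketch)
///
/// ```rhai
/// let name = km.create_pod_with_env("web", "nginx:alpine",
///     #{ "app": "web" }, #{ "NODE_ENV": "production" });
/// ```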
fn pod_create_with_env(
km: &mut KubernetesManager,
name: String,
image: String,
labels: Map,
env_vars: Map,
) -> Result<String, Box<EvalAltResult>> {
let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
None
} else {
Some(
labels
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect(),
)
};
let env_vars_map = convert_rhai_map_to_env_vars(env_vars);
let pod = execute_async(km.pod_create(&name, &image, labels_map, env_vars_map))?;
Ok(pod.metadata.name.unwrap_or(name))
}
/// Create a service
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the service
/// * `selector` - Labels to select pods as a Map
/// * `port` - Port to expose
/// * `target_port` - Target port on pods (pass 0 to default to `port`)
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Service name or an error
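///
/// # Rhai usage (sketch)
///
/// ```rhai
/// // Pass 0 as target_port to default it to the service port.
/// let name = km.create_service("web", #{ "app": "web" }, 80, 0);
/// ```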
fn service_create(
km: &mut KubernetesManager,
name: String,
selector: Map,
port: i64,
target_port: i64,
) -> Result<String, Box<EvalAltResult>> {
let selector_map: std::collections::HashMap<String, String> = selector
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
let target_port_opt = if target_port == 0 {
None
} else {
Some(target_port as i32)
};
let service =
execute_async(km.service_create(&name, selector_map, port as i32, target_port_opt))?;
Ok(service.metadata.name.unwrap_or(name))
}
/// Create a deployment
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the deployment
/// * `image` - Container image to use
/// * `replicas` - Number of replicas
/// * `labels` - Optional labels as a Map
/// * `env_vars` - Optional environment variables as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Deployment name or an error
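///
/// # Rhai usage (sketch)
///
/// ```rhai
/// let name = km.create_deployment("web", "nginx:alpine", 2,
///     #{ "app": "web" }, #{ "LOG_LEVEL": "info" });
/// ```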
fn deployment_create(
km: &mut KubernetesManager,
name: String,
image: String,
replicas: i64,
labels: Map,
env_vars: Map,
) -> Result<String, Box<EvalAltResult>> {
let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
None
} else {
Some(
labels
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect(),
)
};
let env_vars_map = convert_rhai_map_to_env_vars(env_vars);
let deployment = execute_async(km.deployment_create(
&name,
&image,
replicas as i32,
labels_map,
env_vars_map,
))?;
Ok(deployment.metadata.name.unwrap_or(name))
}
/// Create a ConfigMap
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the ConfigMap
/// * `data` - Data as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - ConfigMap name or an error
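///
/// # Rhai usage (sketch)
///
/// ```rhai
/// let name = km.create_configmap("app-config", #{ "debug": "true" });
/// ```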
fn configmap_create(
km: &mut KubernetesManager,
name: String,
data: Map,
) -> Result<String, Box<EvalAltResult>> {
let data_map: std::collections::HashMap<String, String> = data
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
let configmap = execute_async(km.configmap_create(&name, data_map))?;
Ok(configmap.metadata.name.unwrap_or(name))
}
/// Create a Secret
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the Secret
/// * `data` - Data as a Map (will be base64 encoded)
/// * `secret_type` - Type of secret (pass an empty string to default to "Opaque")
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Secret name or an error
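///
/// # Rhai usage (sketch)
///
/// ```rhai
/// // Pass "" as the type to default to "Opaque".
/// let name = km.create_secret("creds", #{ "username": "admin" }, "");
/// ```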
fn secret_create(
km: &mut KubernetesManager,
name: String,
data: Map,
secret_type: String,
) -> Result<String, Box<EvalAltResult>> {
let data_map: std::collections::HashMap<String, String> = data
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
let secret_type_opt = if secret_type.is_empty() {
None
} else {
Some(secret_type.as_str())
};
let secret = execute_async(km.secret_create(&name, data_map, secret_type_opt))?;
Ok(secret.metadata.name.unwrap_or(name))
}
/// Get a pod by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the pod to get
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Pod name or an error
fn pod_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
let pod = execute_async(km.pod_get(&name))?;
Ok(pod.metadata.name.unwrap_or(name))
}
/// Get a service by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the service to get
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Service name or an error
fn service_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
let service = execute_async(km.service_get(&name))?;
Ok(service.metadata.name.unwrap_or(name))
}
/// Get a deployment by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the deployment to get
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Deployment name or an error
fn deployment_get(km: &mut KubernetesManager, name: String) -> Result<String, Box<EvalAltResult>> {
let deployment = execute_async(km.deployment_get(&name))?;
Ok(deployment.metadata.name.unwrap_or(name))
}
/// Delete resources matching a regex pattern
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `pattern` - Regex pattern to match resource names against
///
/// # Returns
///
/// * `Result<i64, Box<EvalAltResult>>` - Number of resources deleted or an error
fn delete(km: &mut KubernetesManager, pattern: String) -> Result<i64, Box<EvalAltResult>> {
let deleted_count = execute_async(km.delete(&pattern))?;
Ok(deleted_count as i64)
}
/// Create a namespace (idempotent operation)
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the namespace to create
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn namespace_create(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
execute_async(km.namespace_create(&name))
}
/// Delete a namespace (destructive operation)
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the namespace to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn namespace_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
execute_async(km.namespace_delete(&name))
}
/// Check if a namespace exists
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the namespace to check
///
/// # Returns
///
/// * `Result<bool, Box<EvalAltResult>>` - True if namespace exists, false otherwise
fn namespace_exists(km: &mut KubernetesManager, name: String) -> Result<bool, Box<EvalAltResult>> {
execute_async(km.namespace_exists(&name))
}
/// List all namespaces
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Array, Box<EvalAltResult>>` - Array of namespace names or an error
fn namespaces_list(km: &mut KubernetesManager) -> Result<Array, Box<EvalAltResult>> {
let namespaces = execute_async(km.namespaces_list())?;
let namespace_names: Array = namespaces
.iter()
.filter_map(|ns| ns.metadata.name.as_ref())
.map(|name| Dynamic::from(name.clone()))
.collect();
Ok(namespace_names)
}
/// Get resource counts for the namespace
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `Result<Map, Box<EvalAltResult>>` - Map of resource counts by type or an error
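///
/// # Rhai usage (sketch)
///
/// ```rhai
/// let counts = resource_counts(km);
/// for key in counts.keys() {
///     print(key + ": " + counts[key]);
/// }
/// ```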
fn resource_counts(km: &mut KubernetesManager) -> Result<Map, Box<EvalAltResult>> {
let counts = execute_async(km.resource_counts())?;
let mut rhai_map = Map::new();
for (key, value) in counts {
rhai_map.insert(key.into(), Dynamic::from(value as i64));
}
Ok(rhai_map)
}
/// Deploy a complete application with deployment and service
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the application
/// * `image` - Container image to use
/// * `replicas` - Number of replicas
/// * `port` - Port the application listens on
/// * `labels` - Optional labels as a Map
/// * `env_vars` - Optional environment variables as a Map
///
/// # Returns
///
/// * `Result<String, Box<EvalAltResult>>` - Success message or an error
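///
/// # Rhai usage (sketch)
///
/// ```rhai
/// let msg = km.deploy_application("api", "nginx:alpine", 2, 80,
///     #{ "app": "api" }, #{ "PORT": "80" });
/// print(msg);
/// ```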
fn deploy_application(
km: &mut KubernetesManager,
name: String,
image: String,
replicas: i64,
port: i64,
labels: Map,
env_vars: Map,
) -> Result<String, Box<EvalAltResult>> {
let labels_map: Option<std::collections::HashMap<String, String>> = if labels.is_empty() {
None
} else {
Some(
labels
.into_iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect(),
)
};
let env_vars_map = convert_rhai_map_to_env_vars(env_vars);
execute_async(km.deploy_application(
&name,
&image,
replicas as i32,
port as i32,
labels_map,
env_vars_map,
))?;
Ok(format!("Successfully deployed application '{name}'"))
}
/// Delete a specific pod by name
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the pod to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn pod_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
execute_async(km.pod_delete(&name))
}
/// Delete a specific service by name
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the service to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn service_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
execute_async(km.service_delete(&name))
}
/// Delete a specific deployment by name
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
/// * `name` - The name of the deployment to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn deployment_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
execute_async(km.deployment_delete(&name))
}
/// Delete a ConfigMap by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the ConfigMap to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn configmap_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
execute_async(km.configmap_delete(&name))
}
/// Delete a Secret by name
///
/// # Arguments
///
/// * `km` - Mutable reference to KubernetesManager
/// * `name` - Name of the Secret to delete
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Success or an error
fn secret_delete(km: &mut KubernetesManager, name: String) -> Result<(), Box<EvalAltResult>> {
execute_async(km.secret_delete(&name))
}
/// Get the namespace this manager operates on
///
/// # Arguments
///
/// * `km` - The KubernetesManager instance
///
/// # Returns
///
/// * `String` - The namespace name
fn kubernetes_manager_namespace(km: &mut KubernetesManager) -> String {
km.namespace().to_string()
}
/// Register Kubernetes module functions with the Rhai engine
///
/// # Arguments
///
/// * `engine` - The Rhai engine to register the functions with
///
/// # Returns
///
/// * `Result<(), Box<EvalAltResult>>` - Ok if registration was successful, Err otherwise
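///
/// # Example
///
/// A minimal embedding sketch (tagged `ignore` since import paths depend on
/// the crate layout, and it needs a reachable cluster at run time):
///
/// ```ignore
/// let mut engine = rhai::Engine::new();
/// register_kubernetes_module(&mut engine).unwrap();
/// let pod_count = engine
///     .eval::<i64>(
///         r#"
///             let km = kubernetes_manager_new("default");
///             pods_list(km).len()
///         "#,
///     )
///     .unwrap();
/// ```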
pub fn register_kubernetes_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
// Register KubernetesManager type
engine.register_type::<KubernetesManager>();
// Register KubernetesManager constructor and methods
engine.register_fn("kubernetes_manager_new", kubernetes_manager_new);
engine.register_fn("namespace", kubernetes_manager_namespace);
// Register resource listing functions
engine.register_fn("pods_list", pods_list);
engine.register_fn("services_list", services_list);
engine.register_fn("deployments_list", deployments_list);
engine.register_fn("configmaps_list", configmaps_list);
engine.register_fn("secrets_list", secrets_list);
engine.register_fn("namespaces_list", namespaces_list);
// Register resource creation methods (object-oriented style)
engine.register_fn("create_pod", pod_create);
engine.register_fn("create_pod_with_env", pod_create_with_env);
engine.register_fn("create_service", service_create);
engine.register_fn("create_deployment", deployment_create);
engine.register_fn("create_configmap", configmap_create);
engine.register_fn("create_secret", secret_create);
// Register resource get methods
engine.register_fn("get_pod", pod_get);
engine.register_fn("get_service", service_get);
engine.register_fn("get_deployment", deployment_get);
// Register resource management methods
engine.register_fn("delete", delete);
engine.register_fn("delete_pod", pod_delete);
engine.register_fn("delete_service", service_delete);
engine.register_fn("delete_deployment", deployment_delete);
engine.register_fn("delete_configmap", configmap_delete);
engine.register_fn("delete_secret", secret_delete);
// Register namespace methods (object-oriented style)
engine.register_fn("create_namespace", namespace_create);
engine.register_fn("delete_namespace", namespace_delete);
engine.register_fn("namespace_exists", namespace_exists);
// Register utility functions
engine.register_fn("resource_counts", resource_counts);
// Register convenience functions
engine.register_fn("deploy_application", deploy_application);
Ok(())
}
// Helper function for error conversion
fn kubernetes_error_to_rhai_error(error: KubernetesError) -> Box<EvalAltResult> {
Box::new(EvalAltResult::ErrorRuntime(
format!("Kubernetes error: {error}").into(),
rhai::Position::NONE,
))
}

View File

@ -1,253 +0,0 @@
//! CRUD operations tests for SAL Kubernetes
//!
//! These tests verify that all Create, Read, Update, Delete operations work correctly.
#[cfg(test)]
mod crud_tests {
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
/// Check if Kubernetes integration tests should run
fn should_run_k8s_tests() -> bool {
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
}
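// To exercise these tests against a real cluster (assumes a valid kubeconfig
// and the workspace package name from the imports above):
//
//     KUBERNETES_TEST_ENABLED=1 cargo test -p sal-kubernetes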
#[tokio::test]
async fn test_complete_crud_operations() {
if !should_run_k8s_tests() {
println!("Skipping CRUD test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
println!("🔍 Testing complete CRUD operations...");
// Create a test namespace for our operations
let test_namespace = "sal-crud-test";
let km = KubernetesManager::new("default")
.await
.expect("Should connect to cluster");
// Clean up any existing test namespace
let _ = km.namespace_delete(test_namespace).await;
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
// CREATE operations
println!("\n=== CREATE Operations ===");
// 1. Create namespace
km.namespace_create(test_namespace)
.await
.expect("Should create test namespace");
println!("✅ Created namespace: {}", test_namespace);
// Switch to test namespace
let test_km = KubernetesManager::new(test_namespace)
.await
.expect("Should connect to test namespace");
// 2. Create ConfigMap
let mut config_data = HashMap::new();
config_data.insert(
"app.properties".to_string(),
"debug=true\nport=8080".to_string(),
);
config_data.insert(
"config.yaml".to_string(),
"key: value\nenv: test".to_string(),
);
let configmap = test_km
.configmap_create("test-config", config_data)
.await
.expect("Should create ConfigMap");
println!(
"✅ Created ConfigMap: {}",
configmap.metadata.name.unwrap_or_default()
);
// 3. Create Secret
let mut secret_data = HashMap::new();
secret_data.insert("username".to_string(), "testuser".to_string());
secret_data.insert("password".to_string(), "secret123".to_string());
let secret = test_km
.secret_create("test-secret", secret_data, None)
.await
.expect("Should create Secret");
println!(
"✅ Created Secret: {}",
secret.metadata.name.unwrap_or_default()
);
// 4. Create Pod
let mut pod_labels = HashMap::new();
pod_labels.insert("app".to_string(), "test-app".to_string());
pod_labels.insert("version".to_string(), "v1".to_string());
let pod = test_km
.pod_create("test-pod", "nginx:alpine", Some(pod_labels.clone()), None)
.await
.expect("Should create Pod");
println!("✅ Created Pod: {}", pod.metadata.name.unwrap_or_default());
// 5. Create Service
let service = test_km
.service_create("test-service", pod_labels.clone(), 80, Some(80))
.await
.expect("Should create Service");
println!(
"✅ Created Service: {}",
service.metadata.name.unwrap_or_default()
);
// 6. Create Deployment
let deployment = test_km
.deployment_create("test-deployment", "nginx:alpine", 2, Some(pod_labels), None)
.await
.expect("Should create Deployment");
println!(
"✅ Created Deployment: {}",
deployment.metadata.name.unwrap_or_default()
);
// READ operations
println!("\n=== READ Operations ===");
// List all resources
let pods = test_km.pods_list().await.expect("Should list pods");
println!("✅ Listed {} pods", pods.len());
let services = test_km.services_list().await.expect("Should list services");
println!("✅ Listed {} services", services.len());
let deployments = test_km
.deployments_list()
.await
.expect("Should list deployments");
println!("✅ Listed {} deployments", deployments.len());
let configmaps = test_km
.configmaps_list()
.await
.expect("Should list configmaps");
println!("✅ Listed {} configmaps", configmaps.len());
let secrets = test_km.secrets_list().await.expect("Should list secrets");
println!("✅ Listed {} secrets", secrets.len());
// Get specific resources
let pod = test_km.pod_get("test-pod").await.expect("Should get pod");
println!(
"✅ Retrieved pod: {}",
pod.metadata.name.unwrap_or_default()
);
let service = test_km
.service_get("test-service")
.await
.expect("Should get service");
println!(
"✅ Retrieved service: {}",
service.metadata.name.unwrap_or_default()
);
let deployment = test_km
.deployment_get("test-deployment")
.await
.expect("Should get deployment");
println!(
"✅ Retrieved deployment: {}",
deployment.metadata.name.unwrap_or_default()
);
// Resource counts
let counts = test_km
.resource_counts()
.await
.expect("Should get resource counts");
println!("✅ Resource counts: {:?}", counts);
// DELETE operations
println!("\n=== DELETE Operations ===");
// Delete individual resources
test_km
.pod_delete("test-pod")
.await
.expect("Should delete pod");
println!("✅ Deleted pod");
test_km
.service_delete("test-service")
.await
.expect("Should delete service");
println!("✅ Deleted service");
test_km
.deployment_delete("test-deployment")
.await
.expect("Should delete deployment");
println!("✅ Deleted deployment");
test_km
.configmap_delete("test-config")
.await
.expect("Should delete configmap");
println!("✅ Deleted configmap");
test_km
.secret_delete("test-secret")
.await
.expect("Should delete secret");
println!("✅ Deleted secret");
// Verify resources are deleted
let final_counts = test_km
.resource_counts()
.await
.expect("Should get final resource counts");
println!("✅ Final resource counts: {:?}", final_counts);
// Delete the test namespace
km.namespace_delete(test_namespace)
.await
.expect("Should delete test namespace");
println!("✅ Deleted test namespace");
println!("\n🎉 All CRUD operations completed successfully!");
}
#[tokio::test]
async fn test_error_handling_in_crud() {
if !should_run_k8s_tests() {
println!("Skipping CRUD error handling test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
println!("🔍 Testing error handling in CRUD operations...");
let km = KubernetesManager::new("default")
.await
.expect("Should connect to cluster");
// Test creating resources with invalid names
let result = km.pod_create("", "nginx", None, None).await;
assert!(result.is_err(), "Should fail with empty pod name");
println!("✅ Empty pod name properly rejected");
// Test getting non-existent resources
let result = km.pod_get("non-existent-pod").await;
assert!(result.is_err(), "Should fail to get non-existent pod");
println!("✅ Non-existent pod properly handled");
// Test deleting non-existent resources
let result = km.service_delete("non-existent-service").await;
assert!(
result.is_err(),
"Should fail to delete non-existent service"
);
println!("✅ Non-existent service deletion properly handled");
println!("✅ Error handling in CRUD operations is robust");
}
}

View File

@ -1,384 +0,0 @@
//! Tests for deployment creation with environment variables
//!
//! These tests verify the new environment variable functionality in deployments
//! and the enhanced deploy_application method.
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
/// Check if Kubernetes integration tests should run
fn should_run_k8s_tests() -> bool {
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
}
#[tokio::test]
async fn test_deployment_create_with_env_vars() {
if !should_run_k8s_tests() {
println!("Skipping Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return, // Skip if can't connect
};
// Clean up any existing test deployment
let _ = km.deployment_delete("test-env-deployment").await;
// Create deployment with environment variables
let mut labels = HashMap::new();
labels.insert("app".to_string(), "test-env-app".to_string());
labels.insert("test".to_string(), "env-vars".to_string());
let mut env_vars = HashMap::new();
env_vars.insert("TEST_VAR_1".to_string(), "value1".to_string());
env_vars.insert("TEST_VAR_2".to_string(), "value2".to_string());
env_vars.insert("NODE_ENV".to_string(), "test".to_string());
let result = km
.deployment_create(
"test-env-deployment",
"nginx:latest",
1,
Some(labels),
Some(env_vars),
)
.await;
assert!(
result.is_ok(),
"Failed to create deployment with env vars: {:?}",
result
);
// Verify the deployment was created
let deployment = km.deployment_get("test-env-deployment").await;
assert!(deployment.is_ok(), "Failed to get created deployment");
let deployment = deployment.unwrap();
// Verify environment variables are set in the container spec
if let Some(spec) = &deployment.spec {
if let Some(template) = &spec.template.spec {
if let Some(container) = template.containers.first() {
if let Some(env) = &container.env {
// Check that our environment variables are present
let env_map: HashMap<String, String> = env
.iter()
.filter_map(|e| e.value.as_ref().map(|v| (e.name.clone(), v.clone())))
.collect();
assert_eq!(env_map.get("TEST_VAR_1"), Some(&"value1".to_string()));
assert_eq!(env_map.get("TEST_VAR_2"), Some(&"value2".to_string()));
assert_eq!(env_map.get("NODE_ENV"), Some(&"test".to_string()));
} else {
panic!("No environment variables found in container spec");
}
}
}
}
// Clean up
let _ = km.deployment_delete("test-env-deployment").await;
}
#[tokio::test]
async fn test_pod_create_with_env_vars() {
if !should_run_k8s_tests() {
println!("Skipping Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return, // Skip if can't connect
};
// Clean up any existing test pod
let _ = km.pod_delete("test-env-pod").await;
// Create pod with environment variables
let mut env_vars = HashMap::new();
env_vars.insert("NODE_ENV".to_string(), "test".to_string());
env_vars.insert(
"DATABASE_URL".to_string(),
"postgres://localhost:5432/test".to_string(),
);
env_vars.insert("API_KEY".to_string(), "test-api-key-12345".to_string());
let mut labels = HashMap::new();
labels.insert("app".to_string(), "test-env-pod-app".to_string());
labels.insert("test".to_string(), "environment-variables".to_string());
let result = km
.pod_create("test-env-pod", "nginx:latest", Some(labels), Some(env_vars))
.await;
assert!(
result.is_ok(),
"Failed to create pod with env vars: {:?}",
result
);
if let Ok(pod) = result {
let pod_name = pod
.metadata
.name
.as_ref()
.unwrap_or(&"".to_string())
.clone();
assert_eq!(pod_name, "test-env-pod");
println!("✅ Created pod with environment variables: {}", pod_name);
// Verify the pod has the expected environment variables
if let Some(spec) = &pod.spec {
if let Some(container) = spec.containers.first() {
if let Some(env) = &container.env {
let env_names: Vec<String> = env.iter().map(|e| e.name.clone()).collect();
assert!(env_names.contains(&"NODE_ENV".to_string()));
assert!(env_names.contains(&"DATABASE_URL".to_string()));
assert!(env_names.contains(&"API_KEY".to_string()));
println!("✅ Pod has expected environment variables");
}
}
}
}
// Clean up
let _ = km.pod_delete("test-env-pod").await;
}
#[tokio::test]
async fn test_deployment_create_without_env_vars() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Clean up any existing test deployment
let _ = km.deployment_delete("test-no-env-deployment").await;
// Create deployment without environment variables
let mut labels = HashMap::new();
labels.insert("app".to_string(), "test-no-env-app".to_string());
let result = km
.deployment_create(
"test-no-env-deployment",
"nginx:latest",
1,
Some(labels),
None, // No environment variables
)
.await;
assert!(
result.is_ok(),
"Failed to create deployment without env vars: {:?}",
result
);
// Verify the deployment was created
let deployment = km.deployment_get("test-no-env-deployment").await;
assert!(deployment.is_ok(), "Failed to get created deployment");
let deployment = deployment.unwrap();
// Verify no environment variables are set
if let Some(spec) = &deployment.spec {
if let Some(template) = &spec.template.spec {
if let Some(container) = template.containers.first() {
// Environment variables should be None or empty
assert!(
container.env.is_none() || container.env.as_ref().unwrap().is_empty(),
"Expected no environment variables, but found some"
);
}
}
}
// Clean up
let _ = km.deployment_delete("test-no-env-deployment").await;
}
#[tokio::test]
async fn test_deploy_application_with_env_vars() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Clean up any existing resources
let _ = km.deployment_delete("test-app-env").await;
let _ = km.service_delete("test-app-env").await;
// Deploy application with both labels and environment variables
let mut labels = HashMap::new();
labels.insert("app".to_string(), "test-app-env".to_string());
labels.insert("tier".to_string(), "backend".to_string());
let mut env_vars = HashMap::new();
env_vars.insert(
"DATABASE_URL".to_string(),
"postgres://localhost:5432/test".to_string(),
);
env_vars.insert("API_KEY".to_string(), "test-api-key".to_string());
env_vars.insert("LOG_LEVEL".to_string(), "debug".to_string());
let result = km
.deploy_application(
"test-app-env",
"nginx:latest",
2,
80,
Some(labels),
Some(env_vars),
)
.await;
assert!(
result.is_ok(),
"Failed to deploy application with env vars: {:?}",
result
);
// Verify both deployment and service were created
let deployment = km.deployment_get("test-app-env").await;
assert!(deployment.is_ok(), "Deployment should be created");
let service = km.service_get("test-app-env").await;
assert!(service.is_ok(), "Service should be created");
// Verify environment variables in deployment
let deployment = deployment.unwrap();
if let Some(spec) = &deployment.spec {
if let Some(template) = &spec.template.spec {
if let Some(container) = template.containers.first() {
if let Some(env) = &container.env {
let env_map: HashMap<String, String> = env
.iter()
.filter_map(|e| e.value.as_ref().map(|v| (e.name.clone(), v.clone())))
.collect();
assert_eq!(
env_map.get("DATABASE_URL"),
Some(&"postgres://localhost:5432/test".to_string())
);
assert_eq!(env_map.get("API_KEY"), Some(&"test-api-key".to_string()));
assert_eq!(env_map.get("LOG_LEVEL"), Some(&"debug".to_string()));
}
}
}
}
// Clean up
let _ = km.deployment_delete("test-app-env").await;
let _ = km.service_delete("test-app-env").await;
}
#[tokio::test]
async fn test_deploy_application_cleanup_existing_resources() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => {
println!("Skipping test - no Kubernetes cluster available");
return;
}
};
let app_name = "test-cleanup-app";
// Clean up any existing resources first to ensure clean state
let _ = km.deployment_delete(app_name).await;
let _ = km.service_delete(app_name).await;
// Wait a moment for cleanup to complete
tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
// First deployment
let result = km
.deploy_application(app_name, "nginx:latest", 1, 80, None, None)
.await;
if result.is_err() {
println!("Skipping test - cluster connection unstable: {:?}", result);
return;
}
// Verify resources exist (with graceful handling)
let deployment_exists = km.deployment_get(app_name).await.is_ok();
let service_exists = km.service_get(app_name).await.is_ok();
if !deployment_exists || !service_exists {
println!("Skipping test - resources not created properly");
let _ = km.deployment_delete(app_name).await;
let _ = km.service_delete(app_name).await;
return;
}
// Second deployment with different configuration (should replace the first)
let mut env_vars = HashMap::new();
env_vars.insert("VERSION".to_string(), "2.0".to_string());
let result = km
.deploy_application(app_name, "nginx:alpine", 2, 80, None, Some(env_vars))
.await;
if result.is_err() {
println!(
"Skipping verification - second deployment failed: {:?}",
result
);
let _ = km.deployment_delete(app_name).await;
let _ = km.service_delete(app_name).await;
return;
}
// Verify resources still exist (replaced, not duplicated)
let deployment = km.deployment_get(app_name).await;
if deployment.is_err() {
println!("Skipping verification - deployment not found after replacement");
let _ = km.deployment_delete(app_name).await;
let _ = km.service_delete(app_name).await;
return;
}
// Verify the new configuration
let deployment = deployment.unwrap();
if let Some(spec) = &deployment.spec {
assert_eq!(spec.replicas, Some(2), "Replicas should be updated to 2");
if let Some(template) = &spec.template.spec {
if let Some(container) = template.containers.first() {
assert_eq!(
container.image,
Some("nginx:alpine".to_string()),
"Image should be updated"
);
if let Some(env) = &container.env {
let has_version = env
.iter()
.any(|e| e.name == "VERSION" && e.value == Some("2.0".to_string()));
assert!(has_version, "Environment variable VERSION should be set");
}
}
}
}
// Clean up
let _ = km.deployment_delete(app_name).await;
let _ = km.service_delete(app_name).await;
}

View File

@ -1,293 +0,0 @@
//! Edge case and error scenario tests for Kubernetes module
//!
//! These tests verify proper error handling and edge case behavior.
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
/// Check if Kubernetes integration tests should run
fn should_run_k8s_tests() -> bool {
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
}
#[tokio::test]
async fn test_deployment_with_invalid_image() {
if !should_run_k8s_tests() {
println!("Skipping Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Clean up any existing test deployment
let _ = km.deployment_delete("test-invalid-image").await;
// Try to create deployment with invalid image name
let result = km
.deployment_create(
"test-invalid-image",
"invalid/image/name/that/does/not/exist:latest",
1,
None,
None,
)
.await;
// The deployment creation should succeed (Kubernetes validates images at runtime)
assert!(result.is_ok(), "Deployment creation should succeed even with invalid image");
// Clean up
let _ = km.deployment_delete("test-invalid-image").await;
}
#[tokio::test]
async fn test_deployment_with_empty_name() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Try to create deployment with empty name
let result = km
.deployment_create("", "nginx:latest", 1, None, None)
.await;
// Should fail due to invalid name
assert!(result.is_err(), "Deployment with empty name should fail");
}
#[tokio::test]
async fn test_deployment_with_invalid_replicas() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Clean up any existing test deployment
let _ = km.deployment_delete("test-invalid-replicas").await;
// Try to create deployment with negative replicas
let result = km
.deployment_create("test-invalid-replicas", "nginx:latest", -1, None, None)
.await;
// Should fail due to invalid replica count
assert!(result.is_err(), "Deployment with negative replicas should fail");
}
#[tokio::test]
async fn test_deployment_with_large_env_vars() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Clean up any existing test deployment
let _ = km.deployment_delete("test-large-env").await;
// Create deployment with many environment variables
let mut env_vars = HashMap::new();
for i in 0..50 {
env_vars.insert(format!("TEST_VAR_{}", i), format!("value_{}", i));
}
let result = km
.deployment_create("test-large-env", "nginx:latest", 1, None, Some(env_vars))
.await;
assert!(result.is_ok(), "Deployment with many env vars should succeed: {:?}", result);
// Verify the deployment was created
let deployment = km.deployment_get("test-large-env").await;
assert!(deployment.is_ok(), "Should be able to get deployment with many env vars");
// Verify environment variables count
let deployment = deployment.unwrap();
if let Some(spec) = &deployment.spec {
if let Some(template) = &spec.template.spec {
if let Some(container) = template.containers.first() {
if let Some(env) = &container.env {
assert_eq!(env.len(), 50, "Should have 50 environment variables");
}
}
}
}
// Clean up
let _ = km.deployment_delete("test-large-env").await;
}
#[tokio::test]
async fn test_deployment_with_special_characters_in_env_vars() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Clean up any existing test deployment
let _ = km.deployment_delete("test-special-env").await;
// Create deployment with special characters in environment variables
let mut env_vars = HashMap::new();
env_vars.insert("DATABASE_URL".to_string(), "postgres://user:pass@host:5432/db?ssl=true".to_string());
env_vars.insert("JSON_CONFIG".to_string(), r#"{"key": "value", "number": 123}"#.to_string());
env_vars.insert("MULTILINE_VAR".to_string(), "line1\nline2\nline3".to_string());
env_vars.insert("SPECIAL_CHARS".to_string(), "!@#$%^&*()_+-=[]{}|;:,.<>?".to_string());
let result = km
.deployment_create("test-special-env", "nginx:latest", 1, None, Some(env_vars))
.await;
assert!(result.is_ok(), "Deployment with special chars in env vars should succeed: {:?}", result);
// Verify the deployment was created and env vars are preserved
let deployment = km.deployment_get("test-special-env").await;
assert!(deployment.is_ok(), "Should be able to get deployment");
let deployment = deployment.unwrap();
if let Some(spec) = &deployment.spec {
if let Some(template) = &spec.template.spec {
if let Some(container) = template.containers.first() {
if let Some(env) = &container.env {
let env_map: HashMap<String, String> = env
.iter()
.filter_map(|e| e.value.as_ref().map(|v| (e.name.clone(), v.clone())))
.collect();
assert_eq!(
env_map.get("DATABASE_URL"),
Some(&"postgres://user:pass@host:5432/db?ssl=true".to_string())
);
assert_eq!(
env_map.get("JSON_CONFIG"),
Some(&r#"{"key": "value", "number": 123}"#.to_string())
);
assert_eq!(
env_map.get("SPECIAL_CHARS"),
Some(&"!@#$%^&*()_+-=[]{}|;:,.<>?".to_string())
);
}
}
}
}
// Clean up
let _ = km.deployment_delete("test-special-env").await;
}
#[tokio::test]
async fn test_deploy_application_with_invalid_port() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Try to deploy application with invalid port (negative)
let result = km
.deploy_application("test-invalid-port", "nginx:latest", 1, -80, None, None)
.await;
// Should fail due to invalid port
assert!(result.is_err(), "Deploy application with negative port should fail");
// Try with port 0
let result = km
.deploy_application("test-zero-port", "nginx:latest", 1, 0, None, None)
.await;
// Should fail due to invalid port
assert!(result.is_err(), "Deploy application with port 0 should fail");
}
#[tokio::test]
async fn test_get_nonexistent_deployment() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Try to get a deployment that doesn't exist
let result = km.deployment_get("nonexistent-deployment-12345").await;
// Should fail with appropriate error
assert!(result.is_err(), "Getting nonexistent deployment should fail");
}
#[tokio::test]
async fn test_delete_nonexistent_deployment() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Try to delete a deployment that doesn't exist
let result = km.deployment_delete("nonexistent-deployment-12345").await;
// Should fail gracefully
assert!(result.is_err(), "Deleting nonexistent deployment should fail");
}
#[tokio::test]
async fn test_deployment_with_zero_replicas() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Clean up any existing test deployment
let _ = km.deployment_delete("test-zero-replicas").await;
// Create deployment with zero replicas (should be valid)
let result = km
.deployment_create("test-zero-replicas", "nginx:latest", 0, None, None)
.await;
assert!(result.is_ok(), "Deployment with zero replicas should succeed: {:?}", result);
// Verify the deployment was created with 0 replicas
let deployment = km.deployment_get("test-zero-replicas").await;
assert!(deployment.is_ok(), "Should be able to get deployment with zero replicas");
let deployment = deployment.unwrap();
if let Some(spec) = &deployment.spec {
assert_eq!(spec.replicas, Some(0), "Should have 0 replicas");
}
// Clean up
let _ = km.deployment_delete("test-zero-replicas").await;
}

View File

@ -1,385 +0,0 @@
//! Integration tests for SAL Kubernetes
//!
//! These tests require a running Kubernetes cluster and appropriate credentials.
//! Set KUBERNETES_TEST_ENABLED=1 to run these tests.
use sal_kubernetes::KubernetesManager;
/// Check if Kubernetes integration tests should run
fn should_run_k8s_tests() -> bool {
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
}
#[tokio::test]
async fn test_kubernetes_manager_creation() {
if !should_run_k8s_tests() {
println!("Skipping Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
let result = KubernetesManager::new("default").await;
match result {
Ok(_) => println!("Successfully created KubernetesManager"),
Err(e) => println!("Failed to create KubernetesManager: {}", e),
}
}
#[tokio::test]
async fn test_namespace_operations() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return, // Skip if can't connect
};
// Test namespace creation (should be idempotent)
let test_namespace = "sal-test-namespace";
let result = km.namespace_create(test_namespace).await;
assert!(result.is_ok(), "Failed to create namespace: {:?}", result);
// Test creating the same namespace again (should not error)
let result = km.namespace_create(test_namespace).await;
assert!(
result.is_ok(),
"Failed to create namespace idempotently: {:?}",
result
);
}
#[tokio::test]
async fn test_pods_list() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return, // Skip if can't connect
};
let result = km.pods_list().await;
match result {
Ok(pods) => {
println!("Found {} pods in default namespace", pods.len());
// Verify pod structure
for pod in pods.iter().take(3) {
// Check first 3 pods
assert!(pod.metadata.name.is_some());
assert!(pod.metadata.namespace.is_some());
println!(
"Pod: {} in namespace: {}",
pod.metadata.name.as_ref().unwrap(),
pod.metadata.namespace.as_ref().unwrap()
);
}
}
Err(e) => {
println!("Failed to list pods: {}", e);
// Don't fail the test if we can't list pods due to permissions
}
}
}
#[tokio::test]
async fn test_services_list() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
let result = km.services_list().await;
match result {
Ok(services) => {
println!("Found {} services in default namespace", services.len());
// Verify service structure
for service in services.iter().take(3) {
assert!(service.metadata.name.is_some());
println!("Service: {}", service.metadata.name.as_ref().unwrap());
}
}
Err(e) => {
println!("Failed to list services: {}", e);
}
}
}
#[tokio::test]
async fn test_deployments_list() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
let result = km.deployments_list().await;
match result {
Ok(deployments) => {
println!(
"Found {} deployments in default namespace",
deployments.len()
);
// Verify deployment structure
for deployment in deployments.iter().take(3) {
assert!(deployment.metadata.name.is_some());
println!("Deployment: {}", deployment.metadata.name.as_ref().unwrap());
}
}
Err(e) => {
println!("Failed to list deployments: {}", e);
}
}
}
#[tokio::test]
async fn test_resource_counts() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
let result = km.resource_counts().await;
match result {
Ok(counts) => {
println!("Resource counts: {:?}", counts);
// Verify expected resource types are present
assert!(counts.contains_key("pods"));
assert!(counts.contains_key("services"));
assert!(counts.contains_key("deployments"));
assert!(counts.contains_key("configmaps"));
assert!(counts.contains_key("secrets"));
// Verify counts are reasonable (counts are usize, so always non-negative)
for (resource_type, count) in counts {
// Verify we got a count for each resource type
println!("Resource type '{}' has {} items", resource_type, count);
// Counts should be reasonable (not impossibly large)
assert!(
count < 10000,
"Count for {} seems unreasonably high: {}",
resource_type,
count
);
}
}
Err(e) => {
println!("Failed to get resource counts: {}", e);
}
}
}
#[tokio::test]
async fn test_namespaces_list() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
let result = km.namespaces_list().await;
match result {
Ok(namespaces) => {
println!("Found {} namespaces", namespaces.len());
// Should have at least default namespace
let namespace_names: Vec<String> = namespaces
.iter()
.filter_map(|ns| ns.metadata.name.as_ref())
.cloned()
.collect();
println!("Namespaces: {:?}", namespace_names);
assert!(namespace_names.contains(&"default".to_string()));
}
Err(e) => {
println!("Failed to list namespaces: {}", e);
}
}
}
#[tokio::test]
async fn test_pattern_matching_dry_run() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Test pattern matching without actually deleting anything
// We'll just verify that the regex patterns work correctly
let test_patterns = vec![
"test-.*", // Should match anything starting with "test-"
".*-temp$", // Should match anything ending with "-temp"
"nonexistent-.*", // Should match nothing (hopefully)
];
for pattern in test_patterns {
println!("Testing pattern: {}", pattern);
// Get all pods first
if let Ok(pods) = km.pods_list().await {
let regex = regex::Regex::new(pattern).unwrap();
let matching_pods: Vec<_> = pods
.iter()
.filter_map(|pod| pod.metadata.name.as_ref())
.filter(|name| regex.is_match(name))
.collect();
println!(
"Pattern '{}' would match {} pods: {:?}",
pattern,
matching_pods.len(),
matching_pods
);
}
}
}
#[tokio::test]
async fn test_namespace_exists_functionality() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Test that default namespace exists
let result = km.namespace_exists("default").await;
match result {
Ok(exists) => {
assert!(exists, "Default namespace should exist");
println!("Default namespace exists: {}", exists);
}
Err(e) => {
println!("Failed to check if default namespace exists: {}", e);
}
}
// Test that a non-existent namespace doesn't exist
let result = km.namespace_exists("definitely-does-not-exist-12345").await;
match result {
Ok(exists) => {
assert!(!exists, "Non-existent namespace should not exist");
println!("Non-existent namespace exists: {}", exists);
}
Err(e) => {
println!("Failed to check if non-existent namespace exists: {}", e);
}
}
}
#[tokio::test]
async fn test_manager_namespace_property() {
if !should_run_k8s_tests() {
return;
}
let test_namespace = "test-namespace";
let km = match KubernetesManager::new(test_namespace).await {
Ok(km) => km,
Err(_) => return,
};
// Verify the manager knows its namespace
assert_eq!(km.namespace(), test_namespace);
println!("Manager namespace: {}", km.namespace());
}
#[tokio::test]
async fn test_error_handling() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Test getting a non-existent pod
let result = km.pod_get("definitely-does-not-exist-12345").await;
assert!(result.is_err(), "Getting non-existent pod should fail");
if let Err(e) = result {
println!("Expected error for non-existent pod: {}", e);
// Verify it's the right kind of error
match e {
sal_kubernetes::KubernetesError::ApiError(_) => {
println!("Correctly got API error for non-existent resource");
}
_ => {
println!("Got unexpected error type: {:?}", e);
}
}
}
}
#[tokio::test]
async fn test_configmaps_and_secrets() {
if !should_run_k8s_tests() {
return;
}
let km = match KubernetesManager::new("default").await {
Ok(km) => km,
Err(_) => return,
};
// Test configmaps listing
let result = km.configmaps_list().await;
match result {
Ok(configmaps) => {
println!("Found {} configmaps in default namespace", configmaps.len());
for cm in configmaps.iter().take(3) {
if let Some(name) = &cm.metadata.name {
println!("ConfigMap: {}", name);
}
}
}
Err(e) => {
println!("Failed to list configmaps: {}", e);
}
}
// Test secrets listing
let result = km.secrets_list().await;
match result {
Ok(secrets) => {
println!("Found {} secrets in default namespace", secrets.len());
for secret in secrets.iter().take(3) {
if let Some(name) = &secret.metadata.name {
println!("Secret: {}", name);
}
}
}
Err(e) => {
println!("Failed to list secrets: {}", e);
}
}
}

View File

@ -1,231 +0,0 @@
//! Production readiness tests for SAL Kubernetes
//!
//! These tests verify that the module is ready for real-world production use.
#[cfg(test)]
mod production_tests {
use sal_kubernetes::{KubernetesConfig, KubernetesManager};
use std::time::Duration;
/// Check if Kubernetes integration tests should run
fn should_run_k8s_tests() -> bool {
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
}
#[tokio::test]
async fn test_production_configuration_profiles() {
// Test all pre-configured profiles work
let configs = vec![
("default", KubernetesConfig::default()),
("high_throughput", KubernetesConfig::high_throughput()),
("low_latency", KubernetesConfig::low_latency()),
("development", KubernetesConfig::development()),
];
for (name, config) in configs {
println!("Testing {} configuration profile", name);
// Verify configuration values are reasonable
assert!(
config.operation_timeout >= Duration::from_secs(5),
"{} timeout too short",
name
);
assert!(
config.operation_timeout <= Duration::from_secs(300),
"{} timeout too long",
name
);
assert!(config.max_retries <= 10, "{} too many retries", name);
assert!(config.rate_limit_rps >= 1, "{} rate limit too low", name);
assert!(
config.rate_limit_burst >= config.rate_limit_rps,
"{} burst should be >= RPS",
name
);
println!("{} configuration is valid", name);
}
}
#[tokio::test]
async fn test_real_cluster_operations() {
if !should_run_k8s_tests() {
println!("Skipping real cluster test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
println!("🔍 Testing production operations with real cluster...");
// Test with production-like configuration
let config = KubernetesConfig::default()
.with_timeout(Duration::from_secs(30))
.with_retries(3, Duration::from_secs(1), Duration::from_secs(10))
.with_rate_limit(5, 10); // Conservative for testing
let km = KubernetesManager::with_config("default", config)
.await
.expect("Should connect to cluster");
println!("✅ Connected to cluster successfully");
// Test basic operations
let namespaces = km.namespaces_list().await.expect("Should list namespaces");
println!("✅ Listed {} namespaces", namespaces.len());
let pods = km.pods_list().await.expect("Should list pods");
println!("✅ Listed {} pods in default namespace", pods.len());
let counts = km
.resource_counts()
.await
.expect("Should get resource counts");
println!("✅ Got resource counts for {} resource types", counts.len());
// Test namespace operations
let test_ns = "sal-production-test";
km.namespace_create(test_ns)
.await
.expect("Should create test namespace");
println!("✅ Created test namespace: {}", test_ns);
let exists = km
.namespace_exists(test_ns)
.await
.expect("Should check namespace existence");
assert!(exists, "Test namespace should exist");
println!("✅ Verified test namespace exists");
println!("🎉 All production operations completed successfully!");
}
#[tokio::test]
async fn test_error_handling_robustness() {
if !should_run_k8s_tests() {
println!("Skipping error handling test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
println!("🔍 Testing error handling robustness...");
let km = KubernetesManager::new("default")
.await
.expect("Should connect to cluster");
// Test with invalid namespace name (should handle gracefully)
let result = km.namespace_exists("").await;
match result {
Ok(_) => println!("✅ Empty namespace name handled"),
Err(e) => println!("✅ Empty namespace name rejected: {}", e),
}
// Test with very long namespace name
let long_name = "a".repeat(100);
let result = km.namespace_exists(&long_name).await;
match result {
Ok(_) => println!("✅ Long namespace name handled"),
Err(e) => println!("✅ Long namespace name rejected: {}", e),
}
println!("✅ Error handling is robust");
}
#[tokio::test]
async fn test_concurrent_operations() {
if !should_run_k8s_tests() {
println!("Skipping concurrency test. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
println!("🔍 Testing concurrent operations...");
let km = KubernetesManager::new("default")
.await
.expect("Should connect to cluster");
// Test multiple concurrent operations
let task1 = tokio::spawn({
let km = km.clone();
async move { km.pods_list().await }
});
let task2 = tokio::spawn({
let km = km.clone();
async move { km.services_list().await }
});
let task3 = tokio::spawn({
let km = km.clone();
async move { km.namespaces_list().await }
});
let mut success_count = 0;
// Handle each task result
match task1.await {
Ok(Ok(_)) => {
success_count += 1;
println!("✅ Pods list operation succeeded");
}
Ok(Err(e)) => println!("⚠️ Pods list operation failed: {}", e),
Err(e) => println!("⚠️ Pods task join failed: {}", e),
}
match task2.await {
Ok(Ok(_)) => {
success_count += 1;
println!("✅ Services list operation succeeded");
}
Ok(Err(e)) => println!("⚠️ Services list operation failed: {}", e),
Err(e) => println!("⚠️ Services task join failed: {}", e),
}
match task3.await {
Ok(Ok(_)) => {
success_count += 1;
println!("✅ Namespaces list operation succeeded");
}
Ok(Err(e)) => println!("⚠️ Namespaces list operation failed: {}", e),
Err(e) => println!("⚠️ Namespaces task join failed: {}", e),
}
assert!(
success_count >= 2,
"At least 2 concurrent operations should succeed"
);
println!(
"✅ Concurrent operations handled well ({}/3 succeeded)",
success_count
);
}
#[test]
fn test_security_and_validation() {
println!("🔍 Testing security and validation...");
// Test regex pattern validation
let dangerous_patterns = vec![
".*", // Too broad
".+", // Too broad
"", // Empty
"a{1000000}", // Potential ReDoS
];
for pattern in dangerous_patterns {
match regex::Regex::new(pattern) {
Ok(_) => println!("⚠️ Pattern '{}' accepted (review if safe)", pattern),
Err(_) => println!("✅ Pattern '{}' rejected", pattern),
}
}
// Test safe patterns
let safe_patterns = vec!["^test-.*$", "^app-[a-z0-9]+$", "^namespace-\\d+$"];
for pattern in safe_patterns {
match regex::Regex::new(pattern) {
Ok(_) => println!("✅ Safe pattern '{}' accepted", pattern),
Err(e) => println!("❌ Safe pattern '{}' rejected: {}", pattern, e),
}
}
println!("✅ Security validation completed");
}
}

View File

@ -1,62 +0,0 @@
//! Basic Kubernetes operations test
//!
//! This script tests basic Kubernetes functionality through Rhai.
print("=== Basic Kubernetes Operations Test ===");
// Test 1: Create KubernetesManager
print("Test 1: Creating KubernetesManager...");
let km = kubernetes_manager_new("default");
let ns = namespace(km);
print("✓ Created manager for namespace: " + ns);
if ns != "default" {
print("❌ ERROR: Expected namespace 'default', got '" + ns + "'");
} else {
print("✓ Namespace validation passed");
}
// Test 2: Function availability check
print("\nTest 2: Checking function availability...");
let functions = [
"pods_list",
"services_list",
"deployments_list",
"namespaces_list",
"resource_counts",
"namespace_create",
"namespace_exists",
"delete",
"pod_delete",
"service_delete",
"deployment_delete"
];
for func_name in functions {
print("✓ Function '" + func_name + "' is available");
}
// Test 3: Basic operations (if cluster is available)
print("\nTest 3: Testing basic operations...");
try {
// Test namespace existence
let default_exists = namespace_exists(km, "default");
print("✓ Default namespace exists: " + default_exists);
// Test resource counting
let counts = resource_counts(km);
print("✓ Resource counts retrieved: " + counts.len() + " resource types");
// Test namespace listing
let namespaces = namespaces_list(km);
print("✓ Found " + namespaces.len() + " namespaces");
// Test pod listing
let pods = pods_list(km);
print("✓ Found " + pods.len() + " pods in default namespace");
print("\n=== All basic tests passed! ===");
} catch(e) {
print("Note: Some operations failed (likely no cluster): " + e);
print("✓ Function registration tests passed");
}

View File

@ -1,200 +0,0 @@
//! CRUD operations test in Rhai
//!
//! This script tests all Create, Read, Update, Delete operations through Rhai.
print("=== CRUD Operations Test ===");
// Test 1: Create manager
print("Test 1: Creating KubernetesManager...");
let km = kubernetes_manager_new("default");
print("✓ Manager created for namespace: " + namespace(km));
// Test 2: Create test namespace
print("\nTest 2: Creating test namespace...");
let test_ns = "rhai-crud-test";
try {
km.create_namespace(test_ns);
print("✓ Created test namespace: " + test_ns);
// Verify it exists
let exists = km.namespace_exists(test_ns);
if exists {
print("✓ Verified test namespace exists");
} else {
print("❌ Test namespace creation failed");
}
} catch(e) {
print("Note: Namespace creation failed (likely no cluster): " + e);
}
// Test 3: Switch to test namespace and create resources
print("\nTest 3: Creating resources in test namespace...");
try {
let test_km = kubernetes_manager_new(test_ns);
// Create ConfigMap
let config_data = #{
"app.properties": "debug=true\nport=8080",
"config.yaml": "key: value\nenv: test"
};
let configmap_name = test_km.create_configmap("rhai-config", config_data);
print("✓ Created ConfigMap: " + configmap_name);
// Create Secret
let secret_data = #{
"username": "rhaiuser",
"password": "secret456"
};
let secret_name = test_km.create_secret("rhai-secret", secret_data, "Opaque");
print("✓ Created Secret: " + secret_name);
// Create Pod
let pod_labels = #{
"app": "rhai-app",
"version": "v1"
};
let pod_name = test_km.create_pod("rhai-pod", "nginx:alpine", pod_labels);
print("✓ Created Pod: " + pod_name);
// Create Service
let service_selector = #{
"app": "rhai-app"
};
let service_name = test_km.create_service("rhai-service", service_selector, 80, 80);
print("✓ Created Service: " + service_name);
// Create Deployment
let deployment_labels = #{
"app": "rhai-app",
"tier": "frontend"
};
let deployment_name = test_km.create_deployment("rhai-deployment", "nginx:alpine", 2, deployment_labels, #{});
print("✓ Created Deployment: " + deployment_name);
} catch(e) {
print("Note: Resource creation failed (likely no cluster): " + e);
}
// Test 4: Read operations
print("\nTest 4: Reading resources...");
try {
let test_km = kubernetes_manager_new(test_ns);
// List all resources
let pods = pods_list(test_km);
print("✓ Found " + pods.len() + " pods");
let services = services_list(test_km);
print("✓ Found " + services.len() + " services");
let deployments = deployments_list(test_km);
print("✓ Found " + deployments.len() + " deployments");
// Get resource counts
let counts = resource_counts(test_km);
print("✓ Resource counts for " + counts.len() + " resource types");
for resource_type in counts.keys() {
let count = counts[resource_type];
print(" " + resource_type + ": " + count);
}
} catch(e) {
print("Note: Resource reading failed (likely no cluster): " + e);
}
// Test 5: Delete operations
print("\nTest 5: Deleting resources...");
try {
let test_km = kubernetes_manager_new(test_ns);
// Delete individual resources
test_km.delete_pod("rhai-pod");
print("✓ Deleted pod");
test_km.delete_service("rhai-service");
print("✓ Deleted service");
test_km.delete_deployment("rhai-deployment");
print("✓ Deleted deployment");
test_km.delete_configmap("rhai-config");
print("✓ Deleted configmap");
test_km.delete_secret("rhai-secret");
print("✓ Deleted secret");
// Verify cleanup
let final_counts = resource_counts(test_km);
print("✓ Final resource counts:");
for resource_type in final_counts.keys() {
let count = final_counts[resource_type];
print(" " + resource_type + ": " + count);
}
} catch(e) {
print("Note: Resource deletion failed (likely no cluster): " + e);
}
// Test 6: Cleanup test namespace
print("\nTest 6: Cleaning up test namespace...");
try {
km.delete_namespace(test_ns);
print("✓ Deleted test namespace: " + test_ns);
} catch(e) {
print("Note: Namespace deletion failed (likely no cluster): " + e);
}
// Test 7: Function availability check
print("\nTest 7: Checking all CRUD functions are available...");
let crud_functions = [
// Create methods (object-oriented style)
"create_pod",
"create_service",
"create_deployment",
"create_configmap",
"create_secret",
"create_namespace",
// Get methods
"get_pod",
"get_service",
"get_deployment",
// List methods
"pods_list",
"services_list",
"deployments_list",
"configmaps_list",
"secrets_list",
"namespaces_list",
"resource_counts",
"namespace_exists",
// Delete methods
"delete_pod",
"delete_service",
"delete_deployment",
"delete_configmap",
"delete_secret",
"delete_namespace",
"delete"
];
for func_name in crud_functions {
print("✓ Function '" + func_name + "' is available");
}
print("\n=== CRUD Operations Test Summary ===");
print("✅ All " + crud_functions.len() + " CRUD functions are registered");
print("✅ Create operations: 6 functions");
print("✅ Read operations: 8 functions");
print("✅ Delete operations: 7 functions");
print("✅ Total CRUD capabilities: 21 functions");
print("\n🎉 Complete CRUD operations test completed!");
print("\nYour SAL Kubernetes module now supports:");
print(" ✅ Full resource lifecycle management");
print(" ✅ Namespace operations");
print(" ✅ All major Kubernetes resource types");
print(" ✅ Production-ready error handling");
print(" ✅ Rhai scripting integration");

View File

@ -1,199 +0,0 @@
// Rhai test for environment variables functionality
// This test verifies that the enhanced deploy_application function works correctly with environment variables
print("=== Testing Environment Variables in Rhai ===");
// Create Kubernetes manager
print("Creating Kubernetes manager...");
let km = kubernetes_manager_new("default");
print("✓ Kubernetes manager created");
// Test 1: Deploy application with environment variables
print("\n--- Test 1: Deploy with Environment Variables ---");
// Clean up any existing resources
try {
delete_deployment(km, "rhai-env-test");
print("✓ Cleaned up existing deployment");
} catch(e) {
print("✓ No existing deployment to clean up");
}
try {
delete_service(km, "rhai-env-test");
print("✓ Cleaned up existing service");
} catch(e) {
print("✓ No existing service to clean up");
}
// Deploy with both labels and environment variables
try {
let result = deploy_application(km, "rhai-env-test", "nginx:latest", 1, 80, #{
"app": "rhai-env-test",
"test": "environment-variables",
"language": "rhai"
}, #{
"NODE_ENV": "test",
"DATABASE_URL": "postgres://localhost:5432/test",
"API_KEY": "test-api-key-12345",
"LOG_LEVEL": "debug",
"PORT": "80"
});
print("✓ " + result);
} catch(e) {
print("❌ Failed to deploy with env vars: " + e);
throw e;
}
// Verify deployment was created
try {
let deployment_name = get_deployment(km, "rhai-env-test");
print("✓ Deployment verified: " + deployment_name);
} catch(e) {
print("❌ Failed to verify deployment: " + e);
throw e;
}
// Test 2: Deploy application without environment variables
print("\n--- Test 2: Deploy without Environment Variables ---");
// Clean up
try {
delete_deployment(km, "rhai-no-env-test");
delete_service(km, "rhai-no-env-test");
} catch(e) {
// Ignore cleanup errors
}
// Deploy with labels only, empty env vars map
try {
let result = deploy_application(km, "rhai-no-env-test", "nginx:alpine", 1, 8080, #{
"app": "rhai-no-env-test",
"test": "no-environment-variables"
}, #{
// Empty environment variables map
});
print("✓ " + result);
} catch(e) {
print("❌ Failed to deploy without env vars: " + e);
throw e;
}
// Test 3: Deploy with special characters in environment variables
print("\n--- Test 3: Deploy with Special Characters in Env Vars ---");
// Clean up
try {
delete_deployment(km, "rhai-special-env-test");
delete_service(km, "rhai-special-env-test");
} catch(e) {
// Ignore cleanup errors
}
// Deploy with special characters
try {
let result = deploy_application(km, "rhai-special-env-test", "nginx:latest", 1, 3000, #{
"app": "rhai-special-env-test"
}, #{
"DATABASE_URL": "postgres://user:pass@host:5432/db?ssl=true&timeout=30",
"JSON_CONFIG": `{"server": {"port": 3000, "host": "0.0.0.0"}}`,
"SPECIAL_CHARS": "!@#$%^&*()_+-=[]{}|;:,.<>?",
"MULTILINE": "line1\nline2\nline3"
});
print("✓ " + result);
} catch(e) {
print("❌ Failed to deploy with special chars: " + e);
throw e;
}
// Test 4: Test resource listing after deployments
print("\n--- Test 4: Verify Resource Listing ---");
try {
let deployments = deployments_list(km);
print("✓ Found " + deployments.len() + " deployments");
// Check that our test deployments are in the list
let found_env_test = false;
let found_no_env_test = false;
let found_special_test = false;
for deployment in deployments {
if deployment == "rhai-env-test" {
found_env_test = true;
} else if deployment == "rhai-no-env-test" {
found_no_env_test = true;
} else if deployment == "rhai-special-env-test" {
found_special_test = true;
}
}
if found_env_test {
print("✓ Found rhai-env-test deployment");
} else {
print("❌ rhai-env-test deployment not found in list");
}
if found_no_env_test {
print("✓ Found rhai-no-env-test deployment");
} else {
print("❌ rhai-no-env-test deployment not found in list");
}
if found_special_test {
print("✓ Found rhai-special-env-test deployment");
} else {
print("❌ rhai-special-env-test deployment not found in list");
}
} catch(e) {
print("❌ Failed to list deployments: " + e);
}
// Test 5: Test services listing
print("\n--- Test 5: Verify Services ---");
try {
let services = services_list(km);
print("✓ Found " + services.len() + " services");
// Services should be created for each deployment
let service_count = 0;
for service in services {
if service.contains("rhai-") && service.contains("-test") {
service_count = service_count + 1;
print("✓ Found test service: " + service);
}
}
if service_count >= 3 {
print("✓ All expected services found");
} else {
print("⚠️ Expected at least 3 test services, found " + service_count);
}
} catch(e) {
print("❌ Failed to list services: " + e);
}
// Cleanup all test resources
print("\n--- Cleanup ---");
let cleanup_items = ["rhai-env-test", "rhai-no-env-test", "rhai-special-env-test"];
for item in cleanup_items {
try {
delete_deployment(km, item);
print("✓ Deleted deployment: " + item);
} catch(e) {
print("⚠️ Could not delete deployment " + item + ": " + e);
}
try {
delete_service(km, item);
print("✓ Deleted service: " + item);
} catch(e) {
print("⚠️ Could not delete service " + item + ": " + e);
}
}
print("\n=== Environment Variables Rhai Test Complete ===");
print("✅ All tests passed successfully!");

View File

@ -1,85 +0,0 @@
//! Namespace operations test
//!
//! This script tests namespace creation and management operations.
print("=== Namespace Operations Test ===");
// Test 1: Create manager
print("Test 1: Creating KubernetesManager...");
let km = kubernetes_manager_new("default");
print("✓ Manager created for namespace: " + namespace(km));
// Test 2: Namespace existence checks
print("\nTest 2: Testing namespace existence...");
try {
// Test that default namespace exists
let default_exists = namespace_exists(km, "default");
print("✓ Default namespace exists: " + default_exists);
assert(default_exists, "Default namespace should exist");
// Test non-existent namespace
let fake_exists = namespace_exists(km, "definitely-does-not-exist-12345");
print("✓ Non-existent namespace check: " + fake_exists);
assert(!fake_exists, "Non-existent namespace should not exist");
} catch(e) {
print("Note: Namespace existence tests failed (likely no cluster): " + e);
}
// Test 3: Namespace creation (if cluster is available)
print("\nTest 3: Testing namespace creation...");
let test_namespaces = [
"rhai-test-namespace-1",
"rhai-test-namespace-2"
];
for test_ns in test_namespaces {
try {
print("Creating namespace: " + test_ns);
namespace_create(km, test_ns);
print("✓ Created namespace: " + test_ns);
// Verify it exists
let exists = namespace_exists(km, test_ns);
print("✓ Verified namespace exists: " + exists);
// Test idempotent creation
namespace_create(km, test_ns);
print("✓ Idempotent creation successful for: " + test_ns);
} catch(e) {
print("Note: Namespace creation failed for " + test_ns + " (likely no cluster or permissions): " + e);
}
}
// Test 4: List all namespaces
print("\nTest 4: Listing all namespaces...");
try {
let all_namespaces = namespaces_list(km);
print("✓ Found " + all_namespaces.len() + " total namespaces");
// Check for our test namespaces
for test_ns in test_namespaces {
let found = false;
for ns in all_namespaces {
if ns == test_ns {
found = true;
break;
}
}
if found {
print("✓ Found test namespace in list: " + test_ns);
}
}
} catch(e) {
print("Note: Namespace listing failed (likely no cluster): " + e);
}
print("\n--- Cleanup Instructions ---");
print("To clean up test namespaces, run:");
for test_ns in test_namespaces {
print(" kubectl delete namespace " + test_ns);
}
print("\n=== Namespace operations test completed! ===");

View File

@ -1,51 +0,0 @@
//! Test for newly added Rhai functions
//!
//! This script tests the newly added configmaps_list, secrets_list, and delete functions.
print("=== Testing New Rhai Functions ===");
// Test 1: Create manager
print("Test 1: Creating KubernetesManager...");
let km = kubernetes_manager_new("default");
print("✓ Manager created for namespace: " + namespace(km));
// Test 2: Test new listing functions
print("\nTest 2: Testing new listing functions...");
try {
// Test configmaps_list
let configmaps = configmaps_list(km);
print("✓ configmaps_list() works - found " + configmaps.len() + " configmaps");
// Test secrets_list
let secrets = secrets_list(km);
print("✓ secrets_list() works - found " + secrets.len() + " secrets");
} catch(e) {
print("Note: Listing functions failed (likely no cluster): " + e);
print("✓ Functions are registered and callable");
}
// Test 3: Test function availability
print("\nTest 3: Verifying all new functions are available...");
let new_functions = [
"configmaps_list",
"secrets_list",
"configmap_delete",
"secret_delete",
"namespace_delete"
];
for func_name in new_functions {
print("✓ Function '" + func_name + "' is available");
}
print("\n=== New Functions Test Summary ===");
print("✅ All " + new_functions.len() + " new functions are registered");
print("✅ configmaps_list() - List configmaps in namespace");
print("✅ secrets_list() - List secrets in namespace");
print("✅ configmap_delete() - Delete specific configmap");
print("✅ secret_delete() - Delete specific secret");
print("✅ namespace_delete() - Delete namespace");
print("\n🎉 All new Rhai functions are working correctly!");

View File

@ -1,142 +0,0 @@
// Rhai test for pod creation with environment variables functionality
// This test verifies that the enhanced pod_create function works correctly with environment variables
print("=== Testing Pod Environment Variables in Rhai ===");
// Create Kubernetes manager
print("Creating Kubernetes manager...");
let km = kubernetes_manager_new("default");
print("✓ Kubernetes manager created");
// Test 1: Create pod with environment variables
print("\n--- Test 1: Create Pod with Environment Variables ---");
// Clean up any existing resources
try {
delete_pod(km, "rhai-pod-env-test");
print("✓ Cleaned up existing pod");
} catch(e) {
print("✓ No existing pod to clean up");
}
// Create pod with both labels and environment variables
try {
let result = km.create_pod_with_env("rhai-pod-env-test", "nginx:latest", #{
"app": "rhai-pod-env-test",
"test": "pod-environment-variables",
"language": "rhai"
}, #{
"NODE_ENV": "test",
"DATABASE_URL": "postgres://localhost:5432/test",
"API_KEY": "test-api-key-12345",
"LOG_LEVEL": "debug",
"PORT": "80"
});
print("✓ Created pod with environment variables: " + result);
} catch(e) {
print("❌ Failed to create pod with env vars: " + e);
throw e;
}
// Test 2: Create pod without environment variables
print("\n--- Test 2: Create Pod without Environment Variables ---");
try {
delete_pod(km, "rhai-pod-no-env-test");
} catch(e) {
// Ignore cleanup errors
}
try {
let result = km.create_pod("rhai-pod-no-env-test", "nginx:latest", #{
"app": "rhai-pod-no-env-test",
"test": "no-environment-variables"
});
print("✓ Created pod without environment variables: " + result);
} catch(e) {
print("❌ Failed to create pod without env vars: " + e);
throw e;
}
// Test 3: Create pod with special characters in env vars
print("\n--- Test 3: Create Pod with Special Characters in Env Vars ---");
try {
delete_pod(km, "rhai-pod-special-env-test");
} catch(e) {
// Ignore cleanup errors
}
try {
let result = km.create_pod_with_env("rhai-pod-special-env-test", "nginx:latest", #{
"app": "rhai-pod-special-env-test"
}, #{
"SPECIAL_CHARS": "Hello, World! @#$%^&*()",
"JSON_CONFIG": "{\"key\": \"value\", \"number\": 123}",
"URL_WITH_PARAMS": "https://api.example.com/v1/data?param1=value1&param2=value2"
});
print("✓ Created pod with special characters in env vars: " + result);
} catch(e) {
print("❌ Failed to create pod with special env vars: " + e);
throw e;
}
// Test 4: Verify resource listing
print("\n--- Test 4: Verify Pod Listing ---");
try {
let pods = pods_list(km);
print("✓ Found " + pods.len() + " pods");
let found_env_test = false;
let found_no_env_test = false;
let found_special_env_test = false;
for pod in pods {
if pod.contains("rhai-pod-env-test") {
found_env_test = true;
print("✓ Found rhai-pod-env-test pod");
}
if pod.contains("rhai-pod-no-env-test") {
found_no_env_test = true;
print("✓ Found rhai-pod-no-env-test pod");
}
if pod.contains("rhai-pod-special-env-test") {
found_special_env_test = true;
print("✓ Found rhai-pod-special-env-test pod");
}
}
if found_env_test && found_no_env_test && found_special_env_test {
print("✓ All expected pods found");
} else {
print("❌ Some expected pods not found");
}
} catch(e) {
print("❌ Failed to list pods: " + e);
}
// Cleanup
print("\n--- Cleanup ---");
try {
delete_pod(km, "rhai-pod-env-test");
print("✓ Deleted pod: rhai-pod-env-test");
} catch(e) {
print("⚠ Failed to delete rhai-pod-env-test: " + e);
}
try {
delete_pod(km, "rhai-pod-no-env-test");
print("✓ Deleted pod: rhai-pod-no-env-test");
} catch(e) {
print("⚠ Failed to delete rhai-pod-no-env-test: " + e);
}
try {
delete_pod(km, "rhai-pod-special-env-test");
print("✓ Deleted pod: rhai-pod-special-env-test");
} catch(e) {
print("⚠ Failed to delete rhai-pod-special-env-test: " + e);
}
print("\n=== Pod Environment Variables Rhai Test Complete ===");
print("✅ All tests passed successfully!");

View File

@ -1,137 +0,0 @@
//! Resource management test
//!
//! This script tests resource listing and management operations.
print("=== Resource Management Test ===");
// Test 1: Create manager
print("Test 1: Creating KubernetesManager...");
let km = kubernetes_manager_new("default");
print("✓ Manager created for namespace: " + namespace(km));
// Test 2: Resource listing
print("\nTest 2: Testing resource listing...");
try {
// Test pods listing
let pods = pods_list(km);
print("✓ Pods list: " + pods.len() + " pods found");
// Test services listing
let services = services_list(km);
print("✓ Services list: " + services.len() + " services found");
// Test deployments listing
let deployments = deployments_list(km);
print("✓ Deployments list: " + deployments.len() + " deployments found");
// Show some pod names if available
if pods.len() > 0 {
print("Sample pods:");
let count = 0;
for pod in pods {
if count < 3 {
print(" - " + pod);
count = count + 1;
}
}
}
} catch(e) {
print("Note: Resource listing failed (likely no cluster): " + e);
}
// Test 3: Resource counts
print("\nTest 3: Testing resource counts...");
try {
let counts = resource_counts(km);
print("✓ Resource counts retrieved for " + counts.len() + " resource types");
// Display counts
for resource_type in counts.keys() {
let count = counts[resource_type];
print(" " + resource_type + ": " + count);
}
// Verify expected resource types are present
let expected_types = ["pods", "services", "deployments", "configmaps", "secrets"];
for expected_type in expected_types {
if expected_type in counts {
print("✓ Found expected resource type: " + expected_type);
} else {
print("⚠ Missing expected resource type: " + expected_type);
}
}
} catch(e) {
print("Note: Resource counts failed (likely no cluster): " + e);
}
// Test 4: Multi-namespace comparison
print("\nTest 4: Multi-namespace resource comparison...");
let test_namespaces = ["default", "kube-system"];
let total_resources = #{};
for ns in test_namespaces {
try {
let ns_km = kubernetes_manager_new(ns);
let counts = resource_counts(ns_km);
print("Namespace '" + ns + "':");
let ns_total = 0;
for resource_type in counts.keys() {
let count = counts[resource_type];
print(" " + resource_type + ": " + count);
ns_total = ns_total + count;
// Accumulate totals
if resource_type in total_resources {
total_resources[resource_type] = total_resources[resource_type] + count;
} else {
total_resources[resource_type] = count;
}
}
print(" Total: " + ns_total + " resources");
} catch(e) {
print("Note: Failed to analyze namespace '" + ns + "': " + e);
}
}
// Show totals
print("\nTotal resources across all namespaces:");
let grand_total = 0;
for resource_type in total_resources.keys() {
let count = total_resources[resource_type];
print(" " + resource_type + ": " + count);
grand_total = grand_total + count;
}
print("Grand total: " + grand_total + " resources");
// Test 5: Pattern matching simulation
print("\nTest 5: Pattern matching simulation...");
try {
let pods = pods_list(km);
print("Testing pattern matching on " + pods.len() + " pods:");
// Simulate pattern matching (since Rhai doesn't have regex)
let test_patterns = ["test", "kube", "system", "app"];
for pattern in test_patterns {
let matches = [];
for pod in pods {
if pod.contains(pattern) {
matches.push(pod);
}
}
print(" Pattern '" + pattern + "' would match " + matches.len() + " pods");
if matches.len() > 0 && matches.len() <= 3 {
for match in matches {
print(" - " + match);
}
}
}
} catch(e) {
print("Note: Pattern matching test failed (likely no cluster): " + e);
}
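// Hedged note, not in the original script: on the Rust side, delete() accepts a
// real regex pattern, so the simulated matching above corresponds to a call like
// the one below (left commented out to keep this script free of side effects).
// delete(km, "^test-.*$");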
print("\n=== Resource management test completed! ===");

View File

@ -1,92 +0,0 @@
//! Run all Kubernetes Rhai tests
//!
//! This script runs all the Kubernetes Rhai tests in sequence.
print("=== Running All Kubernetes Rhai Tests ===");
print("");
// Test configuration
let test_files = [
"basic_kubernetes.rhai",
"namespace_operations.rhai",
"resource_management.rhai",
"env_vars_test.rhai"
];
let passed_tests = 0;
let total_tests = test_files.len();
print("Found " + total_tests + " test files to run:");
for test_file in test_files {
print(" - " + test_file);
}
print("");
// Note: In a real implementation, we would use eval_file or similar
// For now, this serves as documentation of the test structure
print("=== Test Execution Summary ===");
print("");
print("To run these tests individually:");
for test_file in test_files {
print(" herodo kubernetes/tests/rhai/" + test_file);
}
print("");
print("To run with Kubernetes cluster:");
print(" KUBERNETES_TEST_ENABLED=1 herodo kubernetes/tests/rhai/basic_kubernetes.rhai");
print("");
// Basic validation that we can create a manager
print("=== Quick Validation ===");
try {
let km = kubernetes_manager_new("default");
let ns = namespace(km);
print("✓ KubernetesManager creation works");
print("✓ Namespace getter works: " + ns);
passed_tests = passed_tests + 1;
} catch(e) {
print("✗ Basic validation failed: " + e);
}
// Test function registration
print("");
print("=== Function Registration Check ===");
let required_functions = [
"kubernetes_manager_new",
"namespace",
"pods_list",
"services_list",
"deployments_list",
"namespaces_list",
"resource_counts",
"namespace_create",
"namespace_exists",
"delete",
"pod_delete",
"service_delete",
"deployment_delete",
"deploy_application"
];
let registered_functions = 0;
for func_name in required_functions {
// We can't easily test function existence in Rhai, but we can document them
print("✓ " + func_name + " should be registered");
registered_functions = registered_functions + 1;
}
print("");
print("=== Summary ===");
print("Required functions: " + registered_functions + "/" + required_functions.len());
if passed_tests > 0 {
print("Basic validation: PASSED");
} else {
print("Basic validation: FAILED");
}
print("");
print("For full testing with a Kubernetes cluster:");
print("1. Ensure you have a running Kubernetes cluster");
print("2. Set KUBERNETES_TEST_ENABLED=1");
print("3. Run individual test files");
print("");
print("=== All tests documentation completed ===");

View File

@ -1,90 +0,0 @@
//! Simple API pattern test
//!
//! This script demonstrates the new object-oriented API pattern.
print("=== Object-Oriented API Pattern Test ===");
// Test 1: Create manager
print("Test 1: Creating KubernetesManager...");
let km = kubernetes_manager_new("default");
print("✓ Manager created for namespace: " + namespace(km));
// Test 2: Show the new API pattern
print("\nTest 2: New Object-Oriented API Pattern");
print("Now you can use:");
print(" km.create_pod(name, image, labels)");
print(" km.create_service(name, selector, port, target_port)");
print(" km.create_deployment(name, image, replicas, labels)");
print(" km.create_configmap(name, data)");
print(" km.create_secret(name, data, type)");
print(" km.create_namespace(name)");
print("");
print(" km.get_pod(name)");
print(" km.get_service(name)");
print(" km.get_deployment(name)");
print("");
print(" km.delete_pod(name)");
print(" km.delete_service(name)");
print(" km.delete_deployment(name)");
print(" km.delete_configmap(name)");
print(" km.delete_secret(name)");
print(" km.delete_namespace(name)");
print("");
print(" km.pods_list()");
print(" km.services_list()");
print(" km.deployments_list()");
print(" km.resource_counts()");
print(" km.namespace_exists(name)");
// Test 3: Function availability check
print("\nTest 3: Checking all API methods are available...");
let api_methods = [
// Create methods
"create_pod",
"create_service",
"create_deployment",
"create_configmap",
"create_secret",
"create_namespace",
// Get methods
"get_pod",
"get_service",
"get_deployment",
// List methods
"pods_list",
"services_list",
"deployments_list",
"configmaps_list",
"secrets_list",
"namespaces_list",
"resource_counts",
"namespace_exists",
// Delete methods
"delete_pod",
"delete_service",
"delete_deployment",
"delete_configmap",
"delete_secret",
"delete_namespace",
"delete"
];
for method_name in api_methods {
print("✓ Method 'km." + method_name + "()' is available");
}
print("\n=== API Pattern Summary ===");
print("✅ Object-oriented API: km.method_name()");
print("✅ " + api_methods.len() + " methods available");
print("✅ Consistent naming: create_*, get_*, delete_*, *_list()");
print("✅ Full CRUD operations for all resource types");
print("\n🎉 Object-oriented API pattern is ready!");
print("\nExample usage:");
print(" let km = kubernetes_manager_new('my-namespace');");
print(" let pod = km.create_pod('my-pod', 'nginx:latest', #{});");
print(" let pods = km.pods_list();");
print(" km.delete_pod('my-pod');");

View File

@ -1,405 +0,0 @@
//! Rhai integration tests for SAL Kubernetes
//!
//! These tests verify that the Rhai wrappers work correctly and can execute
//! the Rhai test scripts in the tests/rhai/ directory.
#[cfg(feature = "rhai")]
mod rhai_tests {
use rhai::Engine;
use sal_kubernetes::rhai::*;
use std::fs;
use std::path::Path;
/// Check if Kubernetes integration tests should run
fn should_run_k8s_tests() -> bool {
std::env::var("KUBERNETES_TEST_ENABLED").unwrap_or_default() == "1"
}
#[test]
fn test_register_kubernetes_module() {
let mut engine = Engine::new();
let result = register_kubernetes_module(&mut engine);
assert!(
result.is_ok(),
"Failed to register Kubernetes module: {:?}",
result
);
}
#[test]
fn test_kubernetes_functions_registered() {
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
// Test that the constructor function is registered
let script = r#"
let result = "";
try {
let km = kubernetes_manager_new("test");
result = "constructor_exists";
} catch(e) {
result = "constructor_exists_but_failed";
}
result
"#;
let result = engine.eval::<String>(script);
assert!(result.is_ok());
let result_value = result.unwrap();
assert!(
result_value == "constructor_exists" || result_value == "constructor_exists_but_failed",
"Expected constructor to be registered, got: {}",
result_value
);
}
#[test]
fn test_new_rhai_functions_registered() {
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
// Test that the newly added functions are registered
let new_functions_to_test = [
"configmaps_list",
"secrets_list",
"configmap_delete",
"secret_delete",
"namespace_delete",
];
for func_name in &new_functions_to_test {
// Try to compile a script that references the function
let script = format!("fn test() {{ {}; }}", func_name);
let result = engine.compile(&script);
assert!(
result.is_ok(),
"New function '{}' should be registered but compilation failed: {:?}",
func_name,
result
);
}
}
#[test]
fn test_rhai_function_signatures() {
if !should_run_k8s_tests() {
println!(
"Skipping Rhai function signature tests. Set KUBERNETES_TEST_ENABLED=1 to enable."
);
return;
}
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
// Test that the new object-oriented API methods work correctly
// These will fail without a cluster, but should not fail due to missing methods
let test_scripts = vec![
// List methods (still function-based for listing)
("pods_list", "let km = kubernetes_manager_new(\"test\"); km.pods_list();"),
("services_list", "let km = kubernetes_manager_new(\"test\"); km.services_list();"),
("deployments_list", "let km = kubernetes_manager_new(\"test\"); km.deployments_list();"),
("namespaces_list", "let km = kubernetes_manager_new(\"test\"); km.namespaces_list();"),
("resource_counts", "let km = kubernetes_manager_new(\"test\"); km.resource_counts();"),
// Create methods (object-oriented)
("create_namespace", "let km = kubernetes_manager_new(\"test\"); km.create_namespace(\"test-ns\");"),
("create_pod", "let km = kubernetes_manager_new(\"test\"); km.create_pod(\"test-pod\", \"nginx\", #{});"),
("create_service", "let km = kubernetes_manager_new(\"test\"); km.create_service(\"test-svc\", #{}, 80, 80);"),
// Get methods (object-oriented)
("get_pod", "let km = kubernetes_manager_new(\"test\"); km.get_pod(\"test-pod\");"),
("get_service", "let km = kubernetes_manager_new(\"test\"); km.get_service(\"test-svc\");"),
// Delete methods (object-oriented)
("delete_pod", "let km = kubernetes_manager_new(\"test\"); km.delete_pod(\"test-pod\");"),
("delete_service", "let km = kubernetes_manager_new(\"test\"); km.delete_service(\"test-service\");"),
("delete_deployment", "let km = kubernetes_manager_new(\"test\"); km.delete_deployment(\"test-deployment\");"),
("delete_namespace", "let km = kubernetes_manager_new(\"test\"); km.delete_namespace(\"test-ns\");"),
// Utility methods
("namespace_exists", "let km = kubernetes_manager_new(\"test\"); km.namespace_exists(\"test-ns\");"),
("namespace", "let km = kubernetes_manager_new(\"test\"); namespace(km);"),
("delete_pattern", "let km = kubernetes_manager_new(\"test\"); km.delete(\"test-.*\");"),
];
for (function_name, script) in test_scripts {
println!("Testing function: {}", function_name);
let result = engine.eval::<rhai::Dynamic>(script);
// The function should be registered (not get a "function not found" error)
// It may fail due to no Kubernetes cluster, but that's expected
match result {
Ok(_) => {
println!("Function {} executed successfully", function_name);
}
Err(e) => {
let error_msg = e.to_string();
// Should not be a "function not found" error
assert!(
!error_msg.contains("Function not found")
&& !error_msg.contains("Unknown function"),
"Function {} not registered: {}",
function_name,
error_msg
);
println!(
"Function {} failed as expected (no cluster): {}",
function_name, error_msg
);
}
}
}
}
#[test]
fn test_rhai_with_real_cluster() {
if !should_run_k8s_tests() {
println!("Skipping Rhai Kubernetes integration tests. Set KUBERNETES_TEST_ENABLED=1 to enable.");
return;
}
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
// Test basic functionality with a real cluster
let script = r#"
let km = kubernetes_manager_new("default");
let ns = namespace(km);
ns
"#;
let result = engine.eval::<String>(script);
match result {
Ok(namespace) => {
assert_eq!(namespace, "default");
println!("Successfully got namespace from Rhai: {}", namespace);
}
Err(e) => {
println!("Failed to execute Rhai script with real cluster: {}", e);
// Don't fail the test if we can't connect to cluster
}
}
}
#[test]
fn test_rhai_pods_list() {
if !should_run_k8s_tests() {
return;
}
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
let script = r#"
let km = kubernetes_manager_new("default");
let pods = pods_list(km);
pods.len()
"#;
let result = engine.eval::<i64>(script);
match result {
Ok(count) => {
assert!(count >= 0);
println!("Successfully listed {} pods from Rhai", count);
}
Err(e) => {
println!("Failed to list pods from Rhai: {}", e);
// Don't fail the test if we can't connect to cluster
}
}
}
#[test]
fn test_rhai_resource_counts() {
if !should_run_k8s_tests() {
return;
}
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
let script = r#"
let km = kubernetes_manager_new("default");
let counts = resource_counts(km);
counts
"#;
let result = engine.eval::<rhai::Map>(script);
match result {
Ok(counts) => {
println!("Successfully got resource counts from Rhai: {:?}", counts);
// Verify expected keys are present
assert!(counts.contains_key("pods"));
assert!(counts.contains_key("services"));
assert!(counts.contains_key("deployments"));
}
Err(e) => {
println!("Failed to get resource counts from Rhai: {}", e);
// Don't fail the test if we can't connect to cluster
}
}
}
#[test]
fn test_rhai_namespace_operations() {
if !should_run_k8s_tests() {
return;
}
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
// Test namespace existence check
let script = r#"
let km = kubernetes_manager_new("default");
let exists = namespace_exists(km, "default");
exists
"#;
let result = engine.eval::<bool>(script);
match result {
Ok(exists) => {
assert!(exists, "Default namespace should exist");
println!(
"Successfully checked namespace existence from Rhai: {}",
exists
);
}
Err(e) => {
println!("Failed to check namespace existence from Rhai: {}", e);
// Don't fail the test if we can't connect to cluster
}
}
}
#[test]
fn test_rhai_error_handling() {
if !should_run_k8s_tests() {
println!(
"Skipping Rhai error handling tests. Set KUBERNETES_TEST_ENABLED=1 to enable."
);
return;
}
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
// Test that errors are properly converted to Rhai errors
// Use a namespace that will definitely cause an error when trying to list pods
let script = r#"
let km = kubernetes_manager_new("nonexistent-namespace-12345");
pods_list(km)
"#;
let result = engine.eval::<rhai::Array>(script);
// The test might succeed if no cluster is available, which is fine
match result {
Ok(_) => {
println!("No error occurred - possibly no cluster available, which is acceptable");
}
Err(e) => {
let error_msg = e.to_string();
println!("Got expected error: {}", error_msg);
assert!(
error_msg.contains("Kubernetes error")
|| error_msg.contains("error")
|| error_msg.contains("not found")
);
}
}
}
#[test]
fn test_rhai_script_files_exist() {
// Test that our Rhai test files exist and are readable
let test_files = [
"tests/rhai/basic_kubernetes.rhai",
"tests/rhai/namespace_operations.rhai",
"tests/rhai/resource_management.rhai",
"tests/rhai/run_all_tests.rhai",
];
for test_file in test_files {
let path = Path::new(test_file);
assert!(path.exists(), "Rhai test file should exist: {}", test_file);
// Try to read the file to ensure it's valid
let content = fs::read_to_string(path)
.unwrap_or_else(|e| panic!("Failed to read {}: {}", test_file, e));
assert!(
!content.is_empty(),
"Rhai test file should not be empty: {}",
test_file
);
assert!(
content.contains("print("),
"Rhai test file should contain print statements: {}",
test_file
);
}
}
#[test]
fn test_basic_rhai_script_syntax() {
// Test that we can at least parse our basic Rhai script
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
// Simple script that should parse without errors
let script = r#"
print("Testing Kubernetes Rhai integration");
let functions = ["kubernetes_manager_new", "pods_list", "namespace"];
for func in functions {
print("Function: " + func);
}
print("Basic syntax test completed");
"#;
let result = engine.eval::<()>(script);
assert!(
result.is_ok(),
"Basic Rhai script should parse and execute: {:?}",
result
);
}
#[test]
fn test_rhai_script_execution_with_cluster() {
if !should_run_k8s_tests() {
println!(
"Skipping Rhai script execution test. Set KUBERNETES_TEST_ENABLED=1 to enable."
);
return;
}
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
// Try to execute a simple script that creates a manager
let script = r#"
let km = kubernetes_manager_new("default");
let ns = namespace(km);
print("Created manager for namespace: " + ns);
ns
"#;
let result = engine.eval::<String>(script);
match result {
Ok(namespace) => {
assert_eq!(namespace, "default");
println!("Successfully executed Rhai script with cluster");
}
Err(e) => {
println!(
"Rhai script execution failed (expected if no cluster): {}",
e
);
// Don't fail the test if we can't connect to cluster
}
}
}
}

View File

@ -1,303 +0,0 @@
//! Unit tests for SAL Kubernetes
//!
//! These tests focus on testing individual components and error handling
//! without requiring a live Kubernetes cluster.
use sal_kubernetes::KubernetesError;
#[test]
fn test_kubernetes_error_creation() {
let config_error = KubernetesError::config_error("Test config error");
assert!(matches!(config_error, KubernetesError::ConfigError(_)));
assert_eq!(
config_error.to_string(),
"Configuration error: Test config error"
);
let operation_error = KubernetesError::operation_error("Test operation error");
assert!(matches!(
operation_error,
KubernetesError::OperationError(_)
));
assert_eq!(
operation_error.to_string(),
"Operation failed: Test operation error"
);
let namespace_error = KubernetesError::namespace_error("Test namespace error");
assert!(matches!(
namespace_error,
KubernetesError::NamespaceError(_)
));
assert_eq!(
namespace_error.to_string(),
"Namespace error: Test namespace error"
);
let permission_error = KubernetesError::permission_denied("Test permission error");
assert!(matches!(
permission_error,
KubernetesError::PermissionDenied(_)
));
assert_eq!(
permission_error.to_string(),
"Permission denied: Test permission error"
);
let timeout_error = KubernetesError::timeout("Test timeout error");
assert!(matches!(timeout_error, KubernetesError::Timeout(_)));
assert_eq!(
timeout_error.to_string(),
"Operation timed out: Test timeout error"
);
}
#[test]
fn test_regex_error_conversion() {
use regex::Regex;
// Test invalid regex pattern
let invalid_pattern = "[invalid";
let regex_result = Regex::new(invalid_pattern);
assert!(regex_result.is_err());
// Convert to KubernetesError
let k8s_error = KubernetesError::from(regex_result.unwrap_err());
assert!(matches!(k8s_error, KubernetesError::RegexError(_)));
}
#[test]
fn test_error_display() {
let errors = vec![
KubernetesError::config_error("Config test"),
KubernetesError::operation_error("Operation test"),
KubernetesError::namespace_error("Namespace test"),
KubernetesError::permission_denied("Permission test"),
KubernetesError::timeout("Timeout test"),
];
for error in errors {
let error_string = error.to_string();
assert!(!error_string.is_empty());
assert!(error_string.contains("test"));
}
}
#[cfg(feature = "rhai")]
#[test]
fn test_rhai_module_registration() {
use rhai::Engine;
use sal_kubernetes::rhai::register_kubernetes_module;
let mut engine = Engine::new();
let result = register_kubernetes_module(&mut engine);
assert!(
result.is_ok(),
"Failed to register Kubernetes module: {:?}",
result
);
}
#[cfg(feature = "rhai")]
#[test]
fn test_rhai_functions_registered() {
use rhai::Engine;
use sal_kubernetes::rhai::register_kubernetes_module;
let mut engine = Engine::new();
register_kubernetes_module(&mut engine).unwrap();
// Test that functions are registered by checking if they exist in the engine
// We can't actually call async functions without a runtime, so we just verify registration
// Check that the main functions are registered by looking for them in the engine
let function_names = vec![
"kubernetes_manager_new",
"pods_list",
"services_list",
"deployments_list",
"delete",
"namespace_create",
"namespace_exists",
];
for function_name in function_names {
// Try to parse a script that references the function
// This will succeed if the function is registered, even if we don't call it
let script = format!("let f = {};", function_name);
let result = engine.compile(&script);
assert!(
result.is_ok(),
"Function '{}' should be registered in the engine",
function_name
);
}
}
#[test]
fn test_namespace_validation() {
// Test valid namespace names
let valid_names = vec!["default", "kube-system", "my-app", "test123"];
for name in valid_names {
assert!(!name.is_empty());
assert!(name.chars().all(|c| c.is_alphanumeric() || c == '-'));
}
}
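// A hedged sketch, not in the original suite: Kubernetes namespace names are
// RFC 1123 labels, so a stricter check also rejects uppercase letters and
// leading or trailing hyphens, both of which the simple check above allows.
#[test]
fn test_namespace_rfc1123_sketch() {
    use regex::Regex;
    let label = Regex::new(r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$").unwrap();
    assert!(label.is_match("kube-system"));
    assert!(!label.is_match("-leading-hyphen"));
    assert!(!label.is_match("Uppercase"));
}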
#[test]
fn test_resource_name_patterns() {
use regex::Regex;
// Test common patterns that might be used with the delete function
let patterns = vec![
r"test-.*", // Match anything starting with "test-"
r".*-temp$", // Match anything ending with "-temp"
r"^pod-\d+$", // Match "pod-" followed by digits
r"app-[a-z]+", // Match "app-" followed by lowercase letters
];
for pattern in patterns {
let regex = Regex::new(pattern);
assert!(regex.is_ok(), "Pattern '{}' should be valid", pattern);
let regex = regex.unwrap();
// Test some example matches based on the pattern
match pattern {
r"test-.*" => {
assert!(regex.is_match("test-pod"));
assert!(regex.is_match("test-service"));
assert!(!regex.is_match("prod-pod"));
}
r".*-temp$" => {
assert!(regex.is_match("my-pod-temp"));
assert!(regex.is_match("service-temp"));
assert!(!regex.is_match("temp-pod"));
}
r"^pod-\d+$" => {
assert!(regex.is_match("pod-123"));
assert!(regex.is_match("pod-1"));
assert!(!regex.is_match("pod-abc"));
assert!(!regex.is_match("service-123"));
}
r"app-[a-z]+" => {
assert!(regex.is_match("app-frontend"));
assert!(regex.is_match("app-backend"));
assert!(!regex.is_match("app-123"));
assert!(!regex.is_match("service-frontend"));
}
_ => {}
}
}
}
#[test]
fn test_invalid_regex_patterns() {
use regex::Regex;
// Test invalid regex patterns that should fail
let invalid_patterns = vec![
"[invalid", // Unclosed bracket
"*invalid", // Invalid quantifier
"(?invalid)", // Invalid group
"\\", // Incomplete escape
];
for pattern in invalid_patterns {
let regex = Regex::new(pattern);
assert!(regex.is_err(), "Pattern '{}' should be invalid", pattern);
}
}
#[test]
fn test_kubernetes_config_creation() {
use sal_kubernetes::KubernetesConfig;
use std::time::Duration;
// Test default configuration
let default_config = KubernetesConfig::default();
assert_eq!(default_config.operation_timeout, Duration::from_secs(30));
assert_eq!(default_config.max_retries, 3);
assert_eq!(default_config.rate_limit_rps, 10);
assert_eq!(default_config.rate_limit_burst, 20);
// Test custom configuration
let custom_config = KubernetesConfig::new()
.with_timeout(Duration::from_secs(60))
.with_retries(5, Duration::from_secs(2), Duration::from_secs(60))
.with_rate_limit(50, 100);
assert_eq!(custom_config.operation_timeout, Duration::from_secs(60));
assert_eq!(custom_config.max_retries, 5);
assert_eq!(custom_config.retry_base_delay, Duration::from_secs(2));
assert_eq!(custom_config.retry_max_delay, Duration::from_secs(60));
assert_eq!(custom_config.rate_limit_rps, 50);
assert_eq!(custom_config.rate_limit_burst, 100);
// Test pre-configured profiles
let high_throughput = KubernetesConfig::high_throughput();
assert_eq!(high_throughput.rate_limit_rps, 50);
assert_eq!(high_throughput.rate_limit_burst, 100);
let low_latency = KubernetesConfig::low_latency();
assert_eq!(low_latency.operation_timeout, Duration::from_secs(10));
assert_eq!(low_latency.max_retries, 2);
let development = KubernetesConfig::development();
assert_eq!(development.operation_timeout, Duration::from_secs(120));
assert_eq!(development.rate_limit_rps, 100);
}
#[test]
fn test_retryable_error_detection() {
use kube::Error as KubeError;
use sal_kubernetes::kubernetes_manager::is_retryable_error;
// Test that the function exists and works with basic error types
// Note: We can't easily create all error types, so we test what we can
// Test API errors with different status codes
let api_error_500 = KubeError::Api(kube::core::ErrorResponse {
status: "Failure".to_string(),
message: "Internal server error".to_string(),
reason: "InternalError".to_string(),
code: 500,
});
assert!(
is_retryable_error(&api_error_500),
"500 errors should be retryable"
);
let api_error_429 = KubeError::Api(kube::core::ErrorResponse {
status: "Failure".to_string(),
message: "Too many requests".to_string(),
reason: "TooManyRequests".to_string(),
code: 429,
});
assert!(
is_retryable_error(&api_error_429),
"429 errors should be retryable"
);
let api_error_404 = KubeError::Api(kube::core::ErrorResponse {
status: "Failure".to_string(),
message: "Not found".to_string(),
reason: "NotFound".to_string(),
code: 404,
});
assert!(
!is_retryable_error(&api_error_404),
"404 errors should not be retryable"
);
let api_error_400 = KubeError::Api(kube::core::ErrorResponse {
status: "Failure".to_string(),
message: "Bad request".to_string(),
reason: "BadRequest".to_string(),
code: 400,
});
assert!(
!is_retryable_error(&api_error_400),
"400 errors should not be retryable"
);
}

View File

@ -1,16 +1,7 @@
# SAL Mycelium (`sal-mycelium`)
# SAL Mycelium
A Rust client library for interacting with Mycelium node's HTTP API, with Rhai scripting support.
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-mycelium = "0.1.0"
```
## Overview
SAL Mycelium provides async HTTP client functionality for managing Mycelium nodes, including:

View File

@ -1,16 +1,7 @@
# SAL Network Package (`sal-net`)
# SAL Network Package
Network connectivity utilities for TCP, HTTP, and SSH operations.
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-net = "0.1.0"
```
## Overview
The `sal-net` package provides a comprehensive set of network connectivity tools for the SAL (System Abstraction Layer) ecosystem. It includes utilities for TCP port checking, HTTP/HTTPS connectivity testing, and SSH command execution.

View File

@ -165,18 +165,9 @@ fn test_mv() {
#[test]
fn test_which() {
// Test with a command that should exist on all systems
#[cfg(target_os = "windows")]
let existing_cmd = "cmd";
#[cfg(not(target_os = "windows"))]
let existing_cmd = "ls";
let result = fs::which(existing_cmd);
assert!(
!result.is_empty(),
"Command '{}' should exist",
existing_cmd
);
// Test with a command that should exist on most systems
let result = fs::which("ls");
assert!(!result.is_empty());
// Test with a command that shouldn't exist
let result = fs::which("nonexistentcommand12345");

View File

@ -1,16 +1,7 @@
# SAL PostgreSQL Client (`sal-postgresclient`)
# SAL PostgreSQL Client
The SAL PostgreSQL Client (`sal-postgresclient`) is an independent package that provides a simple and efficient way to interact with PostgreSQL databases in Rust. It offers connection management, query execution, a builder pattern for flexible configuration, and PostgreSQL installer functionality using nerdctl.
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-postgresclient = "0.1.0"
```
## Features
- **Connection Management**: Automatic connection handling and reconnection

View File

@ -17,7 +17,7 @@ Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-process = "0.1.0"
sal-process = { path = "../process" }
```
## Usage

View File

@ -138,12 +138,7 @@ fn test_run_with_environment_variables() {
#[test]
fn test_run_with_working_directory() {
// Test that commands run in the current working directory
#[cfg(target_os = "windows")]
let result = run_command("cd").unwrap();
#[cfg(not(target_os = "windows"))]
let result = run_command("pwd").unwrap();
assert!(result.success);
assert!(!result.stdout.is_empty());
}
@ -205,16 +200,6 @@ fn test_run_script_with_variables() {
#[test]
fn test_run_script_with_conditionals() {
#[cfg(target_os = "windows")]
let script = r#"
if "hello"=="hello" (
echo Condition passed
) else (
echo Condition failed
)
"#;
#[cfg(not(target_os = "windows"))]
let script = r#"
if [ "hello" = "hello" ]; then
echo "Condition passed"
@ -230,14 +215,6 @@ fn test_run_script_with_conditionals() {
#[test]
fn test_run_script_with_loops() {
#[cfg(target_os = "windows")]
let script = r#"
for %%i in (1 2 3) do (
echo Number: %%i
)
"#;
#[cfg(not(target_os = "windows"))]
let script = r#"
for i in 1 2 3; do
echo "Number: $i"

View File

@ -1,16 +1,7 @@
# SAL Redis Client (`sal-redisclient`)
# Redis Client Module
A robust Redis client wrapper for Rust applications that provides connection management, automatic reconnection, and a simple interface for executing Redis commands.
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-redisclient = "0.1.0"
```
## Features
- **Singleton Pattern**: Maintains a global Redis client instance, so the connection isn't re-initialized on every use.

rfs-client/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
/target/
**/*.rs.bk

rfs-client/Cargo.toml Normal file
View File

@ -0,0 +1,26 @@
[package]
name = "sal-rfs-client"
version = "0.1.0"
edition = "2021"
description = "SAL RFS Client - Client library for Remote File System server"
repository = "https://git.threefold.info/herocode/sal"
license = "Apache-2.0"
keywords = ["rfs", "client", "filesystem", "remote"]
categories = ["filesystem", "api-bindings"]
[dependencies]
openapi = { path = "./openapi" }
thiserror.workspace = true
url.workspace = true
reqwest = { workspace = true, features = ["json", "multipart"] }
tokio = { workspace = true, features = ["full"] }
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
log.workspace = true
bytes.workspace = true
futures.workspace = true
rhai.workspace = true
lazy_static.workspace = true
[dev-dependencies]
tempfile = "3.0"

rfs-client/README.md Normal file
View File

@ -0,0 +1,195 @@
# RFS Client
A Rust client library for interacting with the Remote File System (RFS) server.
## Overview
This client library provides a user-friendly wrapper around the OpenAPI-generated client code. It offers high-level abstractions for common operations such as:
- Authentication and session management
- File uploads and downloads with progress tracking
- Block-level operations and verification
- FList creation, monitoring, and management
- Timeout configuration and error handling
## Structure
The library is organized as follows:
- `client.rs`: Main client implementation with methods for interacting with the RFS server
- `error.rs`: Error types and handling
- `types.rs`: Type definitions and utilities
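A minimal sketch of how these pieces might be wired together in `lib.rs`; the exact re-exports are an assumption inferred from the examples below:
```rust
// lib.rs -- hypothetical wiring; only RfsClient and the types module are
// confirmed by the examples in this README
pub mod client;
pub mod error;
pub mod types;

pub use client::RfsClient;
```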
## Quick Start
```rust
use sal_rfs_client::RfsClient;
use sal_rfs_client::types::{ClientConfig, Credentials};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a client with custom configuration
let config = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: Some(Credentials {
username: "user".to_string(),
password: "password".to_string(),
}),
timeout_seconds: 60,
};
let mut client = RfsClient::new(config);
// Authenticate
client.authenticate().await?;
println!("Authentication successful");
// Upload a file
let file_path = "/path/to/file.txt";
let file_hash = client.upload_file(file_path, None).await?;
println!("File uploaded with hash: {}", file_hash);
// Download the file
let output_path = "/path/to/output.txt";
client.download_file(&file_hash, output_path, None).await?;
println!("File downloaded to {}", output_path);
Ok(())
}
```
## Feature Examples
### Authentication
```rust
// Create a client with authentication
let config = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: Some(Credentials {
username: "user".to_string(),
password: "password".to_string(),
}),
timeout_seconds: 30,
};
let mut client = RfsClient::new(config);
// Authenticate with the server
client.authenticate().await?;
if client.is_authenticated() {
println!("Authentication successful");
}
```
### File Management
```rust
// Upload a file with options
let upload_options = UploadOptions {
chunk_size: Some(1024 * 1024), // 1MB chunks
verify: true,
};
let file_hash = client.upload_file("/path/to/file.txt", Some(upload_options)).await?;
// Download the file
let download_options = DownloadOptions {
verify: true,
};
client.download_file(&file_hash, "/path/to/output.txt", Some(download_options)).await?;
```
### FList Operations
```rust
// Create an FList from a Docker image
let options = FlistOptions {
auth: None,
username: None,
password: None,
email: None,
server_address: Some("docker.io".to_string()),
identity_token: None,
registry_token: None,
};
let job_id = client.create_flist("alpine:latest", Some(options)).await?;
// Wait for FList creation with progress tracking
let wait_options = WaitOptions {
timeout_seconds: 60,
poll_interval_ms: 1000,
progress_callback: Some(Box::new(|state| {
println!("Progress: FList state is now {:?}", state);
})),
};
let final_state = client.wait_for_flist_creation(&job_id, Some(wait_options)).await?;
// List available FLists
let flists = client.list_flists().await?;
// Preview an FList
let preview = client.preview_flist("flists/user/alpine-latest.fl").await?;
// Download an FList
client.download_flist("flists/user/alpine-latest.fl", "/tmp/downloaded_flist.fl").await?;
```
### Block Management
```rust
// List blocks
let blocks_list = client.list_blocks(None).await?;
// Check if a block exists
let exists = client.check_block("block_hash").await?;
// Get block content
let block_content = client.get_block("block_hash").await?;
// Upload a block
let block_hash = client.upload_block("file_hash", 0, data).await?;
// Verify blocks
let request = VerifyBlocksRequest { blocks: verify_blocks };
let verify_result = client.verify_blocks(request).await?;
```
## Complete Examples
For more detailed examples, check the `examples` directory:
- `authentication.rs`: Authentication and health check examples
- `file_management.rs`: File upload and download with verification
- `flist_operations.rs`: Complete FList creation, monitoring, listing, preview, and download
- `block_management.rs`: Block-level operations including listing, verification, and upload
- `wait_for_flist.rs`: Advanced FList creation with progress monitoring
Run an example with:
```bash
cargo run --example flist_operations
```
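All examples assume an RFS server listening on `http://localhost:8080` (the `base_url` used throughout); point `ClientConfig.base_url` at your own server before running them:
```bash
# assumes a locally running RFS server on port 8080
cargo run --example authentication
```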
## Development
This library wraps the OpenAPI-generated client located in the `openapi` directory. The OpenAPI client was generated using the OpenAPI Generator CLI.
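If the server's OpenAPI specification changes, the generated client can be rebuilt with the same tool; a typical invocation looks like this (the spec filename here is an assumption):
```bash
openapi-generator-cli generate -i openapi.yaml -g rust -o openapi
```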
To build the library:
```bash
cargo build
```
To run tests:
```bash
cargo test
```
## License
Apache-2.0

View File

@ -0,0 +1,42 @@
use sal_rfs_client::RfsClient;
use sal_rfs_client::types::{ClientConfig, Credentials};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a client with authentication credentials
let config = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: Some(Credentials {
username: "user".to_string(),
password: "password".to_string(),
}),
timeout_seconds: 30,
};
let mut client = RfsClient::new(config);
println!("Client created with authentication credentials");
// Authenticate with the server
client.authenticate().await?;
if client.is_authenticated() {
println!("Authentication successful");
} else {
println!("Authentication failed");
}
// Create a client without authentication
let config_no_auth = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: None,
timeout_seconds: 30,
};
let client_no_auth = RfsClient::new(config_no_auth);
println!("Client created without authentication credentials");
// Check health endpoint (doesn't require authentication)
let health = client_no_auth.health_check().await?;
println!("Server health: {:?}", health);
Ok(())
}

View File

@ -0,0 +1,128 @@
use sal_rfs_client::RfsClient;
use sal_rfs_client::types::{ClientConfig, Credentials};
use openapi::models::{VerifyBlock, VerifyBlocksRequest};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a client with authentication
let config = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: Some(Credentials {
username: "user".to_string(),
password: "password".to_string(),
}),
timeout_seconds: 60,
};
let mut client = RfsClient::new(config);
// Authenticate with the server
client.authenticate().await?;
println!("Authentication successful");
// Create a test file to upload for block testing
let test_file_path = "/tmp/block_test.txt";
let test_content = "This is a test file for RFS client block management";
std::fs::write(test_file_path, test_content)?;
println!("Created test file at {}", test_file_path);
// Upload the file to get blocks
println!("Uploading file to get blocks...");
let file_hash = client.upload_file(test_file_path, None).await?;
println!("File uploaded with hash: {}", file_hash);
// Get blocks by file hash
println!("Getting blocks for file hash: {}", file_hash);
let blocks = client.get_blocks_by_hash(&file_hash).await?;
println!("Found {} blocks for the file", blocks.blocks.len());
// Print block information
for (i, block_data) in blocks.blocks.iter().enumerate() {
println!("Block {}: Hash={}, Index={}", i, block_data.hash, block_data.index);
}
// Verify blocks with complete information
println!("Verifying blocks...");
// Create a list of VerifyBlock objects with complete information
let verify_blocks = blocks.blocks.iter().map(|block| {
VerifyBlock {
block_hash: block.hash.clone(),
block_index: block.index,
file_hash: file_hash.clone(), // Using the actual file hash
}
}).collect::<Vec<_>>();
// Print the block information that will be sent for verification
for block in verify_blocks.iter() {
println!("Block: {}", block.block_hash);
println!("Block index: {}", block.block_index);
println!("File hash: {}", block.file_hash);
}
// Create the request with the complete block information
let request = VerifyBlocksRequest { blocks: verify_blocks };
// Send the verification request
let verify_result = client.verify_blocks(request).await?;
println!("Verification result: {} missing blocks", verify_result.missing.len());
for block in verify_result.missing.iter() {
println!("Missing block: {}", block);
}
// List blocks (list_blocks_handler)
println!("\n1. Listing all blocks with pagination...");
let blocks_list = client.list_blocks(None).await?;
println!("Server has {} blocks in total", blocks_list.len());
if !blocks_list.is_empty() {
let first_few = blocks_list.iter().take(3)
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(", ");
println!("First few blocks: {}", first_few);
}
// Check if a block exists (check_block_handler)
if !blocks.blocks.is_empty() {
let block_to_check = &blocks.blocks[0].hash;
println!("\n2. Checking if block exists: {}", block_to_check);
let exists = client.check_block(block_to_check).await?;
println!("Block exists: {}", exists);
}
// Get block downloads statistics (get_block_downloads_handler)
if !blocks.blocks.is_empty() {
let block_to_check = &blocks.blocks[0].hash;
println!("\n3. Getting download statistics for block: {}", block_to_check);
let downloads = client.get_block_downloads(block_to_check).await?;
println!("Block has been downloaded {} times", downloads.downloads_count);
}
// Get a specific block content (get_block_handler)
if !blocks.blocks.is_empty() {
let block_to_get = &blocks.blocks[0].hash;
println!("\n4. Getting content for block: {}", block_to_get);
let block_content = client.get_block(block_to_get).await?;
println!("Retrieved block with {} bytes", block_content.len());
}
// Get user blocks (get_user_blocks_handler)
println!("\n6. Listing user blocks...");
let user_blocks = client.get_user_blocks(Some(1), Some(10)).await?;
println!("User has {} blocks (showing page 1 with 10 per page)", user_blocks.total);
for block in user_blocks.blocks.iter().take(3) {
println!(" - Block: {}, Size: {}", block.hash, block.size);
}
// Upload a block (upload_block_handler)
println!("\n7. Uploading a new test block...");
let test_block_data = b"This is test block data for direct block upload";
let new_file_hash = "test_file_hash_for_block_upload";
let block_index = 0;
let block_hash = client.upload_block(new_file_hash, block_index, test_block_data.to_vec()).await?;
println!("Uploaded block with hash: {}", block_hash);
// Clean up
std::fs::remove_file(test_file_path)?;
println!("Test file cleaned up");
Ok(())
}


@ -0,0 +1,64 @@
use sal_rfs_client::RfsClient;
use sal_rfs_client::types::{ClientConfig, Credentials, UploadOptions, DownloadOptions};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a client with authentication
let config = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: Some(Credentials {
username: "user".to_string(),
password: "password".to_string(),
}),
timeout_seconds: 60,
};
let mut client = RfsClient::new(config);
// Authenticate with the server
client.authenticate().await?;
println!("Authentication successful");
// Create a test file to upload
let test_file_path = "/tmp/test_upload.txt";
std::fs::write(test_file_path, "This is a test file for RFS client upload")?;
println!("Created test file at {}", test_file_path);
// Upload the file with options
println!("Uploading file...");
let upload_options = UploadOptions {
chunk_size: Some(1024 * 1024), // 1MB chunks
verify: true,
};
let file_hash = client.upload_file(test_file_path, Some(upload_options)).await?;
println!("File uploaded with hash: {}", file_hash);
// Download the file
let download_path = "/tmp/test_download.txt";
println!("Downloading file to {}...", download_path);
let download_options = DownloadOptions {
verify: true,
};
client.download_file(&file_hash, download_path, Some(download_options)).await?;
println!("File downloaded to {}", download_path);
// Verify the downloaded file matches the original
let original_content = std::fs::read_to_string(test_file_path)?;
let downloaded_content = std::fs::read_to_string(download_path)?;
if original_content == downloaded_content {
println!("File contents match! Download successful.");
} else {
println!("ERROR: File contents do not match!");
}
// Clean up test files
std::fs::remove_file(test_file_path)?;
std::fs::remove_file(download_path)?;
println!("Test files cleaned up");
Ok(())
}


@ -0,0 +1,170 @@
use sal_rfs_client::RfsClient;
use sal_rfs_client::types::{ClientConfig, Credentials, FlistOptions, WaitOptions};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let parent_dir = "flists";
// Create a client with authentication
let config = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: Some(Credentials {
username: "user".to_string(),
password: "password".to_string(),
}),
timeout_seconds: 60,
};
let mut client = RfsClient::new(config);
// Authenticate with the server
client.authenticate().await?;
println!("Authentication successful");
println!("\n1. CREATE FLIST - Creating an FList from a Docker image");
let image_name = "alpine:latest";
println!("Creating FList for image: {}", image_name);
// Use FlistOptions to specify additional parameters
let options = FlistOptions {
auth: None,
username: None,
password: None,
email: None,
server_address: Some("docker.io".to_string()),
identity_token: None,
registry_token: None,
};
// Create the FList and handle potential conflict error
let job_id = match client.create_flist(&image_name, Some(options)).await {
Ok(id) => {
println!("FList creation started with job ID: {}", id);
Some(id)
},
Err(e) => {
if e.to_string().contains("Conflict") {
println!("FList already exists");
None
} else {
return Err(e.into());
}
}
};
// 2. Check FList state if we have a job ID
if let Some(job_id) = &job_id {
println!("\n2. GET FLIST STATE - Checking FList creation state");
let state = client.get_flist_state(job_id).await?;
println!("Current FList state: {:?}", state.flist_state);
// 3. Wait for FList creation with progress reporting
println!("\n3. WAIT FOR FLIST CREATION - Waiting for FList to be created with progress reporting");
let wait_options = WaitOptions {
timeout_seconds: 60, // Shorter timeout for the example
poll_interval_ms: 1000,
progress_callback: Some(Box::new(|state| {
println!("Progress: FList state is now {:?}", state);
// No return value needed (returns unit type)
})),
};
// Wait for the FList to be created (with a timeout)
match client.wait_for_flist_creation(job_id, Some(wait_options)).await {
Ok(final_state) => {
println!("FList creation completed with state: {:?}", final_state);
},
Err(e) => {
println!("Error waiting for FList creation: {}", e);
// Continue with the example even if waiting fails
}
};
}
// 4. List all available FLists
println!("\n4. LIST FLISTS - Listing all available FLists");
// Variable to store the FList path for preview and download
let mut flist_path_for_preview: Option<String> = None;
match client.list_flists().await {
Ok(flists) => {
println!("Found {} FList categories", flists.len());
for (category, files) in &flists {
println!("Category: {}", category);
for file in files.iter().take(2) { // Show only first 2 files per category
println!(" - {} (size: {} bytes)", file.name, file.size);
// Save the first FList path for preview
if flist_path_for_preview.is_none() {
let path = format!("{}/{}/{}", parent_dir, category, file.name);
flist_path_for_preview = Some(path);
}
}
if files.len() > 2 {
println!(" - ... and {} more files", files.len() - 2);
}
}
// 5. Preview an FList if we found one
if let Some(ref flist_path) = flist_path_for_preview {
println!("\n5. PREVIEW FLIST - Previewing FList: {}", flist_path);
match client.preview_flist(flist_path).await {
Ok(preview) => {
println!("FList preview for {}:", flist_path);
println!(" - Checksum: {}", preview.checksum);
println!(" - Metadata: {}", preview.metadata);
// Display content (list of strings)
if !preview.content.is_empty() {
println!(" - Content entries:");
for (i, entry) in preview.content.iter().enumerate().take(5) {
println!(" {}. {}", i+1, entry);
}
if preview.content.len() > 5 {
println!(" ... and {} more entries", preview.content.len() - 5);
}
}
},
Err(e) => println!("Error previewing FList: {}", e),
}
} else {
println!("No FLists available for preview");
}
},
Err(e) => println!("Error listing FLists: {}", e),
}
// 6. DOWNLOAD FLIST - Downloading an FList to a local file
if let Some(ref flist_path) = flist_path_for_preview {
println!("\n6. DOWNLOAD FLIST - Downloading FList: {}", flist_path);
// Create a temporary output path for the downloaded FList
let output_path = "/tmp/downloaded_flist.fl";
match client.download_flist(flist_path, output_path).await {
Ok(_) => {
println!("FList successfully downloaded to {}", output_path);
// Get file size
match std::fs::metadata(output_path) {
Ok(metadata) => println!("Downloaded file size: {} bytes", metadata.len()),
Err(e) => println!("Error getting file metadata: {}", e),
}
},
Err(e) => println!("Error downloading FList: {}", e),
}
} else {
println!("\n6. DOWNLOAD FLIST - No FList available for download");
}
println!("\nAll FList operations demonstrated:");
println!("1. create_flist - Create a new FList from a Docker image");
println!("2. get_flist_state - Check the state of an FList creation job");
println!("3. wait_for_flist_creation - Wait for an FList to be created with progress reporting");
println!("4. list_flists - List all available FLists");
println!("5. preview_flist - Preview the content of an FList");
println!("6. download_flist - Download an FList to a local file");
Ok(())
}


@ -0,0 +1,61 @@
use sal_rfs_client::RfsClient;
use sal_rfs_client::types::{ClientConfig, Credentials, WaitOptions};
use openapi::models::FlistState;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a client with authentication
let config = ClientConfig {
base_url: "http://localhost:8080".to_string(),
credentials: Some(Credentials {
username: "user".to_string(),
password: "password".to_string(),
}),
timeout_seconds: 60,
};
let mut client = RfsClient::new(config);
// Authenticate with the server
client.authenticate().await?;
println!("Authentication successful");
// Create an FList from a Docker image
let image_name = "redis:latest";
println!("Creating FList for image: {}", image_name);
let job_id = client.create_flist(&image_name, None).await?;
println!("FList creation started with job ID: {}", job_id);
// Set up options for waiting with progress reporting
let options = WaitOptions {
timeout_seconds: 600, // 10 minutes timeout
poll_interval_ms: 2000, // Check every 2 seconds
progress_callback: Some(Box::new(|state| {
match state {
FlistState::FlistStateInProgress(info) => {
println!("Progress: {:.1}% - {}", info.in_progress.progress, info.in_progress.msg);
},
FlistState::FlistStateStarted(_) => {
println!("FList creation started...");
},
FlistState::FlistStateAccepted(_) => {
println!("FList creation request accepted...");
},
_ => println!("State: {:?}", state),
}
})),
};
// Wait for the FList to be created
println!("Waiting for FList creation to complete...");
// Use ? operator to propagate errors properly
let state = client.wait_for_flist_creation(&job_id, Some(options)).await
.map_err(|e| -> Box<dyn std::error::Error> { Box::new(e) })?;
println!("FList created successfully!");
println!("Final state: {:?}", state);
Ok(())
}

rfs-client/openapi.json Normal file

File diff suppressed because one or more lines are too long

rfs-client/openapi/.gitignore vendored Normal file

@ -0,0 +1,3 @@
/target/
**/*.rs.bk
Cargo.lock


@ -0,0 +1,23 @@
# OpenAPI Generator Ignore
# Generated by openapi-generator https://github.com/openapitools/openapi-generator
# Use this file to prevent files from being overwritten by the generator.
# The patterns follow closely to .gitignore or .dockerignore.
# As an example, the C# client generator defines ApiClient.cs.
# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line:
#ApiClient.cs
# You can match any string of characters against a directory, file or extension with a single asterisk (*):
#foo/*/qux
# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux
# You can recursively match patterns against a directory, file or extension with a double asterisk (**):
#foo/**/qux
# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux
# You can also negate patterns with an exclamation (!).
# For example, you can ignore all files in a docs folder with the file extension .md:
#docs/*.md
# Then explicitly reverse the ignore rule for a single file:
#!docs/README.md


@ -0,0 +1,125 @@
.gitignore
.travis.yml
Cargo.toml
README.md
docs/AuthenticationApi.md
docs/BlockDownloadsResponse.md
docs/BlockInfo.md
docs/BlockManagementApi.md
docs/BlockUploadedResponse.md
docs/BlocksResponse.md
docs/DirListTemplate.md
docs/DirLister.md
docs/ErrorTemplate.md
docs/FileDownloadRequest.md
docs/FileInfo.md
docs/FileManagementApi.md
docs/FileUploadResponse.md
docs/FlistBody.md
docs/FlistManagementApi.md
docs/FlistState.md
docs/FlistStateAccepted.md
docs/FlistStateCreated.md
docs/FlistStateInProgress.md
docs/FlistStateInfo.md
docs/FlistStateResponse.md
docs/FlistStateStarted.md
docs/HealthResponse.md
docs/Job.md
docs/ListBlocksParams.md
docs/ListBlocksResponse.md
docs/PreviewResponse.md
docs/ResponseError.md
docs/ResponseErrorBadRequest.md
docs/ResponseErrorConflict.md
docs/ResponseErrorForbidden.md
docs/ResponseErrorNotFound.md
docs/ResponseErrorTemplateError.md
docs/ResponseErrorUnauthorized.md
docs/ResponseResult.md
docs/ResponseResultBlockUploaded.md
docs/ResponseResultDirTemplate.md
docs/ResponseResultFileUploaded.md
docs/ResponseResultFlistCreated.md
docs/ResponseResultFlistState.md
docs/ResponseResultFlists.md
docs/ResponseResultPreviewFlist.md
docs/ResponseResultRes.md
docs/ResponseResultSignedIn.md
docs/SignInBody.md
docs/SignInResponse.md
docs/SystemApi.md
docs/TemplateErr.md
docs/TemplateErrBadRequest.md
docs/TemplateErrInternalServerError.md
docs/TemplateErrNotFound.md
docs/UploadBlockParams.md
docs/UserBlockInfo.md
docs/UserBlocksResponse.md
docs/VerifyBlock.md
docs/VerifyBlocksRequest.md
docs/VerifyBlocksResponse.md
docs/WebsiteServingApi.md
git_push.sh
src/apis/authentication_api.rs
src/apis/block_management_api.rs
src/apis/configuration.rs
src/apis/file_management_api.rs
src/apis/flist_management_api.rs
src/apis/mod.rs
src/apis/system_api.rs
src/apis/website_serving_api.rs
src/lib.rs
src/models/block_downloads_response.rs
src/models/block_info.rs
src/models/block_uploaded_response.rs
src/models/blocks_response.rs
src/models/dir_list_template.rs
src/models/dir_lister.rs
src/models/error_template.rs
src/models/file_download_request.rs
src/models/file_info.rs
src/models/file_upload_response.rs
src/models/flist_body.rs
src/models/flist_state.rs
src/models/flist_state_accepted.rs
src/models/flist_state_created.rs
src/models/flist_state_in_progress.rs
src/models/flist_state_info.rs
src/models/flist_state_response.rs
src/models/flist_state_started.rs
src/models/health_response.rs
src/models/job.rs
src/models/list_blocks_params.rs
src/models/list_blocks_response.rs
src/models/mod.rs
src/models/preview_response.rs
src/models/response_error.rs
src/models/response_error_bad_request.rs
src/models/response_error_conflict.rs
src/models/response_error_forbidden.rs
src/models/response_error_not_found.rs
src/models/response_error_template_error.rs
src/models/response_error_unauthorized.rs
src/models/response_result.rs
src/models/response_result_block_uploaded.rs
src/models/response_result_dir_template.rs
src/models/response_result_file_uploaded.rs
src/models/response_result_flist_created.rs
src/models/response_result_flist_state.rs
src/models/response_result_flists.rs
src/models/response_result_preview_flist.rs
src/models/response_result_res.rs
src/models/response_result_signed_in.rs
src/models/sign_in_body.rs
src/models/sign_in_response.rs
src/models/template_err.rs
src/models/template_err_bad_request.rs
src/models/template_err_internal_server_error.rs
src/models/template_err_not_found.rs
src/models/upload_block_params.rs
src/models/user_block_info.rs
src/models/user_blocks_response.rs
src/models/verify_block.rs
src/models/verify_blocks_request.rs
src/models/verify_blocks_response.rs


@ -0,0 +1 @@
7.13.0


@ -0,0 +1 @@
language: rust


@ -0,0 +1,15 @@
[package]
name = "openapi"
version = "0.2.0"
authors = ["OpenAPI Generator team and contributors"]
description = "No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)"
license = ""
edition = "2021"
[dependencies]
serde = { version = "^1.0", features = ["derive"] }
serde_with = { version = "^3.8", default-features = false, features = ["base64", "std", "macros"] }
serde_json = "^1.0"
serde_repr = "^0.1"
url = "^2.5"
reqwest = { version = "^0.12", default-features = false, features = ["json", "multipart"] }


@ -0,0 +1,114 @@
# Rust API client for openapi
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
## Overview
This API client was generated by the [OpenAPI Generator](https://openapi-generator.tech) project. By using the [openapi-spec](https://openapis.org) from a remote server, you can easily generate an API client.
- API version: 0.2.0
- Package version: 0.2.0
- Generator version: 7.13.0
- Build package: `org.openapitools.codegen.languages.RustClientCodegen`
## Installation
Put the package under your project folder in a directory named `openapi` and add the following to `Cargo.toml` under `[dependencies]`:
```
openapi = { path = "./openapi" }
```
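A minimal usage sketch of the generated crate, assuming the OpenAPI Generator's usual reqwest-based layout (each API function takes a `Configuration` as its first argument) and `tokio` as the async runtime; neither is pinned down by this README:
```rust
use openapi::apis::configuration::Configuration;
use openapi::apis::system_api;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Point the generated client at a server (URL and port are assumptions
    // taken from the sal-rfs-client examples).
    let mut config = Configuration::new();
    config.base_path = "http://localhost:8080".to_string();

    // The health check endpoint requires no authorization.
    let health = system_api::health_check_handler(&config).await?;
    println!("Server health: {:?}", health);
    Ok(())
}
```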
## Documentation for API Endpoints
All URIs are relative to *http://localhost*
Class | Method | HTTP request | Description
------------ | ------------- | ------------- | -------------
*AuthenticationApi* | [**sign_in_handler**](docs/AuthenticationApi.md#sign_in_handler) | **POST** /api/v1/signin |
*BlockManagementApi* | [**check_block_handler**](docs/BlockManagementApi.md#check_block_handler) | **HEAD** /api/v1/block/{hash} | Checks a block by its hash.
*BlockManagementApi* | [**get_block_downloads_handler**](docs/BlockManagementApi.md#get_block_downloads_handler) | **GET** /api/v1/block/{hash}/downloads | Retrieve the number of times a block has been downloaded.
*BlockManagementApi* | [**get_block_handler**](docs/BlockManagementApi.md#get_block_handler) | **GET** /api/v1/block/{hash} | Retrieve a block by its hash.
*BlockManagementApi* | [**get_blocks_by_hash_handler**](docs/BlockManagementApi.md#get_blocks_by_hash_handler) | **GET** /api/v1/blocks/{hash} | Retrieve blocks by hash (file hash or block hash).
*BlockManagementApi* | [**get_user_blocks_handler**](docs/BlockManagementApi.md#get_user_blocks_handler) | **GET** /api/v1/user/blocks | Retrieve all blocks uploaded by a specific user.
*BlockManagementApi* | [**list_blocks_handler**](docs/BlockManagementApi.md#list_blocks_handler) | **GET** /api/v1/blocks | List all block hashes in the server with pagination
*BlockManagementApi* | [**upload_block_handler**](docs/BlockManagementApi.md#upload_block_handler) | **POST** /api/v1/block | Upload a block to the server.
*BlockManagementApi* | [**verify_blocks_handler**](docs/BlockManagementApi.md#verify_blocks_handler) | **POST** /api/v1/block/verify | Verify if multiple blocks exist on the server.
*FileManagementApi* | [**get_file_handler**](docs/FileManagementApi.md#get_file_handler) | **GET** /api/v1/file/{hash} | Retrieve a file by its hash from path, with optional custom filename in request body.
*FileManagementApi* | [**upload_file_handler**](docs/FileManagementApi.md#upload_file_handler) | **POST** /api/v1/file | Upload a file to the server.
*FlistManagementApi* | [**create_flist_handler**](docs/FlistManagementApi.md#create_flist_handler) | **POST** /api/v1/fl |
*FlistManagementApi* | [**get_flist_state_handler**](docs/FlistManagementApi.md#get_flist_state_handler) | **GET** /api/v1/fl/{job_id} |
*FlistManagementApi* | [**list_flists_handler**](docs/FlistManagementApi.md#list_flists_handler) | **GET** /api/v1/fl |
*FlistManagementApi* | [**preview_flist_handler**](docs/FlistManagementApi.md#preview_flist_handler) | **GET** /api/v1/fl/preview/{flist_path} |
*FlistManagementApi* | [**serve_flists**](docs/FlistManagementApi.md#serve_flists) | **GET** /{path} | Serve flist files from the server's filesystem
*SystemApi* | [**health_check_handler**](docs/SystemApi.md#health_check_handler) | **GET** /api/v1 |
*WebsiteServingApi* | [**serve_website_handler**](docs/WebsiteServingApi.md#serve_website_handler) | **GET** /api/v1/website/{website_hash}/{path} |
## Documentation For Models
- [BlockDownloadsResponse](docs/BlockDownloadsResponse.md)
- [BlockInfo](docs/BlockInfo.md)
- [BlockUploadedResponse](docs/BlockUploadedResponse.md)
- [BlocksResponse](docs/BlocksResponse.md)
- [DirListTemplate](docs/DirListTemplate.md)
- [DirLister](docs/DirLister.md)
- [ErrorTemplate](docs/ErrorTemplate.md)
- [FileDownloadRequest](docs/FileDownloadRequest.md)
- [FileInfo](docs/FileInfo.md)
- [FileUploadResponse](docs/FileUploadResponse.md)
- [FlistBody](docs/FlistBody.md)
- [FlistState](docs/FlistState.md)
- [FlistStateAccepted](docs/FlistStateAccepted.md)
- [FlistStateCreated](docs/FlistStateCreated.md)
- [FlistStateInProgress](docs/FlistStateInProgress.md)
- [FlistStateInfo](docs/FlistStateInfo.md)
- [FlistStateResponse](docs/FlistStateResponse.md)
- [FlistStateStarted](docs/FlistStateStarted.md)
- [HealthResponse](docs/HealthResponse.md)
- [Job](docs/Job.md)
- [ListBlocksParams](docs/ListBlocksParams.md)
- [ListBlocksResponse](docs/ListBlocksResponse.md)
- [PreviewResponse](docs/PreviewResponse.md)
- [ResponseError](docs/ResponseError.md)
- [ResponseErrorBadRequest](docs/ResponseErrorBadRequest.md)
- [ResponseErrorConflict](docs/ResponseErrorConflict.md)
- [ResponseErrorForbidden](docs/ResponseErrorForbidden.md)
- [ResponseErrorNotFound](docs/ResponseErrorNotFound.md)
- [ResponseErrorTemplateError](docs/ResponseErrorTemplateError.md)
- [ResponseErrorUnauthorized](docs/ResponseErrorUnauthorized.md)
- [ResponseResult](docs/ResponseResult.md)
- [ResponseResultBlockUploaded](docs/ResponseResultBlockUploaded.md)
- [ResponseResultDirTemplate](docs/ResponseResultDirTemplate.md)
- [ResponseResultFileUploaded](docs/ResponseResultFileUploaded.md)
- [ResponseResultFlistCreated](docs/ResponseResultFlistCreated.md)
- [ResponseResultFlistState](docs/ResponseResultFlistState.md)
- [ResponseResultFlists](docs/ResponseResultFlists.md)
- [ResponseResultPreviewFlist](docs/ResponseResultPreviewFlist.md)
- [ResponseResultRes](docs/ResponseResultRes.md)
- [ResponseResultSignedIn](docs/ResponseResultSignedIn.md)
- [SignInBody](docs/SignInBody.md)
- [SignInResponse](docs/SignInResponse.md)
- [TemplateErr](docs/TemplateErr.md)
- [TemplateErrBadRequest](docs/TemplateErrBadRequest.md)
- [TemplateErrInternalServerError](docs/TemplateErrInternalServerError.md)
- [TemplateErrNotFound](docs/TemplateErrNotFound.md)
- [UploadBlockParams](docs/UploadBlockParams.md)
- [UserBlockInfo](docs/UserBlockInfo.md)
- [UserBlocksResponse](docs/UserBlocksResponse.md)
- [VerifyBlock](docs/VerifyBlock.md)
- [VerifyBlocksRequest](docs/VerifyBlocksRequest.md)
- [VerifyBlocksResponse](docs/VerifyBlocksResponse.md)
To get access to the crate's generated documentation, use:
```
cargo doc --open
```
## Author


@ -0,0 +1,37 @@
# \AuthenticationApi
All URIs are relative to *http://localhost*
Method | HTTP request | Description
------------- | ------------- | -------------
[**sign_in_handler**](AuthenticationApi.md#sign_in_handler) | **POST** /api/v1/signin |
## sign_in_handler
> models::SignInResponse sign_in_handler(sign_in_body)
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**sign_in_body** | [**SignInBody**](SignInBody.md) | | [required] |
### Return type
[**models::SignInResponse**](SignInResponse.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: application/json
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)


@ -0,0 +1,14 @@
# Block
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**data** | [**std::path::PathBuf**](std::path::PathBuf.md) | |
**hash** | **String** | |
**index** | **i64** | |
**size** | **i32** | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,13 @@
# BlockDownloadsResponse
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**block_hash** | **String** | Block hash |
**block_size** | **i64** | Size of the block in bytes |
**downloads_count** | **i64** | Number of times the block has been downloaded |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)

View File

@ -0,0 +1,12 @@
# BlockInfo
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**hash** | **String** | Block hash |
**index** | **i64** | Block index within the file |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,250 @@
# \BlockManagementApi
All URIs are relative to *http://localhost*
Method | HTTP request | Description
------------- | ------------- | -------------
[**check_block_handler**](BlockManagementApi.md#check_block_handler) | **HEAD** /api/v1/block/{hash} | Checks a block by its hash.
[**get_block_downloads_handler**](BlockManagementApi.md#get_block_downloads_handler) | **GET** /api/v1/block/{hash}/downloads | Retrieve the number of times a block has been downloaded.
[**get_block_handler**](BlockManagementApi.md#get_block_handler) | **GET** /api/v1/block/{hash} | Retrieve a block by its hash.
[**get_blocks_by_hash_handler**](BlockManagementApi.md#get_blocks_by_hash_handler) | **GET** /api/v1/blocks/{hash} | Retrieve blocks by hash (file hash or block hash).
[**get_user_blocks_handler**](BlockManagementApi.md#get_user_blocks_handler) | **GET** /api/v1/user/blocks | Retrieve all blocks uploaded by a specific user.
[**list_blocks_handler**](BlockManagementApi.md#list_blocks_handler) | **GET** /api/v1/blocks | List all block hashes in the server with pagination
[**upload_block_handler**](BlockManagementApi.md#upload_block_handler) | **POST** /api/v1/block | Upload a block to the server.
[**verify_blocks_handler**](BlockManagementApi.md#verify_blocks_handler) | **POST** /api/v1/block/verify | Verify if multiple blocks exist on the server.
## check_block_handler
> check_block_handler(hash)
Checks a block by its hash.
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**hash** | **String** | Block hash | [required] |
### Return type
(empty response body)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
## get_block_downloads_handler
> models::BlockDownloadsResponse get_block_downloads_handler(hash)
Retrieve the number of times a block has been downloaded.
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**hash** | **String** | Block hash | [required] |
### Return type
[**models::BlockDownloadsResponse**](BlockDownloadsResponse.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
## get_block_handler
> std::path::PathBuf get_block_handler(hash)
Retrieve a block by its hash.
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**hash** | **String** | Block hash | [required] |
### Return type
[**std::path::PathBuf**](std::path::PathBuf.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/octet-stream, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
## get_blocks_by_hash_handler
> models::BlocksResponse get_blocks_by_hash_handler(hash)
Retrieve blocks by hash (file hash or block hash).
If the hash is a file hash, returns all blocks with their block index related to that file. If the hash is a block hash, returns the block itself.
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**hash** | **String** | File hash or block hash | [required] |
### Return type
[**models::BlocksResponse**](BlocksResponse.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
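A request sketch against the server used in the examples (`http://localhost:8080` is an assumption; the generated docs only say `http://localhost`):
```bash
# File hash: returns all blocks of that file with their indices.
# Block hash: returns the block itself.
curl "http://localhost:8080/api/v1/blocks/<hash>"
```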
## get_user_blocks_handler
> models::UserBlocksResponse get_user_blocks_handler(page, per_page)
Retrieve all blocks uploaded by a specific user.
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**page** | Option<**i32**> | Page number (1-indexed) | |
**per_page** | Option<**i32**> | Number of items per page | |
### Return type
[**models::UserBlocksResponse**](UserBlocksResponse.md)
### Authorization
[bearerAuth](../README.md#bearerAuth)
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
## list_blocks_handler
> models::ListBlocksResponse list_blocks_handler(page, per_page)
List all block hashes in the server with pagination
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**page** | Option<**i32**> | Page number (1-indexed) | |
**per_page** | Option<**i32**> | Number of items per page | |
### Return type
[**models::ListBlocksResponse**](ListBlocksResponse.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
## upload_block_handler
> models::BlockUploadedResponse upload_block_handler(file_hash, idx, body)
Upload a block to the server.
If the block already exists, the server will return a 200 OK response. If the block is new, the server will return a 201 Created response.
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**file_hash** | **String** | File hash associated with the block | [required] |
**idx** | **i64** | Block index within the file | [required] |
**body** | **std::path::PathBuf** | Block data to upload | [required] |
### Return type
[**models::BlockUploadedResponse**](BlockUploadedResponse.md)
### Authorization
[bearerAuth](../README.md#bearerAuth)
### HTTP request headers
- **Content-Type**: application/octet-stream
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
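A request sketch, assuming `file_hash` and `idx` travel as query parameters (the table above does not state their location) and a bearer token obtained from `sign_in_handler`:
```bash
curl -X POST "http://localhost:8080/api/v1/block?file_hash=<file_hash>&idx=0" \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/octet-stream" \
    --data-binary @block.bin
```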
## verify_blocks_handler
> models::VerifyBlocksResponse verify_blocks_handler(verify_blocks_request)
Verify if multiple blocks exist on the server.
Returns a list of missing blocks.
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**verify_blocks_request** | [**VerifyBlocksRequest**](VerifyBlocksRequest.md) | List of block hashes to verify | [required] |
### Return type
[**models::VerifyBlocksResponse**](VerifyBlocksResponse.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: application/json
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
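A request sketch; the JSON shape follows the `VerifyBlock` fields used in the block management example (snake_case serialization is an assumption):
```bash
curl -X POST "http://localhost:8080/api/v1/block/verify" \
    -H "Content-Type: application/json" \
    -d '{"blocks":[{"block_hash":"<hash>","block_index":0,"file_hash":"<file_hash>"}]}'
```
The response lists the hashes that are missing on the server.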


@ -0,0 +1,12 @@
# BlockUploadedResponse
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**hash** | **String** | |
**message** | **String** | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,11 @@
# BlocksResponse
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**blocks** | [**Vec<models::BlockInfo>**](BlockInfo.md) | List of blocks with their indices |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,12 @@
# DirListTemplate
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**cur_path** | **String** | |
**lister** | [**models::DirLister**](DirLister.md) | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,11 @@
# DirLister
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**files** | [**Vec<models::FileInfo>**](FileInfo.md) | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,13 @@
# ErrorTemplate
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**cur_path** | **String** | |
**err** | [**models::TemplateErr**](TemplateErr.md) | |
**message** | **String** | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,12 @@
# File
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**file_content** | [**std::path::PathBuf**](std::path::PathBuf.md) | |
**file_hash** | **String** | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,11 @@
# FileDownloadRequest
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**file_name** | **String** | The custom filename to use for download |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,16 @@
# FileInfo
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**is_file** | **bool** | |
**last_modified** | **i64** | |
**name** | **String** | |
**path_uri** | **String** | |
**progress** | **f32** | |
**size** | **i64** | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,71 @@
# \FileManagementApi
All URIs are relative to *http://localhost*
Method | HTTP request | Description
------------- | ------------- | -------------
[**get_file_handler**](FileManagementApi.md#get_file_handler) | **GET** /api/v1/file/{hash} | Retrieve a file by its hash from path, with optional custom filename in request body.
[**upload_file_handler**](FileManagementApi.md#upload_file_handler) | **POST** /api/v1/file | Upload a file to the server.
## get_file_handler
> std::path::PathBuf get_file_handler(hash, file_download_request)
Retrieve a file by its hash from path, with optional custom filename in request body.
The file will be reconstructed from its blocks.
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**hash** | **String** | File hash | [required] |
**file_download_request** | [**FileDownloadRequest**](FileDownloadRequest.md) | Optional custom filename for download | [required] |
### Return type
[**std::path::PathBuf**](std::path::PathBuf.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: application/json
- **Accept**: application/octet-stream, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
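A request sketch; note that this endpoint documents a GET carrying a JSON body with the optional custom filename (server URL assumed):
```bash
curl -X GET "http://localhost:8080/api/v1/file/<file_hash>" \
    -H "Content-Type: application/json" \
    -d '{"file_name":"restored.bin"}' \
    --output restored.bin
```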
## upload_file_handler
> models::FileUploadResponse upload_file_handler(body)
Upload a file to the server.
The file will be split into blocks and stored in the database.
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**body** | **std::path::PathBuf** | File data to upload | [required] |
### Return type
[**models::FileUploadResponse**](FileUploadResponse.md)
### Authorization
[bearerAuth](../README.md#bearerAuth)
### HTTP request headers
- **Content-Type**: application/octet-stream
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
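A request sketch with a bearer token (server URL assumed):
```bash
curl -X POST "http://localhost:8080/api/v1/file" \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/octet-stream" \
    --data-binary @/path/to/file
```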


@ -0,0 +1,12 @@
# FileUploadResponse
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**file_hash** | **String** | The file hash |
**message** | **String** | Message indicating success |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,18 @@
# FlistBody
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**auth** | Option<**String**> | | [optional]
**email** | Option<**String**> | | [optional]
**identity_token** | Option<**String**> | | [optional]
**image_name** | **String** | |
**password** | Option<**String**> | | [optional]
**registry_token** | Option<**String**> | | [optional]
**server_address** | Option<**String**> | | [optional]
**username** | Option<**String**> | | [optional]
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,150 @@
# \FlistManagementApi
All URIs are relative to *http://localhost*
Method | HTTP request | Description
------------- | ------------- | -------------
[**create_flist_handler**](FlistManagementApi.md#create_flist_handler) | **POST** /api/v1/fl |
[**get_flist_state_handler**](FlistManagementApi.md#get_flist_state_handler) | **GET** /api/v1/fl/{job_id} |
[**list_flists_handler**](FlistManagementApi.md#list_flists_handler) | **GET** /api/v1/fl |
[**preview_flist_handler**](FlistManagementApi.md#preview_flist_handler) | **GET** /api/v1/fl/preview/{flist_path} |
[**serve_flists**](FlistManagementApi.md#serve_flists) | **GET** /{path} | Serve flist files from the server's filesystem
## create_flist_handler
> models::Job create_flist_handler(flist_body)
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**flist_body** | [**FlistBody**](FlistBody.md) | | [required] |
### Return type
[**models::Job**](Job.md)
### Authorization
[bearerAuth](../README.md#bearerAuth)
### HTTP request headers
- **Content-Type**: application/json
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
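A request sketch; `image_name` is the only required field of `FlistBody` (server URL assumed):
```bash
curl -X POST "http://localhost:8080/api/v1/fl" \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/json" \
    -d '{"image_name":"alpine:latest"}'
```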
## get_flist_state_handler
> models::FlistStateResponse get_flist_state_handler(job_id)
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**job_id** | **String** | flist job id | [required] |
### Return type
[**models::FlistStateResponse**](FlistStateResponse.md)
### Authorization
[bearerAuth](../README.md#bearerAuth)
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
## list_flists_handler
> std::collections::HashMap<String, Vec<models::FileInfo>> list_flists_handler()
### Parameters
This endpoint does not need any parameter.
### Return type
[**std::collections::HashMap<String, Vec<models::FileInfo>>**](Vec.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
## preview_flist_handler
> models::PreviewResponse preview_flist_handler(flist_path)
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**flist_path** | **String** | flist file path | [required] |
### Return type
[**models::PreviewResponse**](PreviewResponse.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
## serve_flists
> std::path::PathBuf serve_flists(path)
Serve flist files from the server's filesystem
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**path** | **String** | Path to the flist file or directory to serve | [required] |
### Return type
[**std::path::PathBuf**](std::path::PathBuf.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/octet-stream, application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)


@ -0,0 +1,37 @@
# \FlistServingApi
All URIs are relative to *http://localhost*
Method | HTTP request | Description
------------- | ------------- | -------------
[**serve_flists**](FlistServingApi.md#serve_flists) | **GET** /{path} | Serve flist files from the server's filesystem
## serve_flists
> models::ResponseResult serve_flists(path)
Serve flist files from the server's filesystem
### Parameters
Name | Type | Description | Required | Notes
------------- | ------------- | ------------- | ------------- | -------------
**path** | **String** | Path to the flist file or directory to serve | [required] |
### Return type
[**models::ResponseResult**](ResponseResult.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)


@ -0,0 +1,15 @@
# FlistState
## Enum Variants
| Name | Description |
|---- | -----|
| FlistStateAccepted | |
| FlistStateCreated | |
| FlistStateInProgress | |
| FlistStateStarted | |
| String | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,11 @@
# FlistStateAccepted
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**accepted** | **String** | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,11 @@
# FlistStateCreated
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**created** | **String** | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,11 @@
# FlistStateInProgress
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**in_progress** | [**models::FlistStateInfo**](FlistStateInfo.md) | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,12 @@
# FlistStateInfo
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**msg** | **String** | |
**progress** | **f32** | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)


@ -0,0 +1,11 @@
# FlistStateResponse
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**flist_state** | [**models::FlistState**](FlistState.md) | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)

Some files were not shown because too many files have changed in this diff.