4 Commits

Author              SHA1         Message                                                                                    Date
Sameh Abouel-saad   5014c2f4a5   feat: add Rhai scripting interface for RFS client operations                              2025-07-02 15:09:35 +03:00
Sameh Abouel-saad   ba6f53a28a   feat: add UserBlockInfo model to show block size instead of index                         2025-06-24 18:13:02 +03:00
Sameh Abouel-saad   b81a0aa61c   refactor: rename rfs-client to sal-rfs-client and integrate with workspace dependencies   2025-06-24 17:47:50 +03:00
Sameh Abouel-saad   b02101bd42   Implement rfs-client                                                                       2025-06-24 16:10:39 +03:00
712 changed files with 896 additions and 44707 deletions

View File

@@ -1,227 +0,0 @@
name: Publish SAL Crates
on:
release:
types: [published]
workflow_dispatch:
inputs:
version:
description: 'Version to publish (e.g., 0.1.0)'
required: true
type: string
dry_run:
description: 'Dry run (do not actually publish)'
required: false
type: boolean
default: false
env:
CARGO_TERM_COLOR: always
jobs:
publish:
name: Publish to crates.io
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- name: Cache Cargo dependencies
uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-
- name: Install cargo-edit for version management
run: cargo install cargo-edit
- name: Set version from release tag
if: github.event_name == 'release'
run: |
VERSION=${GITHUB_REF#refs/tags/v}
echo "PUBLISH_VERSION=$VERSION" >> $GITHUB_ENV
echo "Publishing version: $VERSION"
- name: Set version from workflow input
if: github.event_name == 'workflow_dispatch'
run: |
echo "PUBLISH_VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
echo "Publishing version: ${{ github.event.inputs.version }}"
- name: Update version in all crates
run: |
echo "Updating version to $PUBLISH_VERSION"
# Update root Cargo.toml
cargo set-version $PUBLISH_VERSION
# Update each crate
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
if [ -d "$crate" ]; then
cd "$crate"
cargo set-version $PUBLISH_VERSION
cd ..
echo "Updated $crate to version $PUBLISH_VERSION"
fi
done
- name: Run tests
run: cargo test --workspace --verbose
- name: Check formatting
run: cargo fmt --all -- --check
- name: Run clippy
run: cargo clippy --workspace --all-targets --all-features -- -D warnings
- name: Dry run publish (check packages)
run: |
echo "Checking all packages can be published..."
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
if [ -d "$crate" ]; then
echo "Checking $crate..."
cd "$crate"
cargo publish --dry-run
cd ..
fi
done
echo "Checking main crate..."
cargo publish --dry-run
- name: Publish crates (dry run)
if: github.event.inputs.dry_run == 'true'
run: |
echo "🔍 DRY RUN MODE - Would publish the following crates:"
echo "Individual crates: sal-os, sal-process, sal-text, sal-net, sal-git, sal-vault, sal-kubernetes, sal-virt, sal-redisclient, sal-postgresclient, sal-zinit-client, sal-mycelium, sal-rhai"
echo "Meta-crate: sal"
echo "Version: $PUBLISH_VERSION"
- name: Publish individual crates
if: github.event.inputs.dry_run != 'true'
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: |
echo "Publishing individual crates..."
# Crates in dependency order
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
if [ -d "$crate" ]; then
echo "Publishing sal-$crate..."
cd "$crate"
# Retry logic for transient failures
for attempt in 1 2 3; do
if cargo publish --token $CARGO_REGISTRY_TOKEN; then
echo "✅ sal-$crate published successfully"
break
else
if [ $attempt -eq 3 ]; then
echo "❌ Failed to publish sal-$crate after 3 attempts"
exit 1
else
echo "⚠️ Attempt $attempt failed, retrying in 30 seconds..."
sleep 30
fi
fi
done
cd ..
# Wait for crates.io to process
if [ "$crate" != "rhai" ]; then
echo "⏳ Waiting 30 seconds for crates.io to process..."
sleep 30
fi
fi
done
- name: Publish main crate
if: github.event.inputs.dry_run != 'true'
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: |
echo "Publishing main sal crate..."
# Wait a bit longer before publishing the meta-crate
echo "⏳ Waiting 60 seconds for all individual crates to be available..."
sleep 60
# Retry logic for the main crate
for attempt in 1 2 3; do
if cargo publish --token $CARGO_REGISTRY_TOKEN; then
echo "✅ Main sal crate published successfully"
break
else
if [ $attempt -eq 3 ]; then
echo "❌ Failed to publish main sal crate after 3 attempts"
exit 1
else
echo "⚠️ Attempt $attempt failed, retrying in 60 seconds..."
sleep 60
fi
fi
done
- name: Create summary
if: always()
run: |
echo "## 📦 SAL Publishing Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Version:** $PUBLISH_VERSION" >> $GITHUB_STEP_SUMMARY
echo "**Trigger:** ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY
if [ "${{ github.event.inputs.dry_run }}" == "true" ]; then
echo "**Mode:** Dry Run" >> $GITHUB_STEP_SUMMARY
else
echo "**Mode:** Live Publishing" >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Published Crates" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "- sal-os" >> $GITHUB_STEP_SUMMARY
echo "- sal-process" >> $GITHUB_STEP_SUMMARY
echo "- sal-text" >> $GITHUB_STEP_SUMMARY
echo "- sal-net" >> $GITHUB_STEP_SUMMARY
echo "- sal-git" >> $GITHUB_STEP_SUMMARY
echo "- sal-vault" >> $GITHUB_STEP_SUMMARY
echo "- sal-kubernetes" >> $GITHUB_STEP_SUMMARY
echo "- sal-virt" >> $GITHUB_STEP_SUMMARY
echo "- sal-redisclient" >> $GITHUB_STEP_SUMMARY
echo "- sal-postgresclient" >> $GITHUB_STEP_SUMMARY
echo "- sal-zinit-client" >> $GITHUB_STEP_SUMMARY
echo "- sal-mycelium" >> $GITHUB_STEP_SUMMARY
echo "- sal-rhai" >> $GITHUB_STEP_SUMMARY
echo "- sal (meta-crate)" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Usage" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo '```bash' >> $GITHUB_STEP_SUMMARY
echo "# Individual crates" >> $GITHUB_STEP_SUMMARY
echo "cargo add sal-os sal-process sal-text" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "# Meta-crate with features" >> $GITHUB_STEP_SUMMARY
echo "cargo add sal --features core" >> $GITHUB_STEP_SUMMARY
echo "cargo add sal --features all" >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY

View File

@@ -1,233 +0,0 @@
name: Test Publishing Setup
on:
push:
branches: [ main, master ]
paths:
- '**/Cargo.toml'
- 'scripts/publish-all.sh'
- '.github/workflows/publish.yml'
pull_request:
branches: [ main, master ]
paths:
- '**/Cargo.toml'
- 'scripts/publish-all.sh'
- '.github/workflows/publish.yml'
workflow_dispatch:
env:
CARGO_TERM_COLOR: always
jobs:
test-publish-setup:
name: Test Publishing Setup
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- name: Cache Cargo dependencies
uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-publish-test-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-publish-test-
${{ runner.os }}-cargo-
- name: Install cargo-edit
run: cargo install cargo-edit
- name: Test workspace structure
run: |
echo "Testing workspace structure..."
# Check that all expected crates exist
EXPECTED_CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo)
for crate in "${EXPECTED_CRATES[@]}"; do
if [ -d "$crate" ] && [ -f "$crate/Cargo.toml" ]; then
echo "✅ $crate exists"
else
echo "❌ $crate missing or invalid"
exit 1
fi
done
- name: Test feature configuration
run: |
echo "Testing feature configuration..."
# Test that features work correctly
cargo check --features os
cargo check --features process
cargo check --features text
cargo check --features net
cargo check --features git
cargo check --features vault
cargo check --features kubernetes
cargo check --features virt
cargo check --features redisclient
cargo check --features postgresclient
cargo check --features zinit_client
cargo check --features mycelium
cargo check --features rhai
echo "✅ All individual features work"
# Test feature groups
cargo check --features core
cargo check --features clients
cargo check --features infrastructure
cargo check --features scripting
echo "✅ All feature groups work"
# Test all features
cargo check --features all
echo "✅ All features together work"
- name: Test dry-run publishing
run: |
echo "Testing dry-run publishing..."
# Test each individual crate can be packaged
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
echo "Testing sal-$crate..."
cd "$crate"
cargo publish --dry-run
cd ..
echo "✅ sal-$crate can be published"
done
# Test main crate
echo "Testing main sal crate..."
cargo publish --dry-run
echo "✅ Main sal crate can be published"
- name: Test publishing script
run: |
echo "Testing publishing script..."
# Make script executable
chmod +x scripts/publish-all.sh
# Test dry run
./scripts/publish-all.sh --dry-run --version 0.1.0-test
echo "✅ Publishing script works"
- name: Test version consistency
run: |
echo "Testing version consistency..."
# Get version from root Cargo.toml
ROOT_VERSION=$(grep '^version = ' Cargo.toml | head -1 | sed 's/version = "\(.*\)"/\1/')
echo "Root version: $ROOT_VERSION"
# Check all crates have the same version
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai herodo)
for crate in "${CRATES[@]}"; do
if [ -f "$crate/Cargo.toml" ]; then
CRATE_VERSION=$(grep '^version = ' "$crate/Cargo.toml" | head -1 | sed 's/version = "\(.*\)"/\1/')
if [ "$CRATE_VERSION" = "$ROOT_VERSION" ]; then
echo "✅ $crate version matches: $CRATE_VERSION"
else
echo "❌ $crate version mismatch: $CRATE_VERSION (expected $ROOT_VERSION)"
exit 1
fi
fi
done
- name: Test metadata completeness
run: |
echo "Testing metadata completeness..."
# Check that all crates have required metadata
CRATES=(os process text net git vault kubernetes virt redisclient postgresclient zinit_client mycelium rhai)
for crate in "${CRATES[@]}"; do
echo "Checking sal-$crate metadata..."
cd "$crate"
# Check required fields exist
if ! grep -q '^name = "sal-' Cargo.toml; then
echo "❌ $crate missing or incorrect name"
exit 1
fi
if ! grep -q '^description = ' Cargo.toml; then
echo "❌ $crate missing description"
exit 1
fi
if ! grep -q '^repository = ' Cargo.toml; then
echo "❌ $crate missing repository"
exit 1
fi
if ! grep -q '^license = ' Cargo.toml; then
echo "❌ $crate missing license"
exit 1
fi
echo "✅ sal-$crate metadata complete"
cd ..
done
- name: Test dependency resolution
run: |
echo "Testing dependency resolution..."
# Test that all workspace dependencies resolve correctly
cargo tree --workspace > /dev/null
echo "✅ All dependencies resolve correctly"
# Test that there are no dependency conflicts
cargo check --workspace
echo "✅ No dependency conflicts"
- name: Generate publishing report
if: always()
run: |
echo "## 🧪 Publishing Setup Test Report" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### ✅ Tests Passed" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "- Workspace structure validation" >> $GITHUB_STEP_SUMMARY
echo "- Feature configuration testing" >> $GITHUB_STEP_SUMMARY
echo "- Dry-run publishing simulation" >> $GITHUB_STEP_SUMMARY
echo "- Publishing script validation" >> $GITHUB_STEP_SUMMARY
echo "- Version consistency check" >> $GITHUB_STEP_SUMMARY
echo "- Metadata completeness verification" >> $GITHUB_STEP_SUMMARY
echo "- Dependency resolution testing" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 📦 Ready for Publishing" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "All SAL crates are ready for publishing to crates.io!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Individual Crates:** 13 modules" >> $GITHUB_STEP_SUMMARY
echo "**Meta-crate:** sal with optional features" >> $GITHUB_STEP_SUMMARY
echo "**Binary:** herodo script executor" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### 🚀 Next Steps" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "1. Create a release tag (e.g., v0.1.0)" >> $GITHUB_STEP_SUMMARY
echo "2. The publish workflow will automatically trigger" >> $GITHUB_STEP_SUMMARY
echo "3. All crates will be published to crates.io" >> $GITHUB_STEP_SUMMARY
echo "4. Users can install with: \`cargo add sal-os\` or \`cargo add sal --features all\`" >> $GITHUB_STEP_SUMMARY

5
.gitignore vendored
View File

@@ -62,8 +62,3 @@ docusaurus.config.ts
sidebars.ts
tsconfig.json
Cargo.toml.bak
for_augment
myenv.sh

View File

@@ -11,40 +11,18 @@ categories = ["os", "filesystem", "api-bindings"]
readme = "README.md"
[workspace]
members = [
"packages/clients/myceliumclient",
"packages/clients/postgresclient",
"packages/clients/redisclient",
"packages/clients/zinitclient",
"packages/clients/rfsclient",
"packages/core/net",
"packages/core/text",
"packages/crypt/vault",
"packages/data/ourdb",
"packages/data/radixtree",
"packages/data/tst",
"packages/system/git",
"packages/system/kubernetes",
"packages/system/os",
"packages/system/process",
"packages/system/virt",
"rhai",
"rhailib",
"herodo",
"packages/clients/hetznerclient",
"packages/ai/codemonkey",
]
members = [".", "vault", "git", "redisclient", "mycelium", "text", "os", "net", "zinit_client", "process", "virt", "postgresclient", "rhai", "herodo", "rfs-client"]
resolver = "2"
[workspace.metadata]
# Workspace-level metadata
rust-version = "1.70.0"
rust-version = "1.85.0"
[workspace.dependencies]
# Core shared dependencies with consistent versions
anyhow = "1.0.98"
base64 = "0.22.1"
bytes = "1.7.1"
bytes = "1.4.0"
dirs = "6.0.0"
env_logger = "0.11.8"
futures = "0.3.30"
@@ -55,7 +33,7 @@ log = "0.4"
once_cell = "1.18.0"
rand = "0.8.5"
regex = "1.8.1"
reqwest = { version = "0.12.15", features = ["json", "blocking"] }
reqwest = { version = "0.12.15", features = ["json"] }
rhai = { version = "1.12.0", features = ["sync"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -76,10 +54,6 @@ chacha20poly1305 = "0.10.1"
k256 = { version = "0.13.4", features = ["ecdsa", "ecdh"] }
sha2 = "0.10.7"
hex = "0.4"
bincode = { version = "2.0.1", features = ["serde"] }
pbkdf2 = "0.12.2"
getrandom = { version = "0.3.3", features = ["wasm_js"] }
tera = "1.19.0"
# Ethereum dependencies
ethers = { version = "2.0.7", features = ["legacy"] }
@@ -93,114 +67,22 @@ windows = { version = "0.61.1", features = [
] }
# Specialized dependencies
zinit-client = "0.4.0"
zinit-client = "0.3.0"
urlencoding = "2.1.3"
tokio-test = "0.4.4"
kube = { version = "0.95.0", features = ["client", "config", "derive"] }
k8s-openapi = { version = "0.23.0", features = ["latest"] }
tokio-retry = "0.3.0"
governor = "0.6.3"
tower = { version = "0.5.2", features = ["timeout", "limit"] }
serde_yaml = "0.9"
postgres-types = "0.2.5"
r2d2 = "0.8.10"
# SAL dependencies
sal-git = { path = "packages/system/git" }
sal-kubernetes = { path = "packages/system/kubernetes" }
sal-redisclient = { path = "packages/clients/redisclient" }
sal-mycelium = { path = "packages/clients/myceliumclient" }
sal-hetzner = { path = "packages/clients/hetznerclient" }
sal-rfs-client = { path = "packages/clients/rfsclient" }
sal-text = { path = "packages/core/text" }
sal-os = { path = "packages/system/os" }
sal-net = { path = "packages/core/net" }
sal-zinit-client = { path = "packages/clients/zinitclient" }
sal-process = { path = "packages/system/process" }
sal-virt = { path = "packages/system/virt" }
sal-postgresclient = { path = "packages/clients/postgresclient" }
sal-vault = { path = "packages/crypt/vault" }
sal-rhai = { path = "rhai" }
sal-service-manager = { path = "_archive/service_manager" }
[dependencies]
thiserror = { workspace = true }
tokio = { workspace = true }
# Optional dependencies - users can choose which modules to include
sal-git = { workspace = true, optional = true }
sal-kubernetes = { workspace = true, optional = true }
sal-redisclient = { workspace = true, optional = true }
sal-mycelium = { workspace = true, optional = true }
sal-hetzner = { workspace = true, optional = true }
sal-rfs-client = { workspace = true, optional = true }
sal-text = { workspace = true, optional = true }
sal-os = { workspace = true, optional = true }
sal-net = { workspace = true, optional = true }
sal-zinit-client = { workspace = true, optional = true }
sal-process = { workspace = true, optional = true }
sal-virt = { workspace = true, optional = true }
sal-postgresclient = { workspace = true, optional = true }
sal-vault = { workspace = true, optional = true }
sal-rhai = { workspace = true, optional = true }
sal-service-manager = { workspace = true, optional = true }
[features]
default = []
# Individual module features
git = ["dep:sal-git"]
kubernetes = ["dep:sal-kubernetes"]
redisclient = ["dep:sal-redisclient"]
mycelium = ["dep:sal-mycelium"]
hetzner = ["dep:sal-hetzner"]
rfsclient = ["dep:sal-rfs-client"]
text = ["dep:sal-text"]
os = ["dep:sal-os"]
net = ["dep:sal-net"]
zinit_client = ["dep:sal-zinit-client"]
process = ["dep:sal-process"]
virt = ["dep:sal-virt"]
postgresclient = ["dep:sal-postgresclient"]
vault = ["dep:sal-vault"]
rhai = ["dep:sal-rhai"]
# service_manager is removed as it's not a direct member anymore
# Convenience feature groups
core = ["os", "process", "text", "net"]
clients = ["redisclient", "postgresclient", "zinit_client", "mycelium", "hetzner", "rfsclient"]
infrastructure = ["git", "vault", "kubernetes", "virt"]
scripting = ["rhai"]
all = [
"git",
"kubernetes",
"redisclient",
"mycelium",
"hetzner",
"rfsclient",
"text",
"os",
"net",
"zinit_client",
"process",
"virt",
"postgresclient",
"vault",
"rhai",
]
# Examples
[[example]]
name = "postgres_cluster"
path = "examples/kubernetes/clusters/postgres.rs"
required-features = ["kubernetes"]
[[example]]
name = "redis_cluster"
path = "examples/kubernetes/clusters/redis.rs"
required-features = ["kubernetes"]
[[example]]
name = "generic_cluster"
path = "examples/kubernetes/clusters/generic.rs"
required-features = ["kubernetes"]
thiserror = "2.0.12" # For error handling in the main Error enum
sal-git = { path = "git" }
sal-redisclient = { path = "redisclient" }
sal-mycelium = { path = "mycelium" }
sal-text = { path = "text" }
sal-os = { path = "os" }
sal-net = { path = "net" }
sal-zinit-client = { path = "zinit_client" }
sal-process = { path = "process" }
sal-virt = { path = "virt" }
sal-postgresclient = { path = "postgresclient" }
sal-vault = { path = "vault" }
sal-rhai = { path = "rhai" }
sal-rfs-client = { path = "rfs-client" }

View File

@@ -1,239 +0,0 @@
# SAL Publishing Guide
This guide explains how to publish SAL crates to crates.io and how users can consume them.
## 🎯 Publishing Strategy
SAL uses a **modular publishing approach** where each module is published as an individual crate. This allows users to install only the functionality they need, reducing compilation time and binary size.
## 📦 Crate Structure
### Individual Crates
Each SAL module is published as a separate crate:
| Crate Name | Description | Category |
|------------|-------------|----------|
| `sal-os` | Operating system operations | Core |
| `sal-process` | Process management | Core |
| `sal-text` | Text processing utilities | Core |
| `sal-net` | Network operations | Core |
| `sal-git` | Git repository management | Infrastructure |
| `sal-vault` | Cryptographic operations | Infrastructure |
| `sal-kubernetes` | Kubernetes cluster management | Infrastructure |
| `sal-virt` | Virtualization tools (Buildah, nerdctl) | Infrastructure |
| `sal-redisclient` | Redis database client | Clients |
| `sal-postgresclient` | PostgreSQL database client | Clients |
| `sal-zinit-client` | Zinit process supervisor client | Clients |
| `sal-mycelium` | Mycelium network client | Clients |
| `sal-rhai` | Rhai scripting integration | Scripting |
### Meta-crate
The main `sal` crate serves as a meta-crate that re-exports all modules with optional features:
```toml
[dependencies]
sal = { version = "0.1.0", features = ["os", "process", "text"] }
```
## 🚀 Publishing Process
### Prerequisites
1. **Crates.io Account**: Ensure you have a crates.io account and API token (see the example after this list)
2. **Repository Access**: Ensure the repository URL is accessible
3. **Version Consistency**: All crates should use the same version number
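For reference, a minimal sketch of authenticating with crates.io before publishing; the token value is a placeholder, and the environment-variable form mirrors what the CI workflow above uses:
```bash
# One-time interactive login (cargo stores the token locally)
cargo login <your-crates-io-api-token>

# Or export the token for non-interactive use, as the publish workflow does
export CARGO_REGISTRY_TOKEN=<your-crates-io-api-token>
cargo publish --dry-run
```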
### Publishing Individual Crates
Each crate can be published independently:
```bash
# Publish core modules
cd os && cargo publish
cd ../process && cargo publish
cd ../text && cargo publish
cd ../net && cargo publish
# Publish infrastructure modules
cd ../git && cargo publish
cd ../vault && cargo publish
cd ../kubernetes && cargo publish
cd ../virt && cargo publish
# Publish client modules
cd ../redisclient && cargo publish
cd ../postgresclient && cargo publish
cd ../zinit_client && cargo publish
cd ../mycelium && cargo publish
# Publish scripting module
cd ../rhai && cargo publish
# Finally, publish the meta-crate
cd .. && cargo publish
```
### Automated Publishing
Use the comprehensive publishing script:
```bash
# Test the publishing process (safe)
./scripts/publish-all.sh --dry-run --version 0.1.0
# Actually publish to crates.io
./scripts/publish-all.sh --version 0.1.0
```
The script handles:
- **Dependency order** - Publishes crates in correct dependency order
- **Path dependencies** - Automatically updates path deps to version deps (illustrated below)
- **Rate limiting** - Waits between publishes to avoid rate limits
- **Error handling** - Stops on failures with clear error messages
- **Dry run mode** - Test without actually publishing
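For example, the path-to-version rewrite turns a workspace path dependency into one that carries a publishable version. A sketch (the version number is illustrative):
```toml
# During development (workspace path dependency)
sal-git = { path = "git" }

# What the publish script rewrites it to before `cargo publish`
# (crates.io ignores `path` and resolves the crate by `version`)
sal-git = { version = "0.1.0", path = "git" }
```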
## 👥 User Consumption
### Installation Options
#### Option 1: Individual Crates (Recommended)
Users install only what they need:
```bash
# Core functionality
cargo add sal-os sal-process sal-text sal-net
# Database operations
cargo add sal-redisclient sal-postgresclient
# Infrastructure management
cargo add sal-git sal-vault sal-kubernetes
# Service integration
cargo add sal-zinit-client sal-mycelium
# Scripting
cargo add sal-rhai
```
**Usage:**
```rust
use sal_os::fs;
use sal_process::run;
use sal_git::GitManager;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let files = fs::list_files(".")?;
let result = run::command("echo hello")?;
let git = GitManager::new(".")?;
Ok(())
}
```
#### Option 2: Meta-crate with Features
Users can use the main crate with selective features:
```bash
# Specific modules
cargo add sal --features os,process,text
# Feature groups
cargo add sal --features core # os, process, text, net
cargo add sal --features clients # redisclient, postgresclient, zinit_client, mycelium
cargo add sal --features infrastructure # git, vault, kubernetes, virt
cargo add sal --features scripting # rhai
# Everything
cargo add sal --features all
```
**Usage:**
```rust
// Cargo.toml: sal = { version = "0.1.0", features = ["os", "process", "git"] }
use sal::os::fs;
use sal::process::run;
use sal::git::GitManager;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let files = fs::list_files(".")?;
let result = run::command("echo hello")?;
let git = GitManager::new(".")?;
Ok(())
}
```
### Feature Groups
The meta-crate provides convenient feature groups:
- **`core`**: Essential system operations (os, process, text, net)
- **`clients`**: Database and service clients (redisclient, postgresclient, zinit_client, mycelium)
- **`infrastructure`**: Infrastructure management tools (git, vault, kubernetes, virt)
- **`scripting`**: Rhai scripting support (rhai)
- **`all`**: Everything included
## 📋 Version Management
### Semantic Versioning
All SAL crates follow semantic versioning:
- **Major version**: Breaking API changes
- **Minor version**: New features, backward compatible
- **Patch version**: Bug fixes, backward compatible
### Synchronized Releases
All crates are released with the same version number to ensure compatibility:
```toml
# All crates use the same version
sal-os = "0.1.0"
sal-process = "0.1.0"
sal-git = "0.1.0"
# etc.
```
## 🔧 Maintenance
### Updating Dependencies
When updating dependencies (a short sketch follows the list):
1. Update `Cargo.toml` in the workspace root
2. Update individual crate dependencies if needed
3. Test all crates: `cargo test --workspace`
4. Publish with incremented version numbers
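A minimal sketch of that flow, assuming `cargo-edit` is installed for `cargo set-version` (the version number is illustrative):
```bash
# 1-2. Edit the shared dependency versions in the root Cargo.toml (and any
#      crate-specific overrides), then refresh the lockfile
cargo update

# 3. Test all crates
cargo test --workspace

# 4. Bump the release version (run per crate, as the publish workflow does)
cargo set-version 0.1.1
```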
### Adding New Modules
To add a new SAL module (a sketch of the changes follows the list):
1. Create the new crate directory
2. Add to workspace members in root `Cargo.toml`
3. Add optional dependency in root `Cargo.toml`
4. Add feature flag in root `Cargo.toml`
5. Add conditional re-export in `src/lib.rs`
6. Update documentation
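A sketch of the `Cargo.toml` and `src/lib.rs` changes for a hypothetical `sal-example` module, following the same pattern the existing modules use:
```toml
# Root Cargo.toml (only the added lines are shown; "sal-example" is hypothetical)
[workspace]
members = ["example"]                               # step 2

[workspace.dependencies]
sal-example = { path = "example" }                  # shared path dependency

[dependencies]
sal-example = { workspace = true, optional = true } # step 3

[features]
example = ["dep:sal-example"]                       # step 4
```
```rust
// src/lib.rs (step 5): conditional re-export behind the feature flag
#[cfg(feature = "example")]
pub use sal_example as example;
```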
## 🎉 Benefits
### For Users
- **Minimal Dependencies**: Install only what you need
- **Faster Builds**: Smaller dependency trees compile faster
- **Smaller Binaries**: Reduced binary size
- **Clear Dependencies**: Explicit about what functionality is used
### For Maintainers
- **Independent Releases**: Can release individual crates as needed
- **Focused Testing**: Test individual modules in isolation
- **Clear Ownership**: Each crate has clear responsibility
- **Easier Maintenance**: Smaller, focused codebases
This publishing strategy provides the best of both worlds: modularity for users who want minimal dependencies, and convenience for users who prefer a single crate with features.

298
README.md
View File

@@ -1,136 +1,228 @@
# Herocode Herolib Rust Repository
# SAL (System Abstraction Layer)
## Overview
**Version: 0.1.0**
This repository contains the **Herocode Herolib** Rust library and a collection of scripts, examples, and utilities for building, testing, and publishing the SAL (System Abstraction Layer) crates. The repository includes:
SAL is a comprehensive Rust library designed to provide a unified and simplified interface for a wide array of system-level operations and interactions. It abstracts platform-specific details, enabling developers to write robust, cross-platform code with greater ease. SAL also includes `herodo`, a powerful command-line tool for executing Rhai scripts that leverage SAL's capabilities for automation and system management tasks.
- **Rust crates** for various system components (e.g., `os`, `process`, `text`, `git`, `vault`, `kubernetes`, etc.).
- **Rhai scripts** and test suites for each crate.
- **Utility scripts** to automate common development tasks.
## 🏗️ **Cargo Workspace Structure**
## Scripts
SAL is organized as a **Cargo workspace** with 16 specialized crates:
The repository provides three primary helper scripts located in the repository root:
- **Root Package**: `sal` - Umbrella crate that re-exports all modules
- **13 Library Crates**: Specialized SAL modules (git, text, os, net, etc.)
- **1 Binary Crate**: `herodo` - Rhai script execution engine
- **1 Integration Crate**: `rhai` - Rhai scripting integration layer
| Script | Description | Typical Usage |
|--------|-------------|--------------|
| `scripts/publish-all.sh` | Publishes all SAL crates to **crates.io** in the correct dependency order. Handles version bumping, dependency updates, dry-run mode, and rate-limiting. | `./scripts/publish-all.sh [--dry-run] [--wait <seconds>] [--version <ver>]` |
| `build_herodo.sh` | Builds the `herodo` binary from the `herodo` package and optionally runs a specified Rhai script. | `./build_herodo.sh [script_name]` |
| `run_rhai_tests.sh` | Executes all Rhai test suites across the repository, logging results and providing a summary. | `./run_rhai_tests.sh` |
This workspace structure provides excellent build performance, dependency management, and maintainability.
Below are detailed usage instructions for each script.
### **🚀 Workspace Benefits**
- **Unified Dependency Management**: Shared dependencies across all crates with consistent versions
- **Optimized Build Performance**: Parallel compilation and shared build artifacts
- **Simplified Testing**: Run tests across all modules with a single command
- **Modular Architecture**: Each module is independently maintainable while sharing common infrastructure
- **Production Ready**: 100% test coverage with comprehensive Rhai integration tests
---
## Core Features
## 1. `scripts/publish-all.sh`
SAL offers a broad spectrum of functionalities, including:
### Purpose
- **System Operations**: File and directory management, environment variable access, system information retrieval, and OS-specific commands.
- **Process Management**: Create, monitor, control, and interact with system processes.
- **Containerization Tools**:
- Integration with **Buildah** for building OCI/Docker-compatible container images.
- Integration with **nerdctl** for managing containers (run, stop, list, build, etc.).
- **Version Control**: Programmatic interaction with Git repositories (clone, commit, push, pull, status, etc.).
- **Database Clients**:
- **Redis**: Robust client for interacting with Redis servers.
- **PostgreSQL**: Client for executing queries and managing PostgreSQL databases.
- **Scripting Engine**: In-built support for the **Rhai** scripting language, allowing SAL functionalities to be scripted and automated, primarily through the `herodo` tool.
- **Networking & Services**:
- **Mycelium**: Tools for Mycelium network peer management and message passing.
- **Zinit**: Client for interacting with the Zinit process supervision system.
- **RFS (Remote/Virtual Filesystem)**: Mount, manage, pack, and unpack various types of filesystems (local, SSH, S3, WebDAV).
- **Text Processing**: A suite of utilities for text manipulation, formatting, and regular expressions.
- **Cryptography (`vault`)**: Functions for common cryptographic operations.
- Publishes each SAL crate in the correct dependency order.
- Updates crate versions (if `--version` is supplied).
- Updates path dependencies to version dependencies before publishing.
- Supports **dry-run** mode to preview actions without publishing.
- Handles rate-limiting between crate publishes.
## `herodo`: The SAL Scripting Tool
### Options
| Option | Description |
|--------|-------------|
| `--dry-run` | Shows what would be published without actually publishing. |
| `--wait <seconds>` | Wait time between publishes (default: 15s). |
| `--version <ver>` | Set a new version for all crates (updates `Cargo.toml` files). |
| `-h, --help` | Show help message. |
### Example Usage
```bash
# Dry run - no crates will be published
./scripts/publish-all.sh --dry-run
# Publish with a custom wait time and version bump
./scripts/publish-all.sh --wait 30 --version 1.2.3
# Normal publish (no dry-run)
./scripts/publish-all.sh
```
### Notes
- Must be run from the repository root (where `Cargo.toml` lives).
- Requires `cargo` and a logged-in `cargo` session (`cargo login`).
- The script automatically updates dependencies in each crate's `Cargo.toml` to use the new version before publishing.
---
## 2. `build_herodo.sh`
### Purpose
- Builds the `herodo` binary from the `herodo` package.
- Copies the binary to a system-wide location (`/usr/local/bin`) if run as root, otherwise to `~/hero/bin`.
- Optionally runs a specified Rhai script after building.
`herodo` is a command-line utility bundled with SAL that executes Rhai scripts. It empowers users to automate tasks and orchestrate complex workflows by leveraging SAL's diverse modules directly from scripts.
### Usage
```bash
# Build only
./build_herodo.sh
# Execute a single Rhai script
herodo script.rhai
# Build and run a specific Rhai script (e.g., `example`):
./build_herodo.sh example
# Execute a script with arguments
herodo script.rhai arg1 arg2
# Execute all .rhai scripts in a directory
herodo /path/to/scripts/
```
### Details
If a directory is provided, `herodo` will execute all `.rhai` scripts within that directory (and its subdirectories) in alphabetical order.
- The script changes to its own directory, builds the `herodo` crate (`cargo build`), and copies the binary.
- If a script name is provided, it looks for the script in:
- `src/rhaiexamples/<name>.rhai`
- `src/herodo/scripts/<name>.rhai`
- If the script is not found, the script exits with an error.
### Scriptable SAL Modules via `herodo`
---
The following SAL modules and functionalities are exposed to the Rhai scripting environment through `herodo`:
## 3. `run_rhai_tests.sh`
- **OS (`os`)**: Comprehensive file system operations, file downloading & installation, and system package management. [Documentation](os/README.md)
- **Process (`process`)**: Robust command and script execution, plus process management (listing, finding, killing, checking command existence). [Documentation](process/README.md)
- **Text (`text`)**: String manipulation, prefixing, path/name fixing, text replacement, and templating. [Documentation](text/README.md)
- **Net (`net`)**: Network operations, HTTP requests, and connectivity utilities. [Documentation](net/README.md)
- **Git (`git`)**: High-level repository management and generic Git command execution with Redis-backed authentication (clone, pull, push, commit, etc.). [Documentation](git/README.md)
- **Vault (`vault`)**: Cryptographic operations, keypair management, encryption, decryption, hashing, etc. [Documentation](vault/README.md)
- **Redis Client (`redisclient`)**: Execute Redis commands (`redis_get`, `redis_set`, `redis_execute`, etc.). [Documentation](redisclient/README.md)
- **PostgreSQL Client (`postgresclient`)**: Execute SQL queries against PostgreSQL databases. [Documentation](postgresclient/README.md)
- **Zinit (`zinit_client`)**: Client for Zinit process supervisor (service management, logs). [Documentation](zinit_client/README.md)
- **Mycelium (`mycelium`)**: Client for Mycelium decentralized networking API (node info, peer management, messaging). [Documentation](mycelium/README.md)
- **Virtualization (`virt`)**:
- **Buildah**: OCI/Docker image building functions. [Documentation](virt/README.md)
- **nerdctl**: Container lifecycle management (`nerdctl_run`, `nerdctl_stop`, `nerdctl_images`, `nerdctl_image_build`, etc.)
- **RFS**: Mount various filesystems (local, SSH, S3, etc.), pack/unpack filesystem layers.
### Purpose
### Example `herodo` Rhai Script
- Runs **all** Rhai test suites across the repository.
- Supports both the legacy `rhai_tests` directory and the newer `*/tests/rhai` layout.
- Logs output to `run_rhai_tests.log` and prints a summary.
```rhai
// file: /opt/scripts/example_task.rhai
### Usage
// OS operations
println("Checking for /tmp/my_app_data...");
if !exist("/tmp/my_app_data") {
mkdir("/tmp/my_app_data");
println("Created directory /tmp/my_app_data");
}
// Redis operations
println("Setting Redis key 'app_status' to 'running'");
redis_set("app_status", "running");
let status = redis_get("app_status");
println("Current app_status from Redis: " + status);
// Process execution
println("Listing files in /tmp:");
let output = run("ls -la /tmp");
println(output.stdout);
println("Script finished.");
```
Run with: `herodo /opt/scripts/example_task.rhai`
For more examples, check the individual module test directories (e.g., `text/tests/rhai/`, `os/tests/rhai/`, etc.) in this repository.
## Using SAL as a Rust Library
Add SAL as a dependency to your `Cargo.toml`:
```toml
[dependencies]
sal = "0.1.0" # Or the latest version
```
### Rust Example: Using Redis Client
```rust
use sal::redisclient::{get_global_client, execute_cmd_with_args};
use redis::RedisResult;
async fn example_redis_interaction() -> RedisResult<()> {
// Get a connection from the global pool
let mut conn = get_global_client().await?.get_async_connection().await?;
// Set a value
execute_cmd_with_args(&mut conn, "SET", vec!["my_key", "my_value"]).await?;
println!("Set 'my_key' to 'my_value'");
// Get a value
let value: String = execute_cmd_with_args(&mut conn, "GET", vec!["my_key"]).await?;
println!("Retrieved value for 'my_key': {}", value);
Ok(())
}
#[tokio::main]
async fn main() {
if let Err(e) = example_redis_interaction().await {
eprintln!("Redis Error: {}", e);
}
}
```
*(Note: The Redis client API might have evolved; please refer to `src/redisclient/mod.rs` and its documentation for the most current usage.)*
## 📦 **Workspace Modules Overview**
SAL is organized as a Cargo workspace with the following crates:
### **Core Library Modules**
- **`sal-os`**: Core OS interactions, file system operations, environment access
- **`sal-process`**: Process creation, management, and control
- **`sal-text`**: Utilities for text processing and manipulation
- **`sal-net`**: Network operations, HTTP requests, and connectivity utilities
### **Integration Modules**
- **`sal-git`**: Git repository management and operations
- **`sal-vault`**: Cryptographic functions and keypair management
- **`sal-rhai`**: Integration layer for the Rhai scripting engine, used by `herodo`
### **Client Modules**
- **`sal-redisclient`**: Client for Redis database interactions
- **`sal-postgresclient`**: Client for PostgreSQL database interactions
- **`sal-zinit-client`**: Client for Zinit process supervisor
- **`sal-mycelium`**: Client for Mycelium network operations
### **Specialized Modules**
- **`sal-virt`**: Virtualization-related utilities (buildah, nerdctl, rfs)
### **Root Package & Binary**
- **`sal`**: Root umbrella crate that re-exports all modules
- **`herodo`**: Command-line binary for executing Rhai scripts
## 🔨 **Building SAL**
Build the entire workspace (all crates) using Cargo:
```bash
# Run all tests
# Build all workspace members
cargo build --workspace
# Build for release
cargo build --workspace --release
# Build specific crate
cargo build -p sal-text
cargo build -p herodo
```
The `herodo` executable will be located at `target/debug/herodo` or `target/release/herodo`.
## 🧪 **Running Tests**
### **Rust Unit Tests**
```bash
# Run all workspace tests
cargo test --workspace
# Run tests for specific crate
cargo test -p sal-text
cargo test -p sal-os
# Run only library tests (faster)
cargo test --workspace --lib
```
### **Rhai Integration Tests**
Run comprehensive Rhai script tests that exercise `herodo` and SAL's scripted functionalities:
```bash
# Run all Rhai integration tests (16 modules)
./run_rhai_tests.sh
# Results: 16/16 modules pass with 100% success rate
```
### Output
- Colored console output for readability.
- Log file (`run_rhai_tests.log`) contains full output for later review.
- Summary includes total modules, passed, and failed counts.
- Exit code `0` if all tests pass, `1` otherwise (a minimal sketch of the runner loop follows).
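A minimal sketch of what such a runner loop amounts to, assuming only that `herodo` accepts a directory of scripts as described above (this is not the script's exact contents):
```bash
#!/usr/bin/env bash
set -o pipefail
LOG=run_rhai_tests.log
: > "$LOG"
failed=0

# Cover both the legacy rhai_tests directory and the newer */tests/rhai layout
for dir in rhai_tests */tests/rhai; do
    [ -d "$dir" ] || continue
    echo "=== Running $dir ===" | tee -a "$LOG"
    if ! herodo "$dir" 2>&1 | tee -a "$LOG"; then
        failed=$((failed + 1))
    fi
done

echo "Failed modules: $failed" | tee -a "$LOG"
exit $((failed > 0))
```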
---
## General Development Workflow
1. **Build**: Use `build_herodo.sh` to compile the `herodo` binary.
2. **Test**: Run `run_rhai_tests.sh` to ensure all Rhai scripts pass.
3. **Publish**: When ready to release, use `scripts/publish-all.sh` (with `--dry-run` first to verify).
## Prerequisites
- **Rust toolchain** (`cargo`, `rustc`) installed.
- **Rhai** interpreter (`herodo`) built and available.
- **Git** for version control.
- **Cargo login** for publishing to crates.io.
The Rhai tests validate real-world functionality across all SAL modules and provide comprehensive integration testing.
## License
See `LICENSE` for details.
---
**Happy coding!**
SAL is licensed under the Apache License 2.0. See the [LICENSE](LICENSE) file for details.

View File

View File

@@ -1,14 +0,0 @@
# Environment Configuration
To set up your environment variables:
1. Copy the template file to `env.sh`:
```bash
cp config/myenv_templ.sh config/env.sh
```
2. Edit `config/env.sh` and fill in your specific values for the variables.
3. This file (`config/env.sh`) is excluded from version control by the project's `.gitignore` configuration, ensuring your sensitive information remains local and is never committed to the repository.

View File

@@ -1,6 +0,0 @@
export OPENROUTER_API_KEY=""
export GROQ_API_KEY=""
export CEREBRAS_API_KEY=""
export OPENAI_API_KEY="sk-xxxxxxx"

View File

@@ -1,76 +1,64 @@
# SAL Vault Examples
# Hero Vault Cryptography Examples
This directory contains examples demonstrating the SAL Vault functionality.
This directory contains examples demonstrating the Hero Vault cryptography functionality integrated into the SAL project.
## Overview
SAL Vault provides secure key management and cryptographic operations including:
Hero Vault provides cryptographic operations including:
- Vault creation and management
- KeySpace operations (encrypted key-value stores)
- Symmetric key generation and operations
- Asymmetric key operations (signing and verification)
- Secure key derivation from passwords
- Key space management (creation, loading, encryption, decryption)
- Keypair management (creation, selection, listing)
- Digital signatures (signing and verification)
- Symmetric encryption (key generation, encryption, decryption)
- Ethereum wallet functionality
- Smart contract interactions
- Key-value store with encryption
## Current Status
## Example Files
⚠️ **Note**: The vault module is currently being updated to use Lee's implementation.
The Rhai scripting integration is temporarily disabled while we adapt the examples
to work with the new vault API.
- `example.rhai` - Basic example demonstrating key management, signing, and encryption
- `advanced_example.rhai` - Advanced example with error handling, conditional logic, and more complex operations
- `key_persistence_example.rhai` - Demonstrates creating and saving a key space to disk
- `load_existing_space.rhai` - Shows how to load a previously created key space and use its keypairs
- `contract_example.rhai` - Demonstrates loading a contract ABI and interacting with smart contracts
- `agung_send_transaction.rhai` - Demonstrates sending native tokens on the Agung network
- `agung_contract_with_args.rhai` - Shows how to interact with contracts with arguments on Agung
## Available Operations
## Running the Examples
- **Vault Management**: Create and manage vault instances
- **KeySpace Operations**: Open encrypted key-value stores within vaults
- **Symmetric Encryption**: Generate keys and encrypt/decrypt data
- **Asymmetric Operations**: Create keypairs, sign messages, verify signatures
You can run the examples using the `herodo` tool that comes with the SAL project:
## Example Files (Legacy - Sameh's Implementation)
```bash
# Run a single example
herodo --path example.rhai
⚠️ **These examples are currently archived and use the previous vault implementation**:
- `_archive/example.rhai` - Basic example demonstrating key management, signing, and encryption
- `_archive/advanced_example.rhai` - Advanced example with error handling and complex operations
- `_archive/key_persistence_example.rhai` - Demonstrates creating and saving a key space to disk
- `_archive/load_existing_space.rhai` - Shows how to load a previously created key space
- `_archive/contract_example.rhai` - Demonstrates smart contract interactions (Ethereum)
- `_archive/agung_send_transaction.rhai` - Demonstrates Ethereum transactions on Agung network
- `_archive/agung_contract_with_args.rhai` - Shows contract interactions with arguments
## Current Implementation (Lee's Vault)
The current vault implementation provides:
```rust
// Create a new vault
let vault = Vault::new(&path).await?;
// Open an encrypted keyspace
let keyspace = vault.open_keyspace("my_space", "password").await?;
// Perform cryptographic operations
// (API documentation coming soon)
# Run all examples using the provided script
./run_examples.sh
```
## Migration Status
## Key Space Storage
- **Vault Core**: Lee's implementation is active
- **Archive**: Sameh's implementation preserved in `vault/_archive/`
- **Rhai Integration**: Being developed for Lee's implementation
- **Examples**: Will be updated to use Lee's API
- **Ethereum Features**: Not available in Lee's implementation
Key spaces are stored in the `~/.hero-vault/key-spaces/` directory by default. Each key space is stored in a separate JSON file named after the key space (e.g., `my_space.json`).
## Ethereum Functionality
The Hero Vault module provides comprehensive Ethereum wallet functionality:
- Creating and managing wallets for different networks
- Sending ETH transactions
- Checking balances
- Interacting with smart contracts (read and write functions)
- Support for multiple networks (Ethereum, Gnosis, Peaq, Agung, etc.)
## Security
The vault uses:
Key spaces are encrypted with ChaCha20Poly1305 using a key derived from the provided password. The encryption ensures that the key material is secure at rest.
- **ChaCha20Poly1305** for symmetric encryption
- **Password-based key derivation** for keyspace encryption
- **Secure key storage** with proper isolation (a sketch of the encryption step follows)
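To make the scheme concrete, here is a minimal sketch of password-derived ChaCha20Poly1305 encryption using the `pbkdf2`, `sha2`, and `chacha20poly1305` crates pinned in the workspace dependencies. This is not the vault's actual API; the function name, salt handling, and iteration count are assumptions:
```rust
use chacha20poly1305::{
    aead::{Aead, KeyInit},
    ChaCha20Poly1305, Key, Nonce,
};
use pbkdf2::pbkdf2_hmac;
use sha2::Sha256;

fn encrypt_with_password(
    password: &str,
    salt: &[u8],
    plaintext: &[u8],
) -> Result<Vec<u8>, chacha20poly1305::aead::Error> {
    // Derive a 256-bit key from the password (iteration count is an assumption)
    let mut key_bytes = [0u8; 32];
    pbkdf2_hmac::<Sha256>(password.as_bytes(), salt, 600_000, &mut key_bytes);

    let cipher = ChaCha20Poly1305::new(Key::from_slice(&key_bytes));
    // A real implementation must use a fresh random nonce per message and store it
    // alongside the ciphertext; a fixed nonce here only keeps the sketch short
    let nonce = Nonce::from_slice(b"unique nonce"); // 12 bytes
    cipher.encrypt(nonce, plaintext)
}
```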
## Best Practices
## Next Steps
1. **Rhai Integration**: Implement Rhai bindings for Lee's vault
2. **New Examples**: Create examples using Lee's simpler API
3. **Documentation**: Complete API documentation for Lee's implementation
4. **Migration Guide**: Provide guidance for users migrating from Sameh's implementation
1. **Use Strong Passwords**: Since the security of your key spaces depends on the strength of your passwords, use strong, unique passwords.
2. **Backup Key Spaces**: Regularly backup your key spaces directory to prevent data loss.
3. **Script Organization**: Split your scripts into logical units, with separate scripts for key creation and key usage.
4. **Error Handling**: Always check the return values of functions to ensure operations succeeded before proceeding.
5. **Network Selection**: When working with Ethereum functionality, be explicit about which network you're targeting to avoid confusion.
6. **Gas Management**: For Ethereum transactions, consider gas costs and set appropriate gas limits.

View File

@@ -1,72 +0,0 @@
//! Basic Kubernetes operations example
//!
//! This script demonstrates basic Kubernetes operations using the SAL Kubernetes module.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Appropriate permissions for the operations
//!
//! Usage:
//! herodo examples/kubernetes/basic_operations.rhai
print("=== SAL Kubernetes Basic Operations Example ===");
// Create a KubernetesManager for the default namespace
print("Creating KubernetesManager for 'default' namespace...");
let km = kubernetes_manager_new("default");
print("✓ KubernetesManager created for namespace: " + namespace(km));
// List all pods in the namespace
print("\n--- Listing Pods ---");
let pods = pods_list(km);
print("Found " + pods.len() + " pods in the namespace:");
for pod in pods {
print(" - " + pod);
}
// List all services in the namespace
print("\n--- Listing Services ---");
let services = services_list(km);
print("Found " + services.len() + " services in the namespace:");
for service in services {
print(" - " + service);
}
// List all deployments in the namespace
print("\n--- Listing Deployments ---");
let deployments = deployments_list(km);
print("Found " + deployments.len() + " deployments in the namespace:");
for deployment in deployments {
print(" - " + deployment);
}
// Get resource counts
print("\n--- Resource Counts ---");
let counts = resource_counts(km);
print("Resource counts in namespace '" + namespace(km) + "':");
for resource_type in counts.keys() {
print(" " + resource_type + ": " + counts[resource_type]);
}
// List all namespaces (cluster-wide operation)
print("\n--- Listing All Namespaces ---");
let namespaces = namespaces_list(km);
print("Found " + namespaces.len() + " namespaces in the cluster:");
for ns in namespaces {
print(" - " + ns);
}
// Check if specific namespaces exist
print("\n--- Checking Namespace Existence ---");
let test_namespaces = ["default", "kube-system", "non-existent-namespace"];
for ns in test_namespaces {
let exists = namespace_exists(km, ns);
if exists {
print("✓ Namespace '" + ns + "' exists");
} else {
print("✗ Namespace '" + ns + "' does not exist");
}
}
print("\n=== Example completed successfully! ===");

View File

@@ -1,134 +0,0 @@
//! Generic Application Deployment Example
//!
//! This example shows how to deploy any containerized application using the
//! KubernetesManager convenience methods. This works for any Docker image.
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create Kubernetes manager
let km = KubernetesManager::new("default").await?;
// Clean up any existing resources first
println!("=== Cleaning up existing resources ===");
let apps_to_clean = ["web-server", "node-app", "mongodb"];
for app in &apps_to_clean {
match km.deployment_delete(app).await {
Ok(_) => println!("✓ Deleted existing deployment: {}", app),
Err(_) => println!("✓ No existing deployment to delete: {}", app),
}
match km.service_delete(app).await {
Ok(_) => println!("✓ Deleted existing service: {}", app),
Err(_) => println!("✓ No existing service to delete: {}", app),
}
}
// Example 1: Simple web server deployment
println!("\n=== Example 1: Simple Nginx Web Server ===");
km.deploy_application("web-server", "nginx:latest", 2, 80, None, None)
.await?;
println!("✅ Nginx web server deployed!");
// Example 2: Node.js application with labels
println!("\n=== Example 2: Node.js Application ===");
let mut node_labels = HashMap::new();
node_labels.insert("app".to_string(), "node-app".to_string());
node_labels.insert("tier".to_string(), "backend".to_string());
node_labels.insert("environment".to_string(), "production".to_string());
// Configure Node.js environment variables
let mut node_env_vars = HashMap::new();
node_env_vars.insert("NODE_ENV".to_string(), "production".to_string());
node_env_vars.insert("PORT".to_string(), "3000".to_string());
node_env_vars.insert("LOG_LEVEL".to_string(), "info".to_string());
node_env_vars.insert("MAX_CONNECTIONS".to_string(), "1000".to_string());
km.deploy_application(
"node-app", // name
"node:18-alpine", // image
3, // replicas - scale to 3 instances
3000, // port
Some(node_labels), // labels
Some(node_env_vars), // environment variables
)
.await?;
println!("✅ Node.js application deployed!");
// Example 3: Database deployment (any database)
println!("\n=== Example 3: MongoDB Database ===");
let mut mongo_labels = HashMap::new();
mongo_labels.insert("app".to_string(), "mongodb".to_string());
mongo_labels.insert("type".to_string(), "database".to_string());
mongo_labels.insert("engine".to_string(), "mongodb".to_string());
// Configure MongoDB environment variables
let mut mongo_env_vars = HashMap::new();
mongo_env_vars.insert(
"MONGO_INITDB_ROOT_USERNAME".to_string(),
"admin".to_string(),
);
mongo_env_vars.insert(
"MONGO_INITDB_ROOT_PASSWORD".to_string(),
"mongopassword".to_string(),
);
mongo_env_vars.insert("MONGO_INITDB_DATABASE".to_string(), "myapp".to_string());
km.deploy_application(
"mongodb", // name
"mongo:6.0", // image
1, // replicas - single instance for simplicity
27017, // port
Some(mongo_labels), // labels
Some(mongo_env_vars), // environment variables
)
.await?;
println!("✅ MongoDB deployed!");
// Check status of all deployments
println!("\n=== Checking Deployment Status ===");
let deployments = km.deployments_list().await?;
for deployment in &deployments {
if let Some(name) = &deployment.metadata.name {
let total_replicas = deployment
.spec
.as_ref()
.and_then(|s| s.replicas)
.unwrap_or(0);
let ready_replicas = deployment
.status
.as_ref()
.and_then(|s| s.ready_replicas)
.unwrap_or(0);
println!(
"{}: {}/{} replicas ready",
name, ready_replicas, total_replicas
);
}
}
println!("\n🎉 All deployments completed!");
println!("\n💡 Key Points:");
println!(" • Any Docker image can be deployed using this simple interface");
println!(" • Use labels to organize and identify your applications");
println!(
" • The same method works for databases, web servers, APIs, and any containerized app"
);
println!(" • For advanced configuration, use the individual KubernetesManager methods");
println!(
" • Environment variables and resource limits can be added via direct Kubernetes API"
);
Ok(())
}

View File

@@ -1,79 +0,0 @@
//! PostgreSQL Cluster Deployment Example (Rhai)
//!
//! This script shows how to deploy a PostgreSQL cluster using Rhai scripting
//! with the KubernetesManager convenience methods.
print("=== PostgreSQL Cluster Deployment ===");
// Create Kubernetes manager for the database namespace
print("Creating Kubernetes manager for 'database' namespace...");
let km = kubernetes_manager_new("database");
print("✓ Kubernetes manager created");
// Create the namespace if it doesn't exist
print("Creating namespace 'database' if it doesn't exist...");
try {
create_namespace(km, "database");
print("✓ Namespace 'database' created");
} catch(e) {
if e.to_string().contains("already exists") {
print("✓ Namespace 'database' already exists");
} else {
print("⚠️ Warning: " + e);
}
}
// Clean up any existing resources first
print("\nCleaning up any existing PostgreSQL resources...");
try {
delete_deployment(km, "postgres-cluster");
print("✓ Deleted existing deployment");
} catch(e) {
print("✓ No existing deployment to delete");
}
try {
delete_service(km, "postgres-cluster");
print("✓ Deleted existing service");
} catch(e) {
print("✓ No existing service to delete");
}
// Create PostgreSQL cluster using the convenience method
print("\nDeploying PostgreSQL cluster...");
try {
// Deploy PostgreSQL using the convenience method
let result = deploy_application(km, "postgres-cluster", "postgres:15", 2, 5432, #{
"app": "postgres-cluster",
"type": "database",
"engine": "postgresql"
}, #{
"POSTGRES_DB": "myapp",
"POSTGRES_USER": "postgres",
"POSTGRES_PASSWORD": "secretpassword",
"PGDATA": "/var/lib/postgresql/data/pgdata"
});
print("✓ " + result);
print("\n✅ PostgreSQL cluster deployed successfully!");
print("\n📋 Connection Information:");
print(" Host: postgres-cluster.database.svc.cluster.local");
print(" Port: 5432");
print(" Database: postgres (default)");
print(" Username: postgres (default)");
print("\n🔧 To connect from another pod:");
print(" psql -h postgres-cluster.database.svc.cluster.local -U postgres");
print("\n💡 Next steps:");
print(" • Set POSTGRES_PASSWORD environment variable");
print(" • Configure persistent storage");
print(" • Set up backup and monitoring");
} catch(e) {
print("❌ Failed to deploy PostgreSQL cluster: " + e);
}
print("\n=== Deployment Complete ===");

View File

@@ -1,112 +0,0 @@
//! PostgreSQL Cluster Deployment Example
//!
//! This example shows how to deploy a PostgreSQL cluster using the
//! KubernetesManager convenience methods.
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create Kubernetes manager for the database namespace
let km = KubernetesManager::new("database").await?;
// Create the namespace if it doesn't exist
println!("Creating namespace 'database' if it doesn't exist...");
match km.namespace_create("database").await {
Ok(_) => println!("✓ Namespace 'database' created"),
Err(e) => {
if e.to_string().contains("already exists") {
println!("✓ Namespace 'database' already exists");
} else {
return Err(e.into());
}
}
}
// Clean up any existing resources first
println!("Cleaning up any existing PostgreSQL resources...");
match km.deployment_delete("postgres-cluster").await {
Ok(_) => println!("✓ Deleted existing deployment"),
Err(_) => println!("✓ No existing deployment to delete"),
}
match km.service_delete("postgres-cluster").await {
Ok(_) => println!("✓ Deleted existing service"),
Err(_) => println!("✓ No existing service to delete"),
}
// Configure PostgreSQL-specific labels
let mut labels = HashMap::new();
labels.insert("app".to_string(), "postgres-cluster".to_string());
labels.insert("type".to_string(), "database".to_string());
labels.insert("engine".to_string(), "postgresql".to_string());
// Configure PostgreSQL environment variables
let mut env_vars = HashMap::new();
env_vars.insert("POSTGRES_DB".to_string(), "myapp".to_string());
env_vars.insert("POSTGRES_USER".to_string(), "postgres".to_string());
env_vars.insert(
"POSTGRES_PASSWORD".to_string(),
"secretpassword".to_string(),
);
env_vars.insert(
"PGDATA".to_string(),
"/var/lib/postgresql/data/pgdata".to_string(),
);
// Deploy the PostgreSQL cluster using the convenience method
println!("Deploying PostgreSQL cluster...");
km.deploy_application(
"postgres-cluster", // name
"postgres:15", // image
2, // replicas (1 master + 1 replica)
5432, // port
Some(labels), // labels
Some(env_vars), // environment variables
)
.await?;
println!("✅ PostgreSQL cluster deployed successfully!");
// Check deployment status
let deployments = km.deployments_list().await?;
let postgres_deployment = deployments
.iter()
.find(|d| d.metadata.name.as_ref() == Some(&"postgres-cluster".to_string()));
if let Some(deployment) = postgres_deployment {
let total_replicas = deployment
.spec
.as_ref()
.and_then(|s| s.replicas)
.unwrap_or(0);
let ready_replicas = deployment
.status
.as_ref()
.and_then(|s| s.ready_replicas)
.unwrap_or(0);
println!(
"Deployment status: {}/{} replicas ready",
ready_replicas, total_replicas
);
}
println!("\n📋 Connection Information:");
println!(" Host: postgres-cluster.database.svc.cluster.local");
println!(" Port: 5432");
println!(" Database: postgres (default)");
println!(" Username: postgres (default)");
println!(" Password: Set POSTGRES_PASSWORD environment variable");
println!("\n🔧 To connect from another pod:");
println!(" psql -h postgres-cluster.database.svc.cluster.local -U postgres");
println!("\n💡 Next steps:");
println!(" • Set environment variables for database credentials");
println!(" • Add persistent volume claims for data storage");
println!(" • Configure backup and monitoring");
Ok(())
}

View File

@@ -1,79 +0,0 @@
//! Redis Cluster Deployment Example (Rhai)
//!
//! This script shows how to deploy a Redis cluster using Rhai scripting
//! with the KubernetesManager convenience methods.
print("=== Redis Cluster Deployment ===");
// Create Kubernetes manager for the cache namespace
print("Creating Kubernetes manager for 'cache' namespace...");
let km = kubernetes_manager_new("cache");
print("✓ Kubernetes manager created");
// Create the namespace if it doesn't exist
print("Creating namespace 'cache' if it doesn't exist...");
try {
create_namespace(km, "cache");
print("✓ Namespace 'cache' created");
} catch(e) {
if e.to_string().contains("already exists") {
print("✓ Namespace 'cache' already exists");
} else {
print("⚠️ Warning: " + e);
}
}
// Clean up any existing resources first
print("\nCleaning up any existing Redis resources...");
try {
delete_deployment(km, "redis-cluster");
print("✓ Deleted existing deployment");
} catch(e) {
print("✓ No existing deployment to delete");
}
try {
delete_service(km, "redis-cluster");
print("✓ Deleted existing service");
} catch(e) {
print("✓ No existing service to delete");
}
// Create Redis cluster using the convenience method
print("\nDeploying Redis cluster...");
try {
// Deploy Redis using the convenience method
let result = deploy_application(km, "redis-cluster", "redis:7-alpine", 3, 6379, #{
"app": "redis-cluster",
"type": "cache",
"engine": "redis"
}, #{
"REDIS_PASSWORD": "redispassword",
"REDIS_PORT": "6379",
"REDIS_DATABASES": "16",
"REDIS_MAXMEMORY": "256mb",
"REDIS_MAXMEMORY_POLICY": "allkeys-lru"
});
print("✓ " + result);
print("\n✅ Redis cluster deployed successfully!");
print("\n📋 Connection Information:");
print(" Host: redis-cluster.cache.svc.cluster.local");
print(" Port: 6379");
print("\n🔧 To connect from another pod:");
print(" redis-cli -h redis-cluster.cache.svc.cluster.local");
print("\n💡 Next steps:");
print(" • Configure Redis authentication");
print(" • Set up Redis clustering configuration");
print(" • Add persistent storage");
print(" • Configure memory policies");
} catch(e) {
print("❌ Failed to deploy Redis cluster: " + e);
}
print("\n=== Deployment Complete ===");

View File

@@ -1,109 +0,0 @@
//! Redis Cluster Deployment Example
//!
//! This example shows how to deploy a Redis cluster using the
//! KubernetesManager convenience methods.
use sal_kubernetes::KubernetesManager;
use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create Kubernetes manager for the cache namespace
let km = KubernetesManager::new("cache").await?;
// Create the namespace if it doesn't exist
println!("Creating namespace 'cache' if it doesn't exist...");
match km.namespace_create("cache").await {
Ok(_) => println!("✓ Namespace 'cache' created"),
Err(e) => {
if e.to_string().contains("already exists") {
println!("✓ Namespace 'cache' already exists");
} else {
return Err(e.into());
}
}
}
// Clean up any existing resources first
println!("Cleaning up any existing Redis resources...");
match km.deployment_delete("redis-cluster").await {
Ok(_) => println!("✓ Deleted existing deployment"),
Err(_) => println!("✓ No existing deployment to delete"),
}
match km.service_delete("redis-cluster").await {
Ok(_) => println!("✓ Deleted existing service"),
Err(_) => println!("✓ No existing service to delete"),
}
// Configure Redis-specific labels
let mut labels = HashMap::new();
labels.insert("app".to_string(), "redis-cluster".to_string());
labels.insert("type".to_string(), "cache".to_string());
labels.insert("engine".to_string(), "redis".to_string());
// Configure Redis environment variables
let mut env_vars = HashMap::new();
env_vars.insert("REDIS_PASSWORD".to_string(), "redispassword".to_string());
env_vars.insert("REDIS_PORT".to_string(), "6379".to_string());
env_vars.insert("REDIS_DATABASES".to_string(), "16".to_string());
env_vars.insert("REDIS_MAXMEMORY".to_string(), "256mb".to_string());
env_vars.insert(
"REDIS_MAXMEMORY_POLICY".to_string(),
"allkeys-lru".to_string(),
);
// Deploy the Redis cluster using the convenience method
println!("Deploying Redis cluster...");
km.deploy_application(
"redis-cluster", // name
"redis:7-alpine", // image
3, // replicas (Redis cluster nodes)
6379, // port
Some(labels), // labels
Some(env_vars), // environment variables
)
.await?;
println!("✅ Redis cluster deployed successfully!");
// Check deployment status
let deployments = km.deployments_list().await?;
let redis_deployment = deployments
.iter()
.find(|d| d.metadata.name.as_ref() == Some(&"redis-cluster".to_string()));
if let Some(deployment) = redis_deployment {
let total_replicas = deployment
.spec
.as_ref()
.and_then(|s| s.replicas)
.unwrap_or(0);
let ready_replicas = deployment
.status
.as_ref()
.and_then(|s| s.ready_replicas)
.unwrap_or(0);
println!(
"Deployment status: {}/{} replicas ready",
ready_replicas, total_replicas
);
}
println!("\n📋 Connection Information:");
println!(" Host: redis-cluster.cache.svc.cluster.local");
println!(" Port: 6379");
println!(" Password: Configure REDIS_PASSWORD environment variable");
println!("\n🔧 To connect from another pod:");
println!(" redis-cli -h redis-cluster.cache.svc.cluster.local");
println!("\n💡 Next steps:");
println!(" • Configure Redis authentication with environment variables");
println!(" • Set up Redis clustering configuration");
println!(" • Add persistent volume claims for data persistence");
println!(" • Configure memory limits and eviction policies");
Ok(())
}

View File

@@ -1,208 +0,0 @@
//! Multi-namespace Kubernetes operations example
//!
//! This script demonstrates working with multiple namespaces and comparing resources across them.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Appropriate permissions for the operations
//!
//! Usage:
//! herodo examples/kubernetes/multi_namespace_operations.rhai
print("=== SAL Kubernetes Multi-Namespace Operations Example ===");
// Define namespaces to work with
let target_namespaces = ["default", "kube-system"];
let managers = #{};
print("Creating managers for multiple namespaces...");
// Create managers for each namespace
for ns in target_namespaces {
try {
let km = kubernetes_manager_new(ns);
managers[ns] = km;
print("✓ Created manager for namespace: " + ns);
} catch(e) {
print("✗ Failed to create manager for " + ns + ": " + e);
}
}
// Function to safely get resource counts
fn get_safe_counts(km) {
try {
return resource_counts(km);
} catch(e) {
print(" Warning: Could not get resource counts - " + e);
return #{};
}
}
// Function to safely get pod list
fn get_safe_pods(km) {
try {
return pods_list(km);
} catch(e) {
print(" Warning: Could not list pods - " + e);
return [];
}
}
// Compare resource counts across namespaces
print("\n--- Resource Comparison Across Namespaces ---");
let total_resources = #{};
for ns in target_namespaces {
if ns in managers {
let km = managers[ns];
print("\nNamespace: " + ns);
let counts = get_safe_counts(km);
for resource_type in counts.keys() {
let count = counts[resource_type];
print(" " + resource_type + ": " + count);
// Accumulate totals
if resource_type in total_resources {
total_resources[resource_type] = total_resources[resource_type] + count;
} else {
total_resources[resource_type] = count;
}
}
}
}
print("\n--- Total Resources Across All Namespaces ---");
for resource_type in total_resources.keys() {
print("Total " + resource_type + ": " + total_resources[resource_type]);
}
// Find namespaces with the most resources
print("\n--- Namespace Resource Analysis ---");
let namespace_totals = #{};
for ns in target_namespaces {
if ns in managers {
let km = managers[ns];
let counts = get_safe_counts(km);
let total = 0;
for resource_type in counts.keys() {
total = total + counts[resource_type];
}
namespace_totals[ns] = total;
print("Namespace '" + ns + "' has " + total + " total resources");
}
}
// Find the busiest namespace
let busiest_ns = "";
let max_resources = 0;
for ns in namespace_totals.keys() {
if namespace_totals[ns] > max_resources {
max_resources = namespace_totals[ns];
busiest_ns = ns;
}
}
if busiest_ns != "" {
print("🏆 Busiest namespace: '" + busiest_ns + "' with " + max_resources + " resources");
}
// Detailed pod analysis
print("\n--- Pod Analysis Across Namespaces ---");
let all_pods = [];
for ns in target_namespaces {
if ns in managers {
let km = managers[ns];
let pods = get_safe_pods(km);
print("\nNamespace '" + ns + "' pods:");
if pods.len() == 0 {
print(" (no pods)");
} else {
for pod in pods {
print(" - " + pod);
all_pods.push(ns + "/" + pod);
}
}
}
}
print("\n--- All Pods Summary ---");
print("Total pods across all namespaces: " + all_pods.len());
// Look for common pod name patterns
print("\n--- Pod Name Pattern Analysis ---");
let patterns = #{
"system": 0,
"kube": 0,
"coredns": 0,
"proxy": 0,
"controller": 0
};
for pod_full_name in all_pods {
let pod_name = pod_full_name.to_lower();
for pattern in patterns.keys() {
if pod_name.contains(pattern) {
patterns[pattern] = patterns[pattern] + 1;
}
}
}
print("Common pod name patterns found:");
for pattern in patterns.keys() {
if patterns[pattern] > 0 {
print(" '" + pattern + "': " + patterns[pattern] + " pods");
}
}
// Namespace health check
print("\n--- Namespace Health Check ---");
for ns in target_namespaces {
if ns in managers {
let km = managers[ns];
print("\nChecking namespace: " + ns);
// Check if namespace exists (should always be true for our managers)
let exists = namespace_exists(km, ns);
if exists {
print(" ✓ Namespace exists and is accessible");
} else {
print(" ✗ Namespace existence check failed");
}
// Try to get resource counts as a health indicator
let counts = get_safe_counts(km);
if counts.len() > 0 {
print(" ✓ Can access resources (" + counts.len() + " resource types)");
} else {
print(" ⚠ No resources found or access limited");
}
}
}
// Create a summary report
print("\n--- Summary Report ---");
print("Namespaces analyzed: " + target_namespaces.len());
print("Total unique resource types: " + total_resources.len());
let grand_total = 0;
for resource_type in total_resources.keys() {
grand_total = grand_total + total_resources[resource_type];
}
print("Grand total resources: " + grand_total);
print("\nResource breakdown:");
for resource_type in total_resources.keys() {
let count = total_resources[resource_type];
let percentage = (count * 100) / grand_total;
print(" " + resource_type + ": " + count + " (" + percentage + "%)");
}
print("\n=== Multi-namespace operations example completed! ===");

View File

@@ -1,95 +0,0 @@
//! Kubernetes namespace management example
//!
//! This script demonstrates namespace creation and management operations.
//!
//! Prerequisites:
//! - A running Kubernetes cluster
//! - Valid kubeconfig file or in-cluster configuration
//! - Permissions to create and manage namespaces
//!
//! Usage:
//! herodo examples/kubernetes/namespace_management.rhai
print("=== SAL Kubernetes Namespace Management Example ===");
// Create a KubernetesManager
let km = kubernetes_manager_new("default");
print("Created KubernetesManager for namespace: " + namespace(km));
// Define test namespace names
let test_namespaces = [
"sal-test-namespace-1",
"sal-test-namespace-2",
"sal-example-app"
];
print("\n--- Creating Test Namespaces ---");
for ns in test_namespaces {
print("Creating namespace: " + ns);
try {
namespace_create(km, ns);
print("✓ Successfully created namespace: " + ns);
} catch(e) {
print("✗ Failed to create namespace " + ns + ": " + e);
}
}
// Wait a moment for namespaces to be created
print("\nWaiting for namespaces to be ready...");
// Verify namespaces were created
print("\n--- Verifying Namespace Creation ---");
for ns in test_namespaces {
let exists = namespace_exists(km, ns);
if exists {
print("✓ Namespace '" + ns + "' exists");
} else {
print("✗ Namespace '" + ns + "' was not found");
}
}
// List all namespaces to see our new ones
print("\n--- Current Namespaces ---");
let all_namespaces = namespaces_list(km);
print("Total namespaces in cluster: " + all_namespaces.len());
for ns in all_namespaces {
if ns.starts_with("sal-") {
print(" 🔹 " + ns + " (created by this example)");
} else {
print(" - " + ns);
}
}
// Test idempotent creation (creating the same namespace again)
print("\n--- Testing Idempotent Creation ---");
let test_ns = test_namespaces[0];
print("Attempting to create existing namespace: " + test_ns);
try {
namespace_create(km, test_ns);
print("✓ Idempotent creation successful (no error for existing namespace)");
} catch(e) {
print("✗ Unexpected error during idempotent creation: " + e);
}
// Create managers for the new namespaces and check their properties
print("\n--- Creating Managers for New Namespaces ---");
for ns in test_namespaces {
try {
let ns_km = kubernetes_manager_new(ns);
print("✓ Created manager for namespace: " + namespace(ns_km));
// Get resource counts for the new namespace (should be mostly empty)
let counts = resource_counts(ns_km);
print(" Resource counts: " + counts);
} catch(e) {
print("✗ Failed to create manager for " + ns + ": " + e);
}
}
print("\n--- Cleanup Instructions ---");
print("To clean up the test namespaces created by this example, run:");
for ns in test_namespaces {
print(" kubectl delete namespace " + ns);
}
print("\n=== Namespace management example completed! ===");

View File

@@ -1,157 +0,0 @@
//! Kubernetes pattern-based deletion example
//!
//! This script demonstrates how to use PCRE patterns to delete multiple resources.
//!
//! ⚠️ WARNING: This example includes actual deletion operations!
//! ⚠️ Only run this in a test environment!
//!
//! Prerequisites:
//! - A running Kubernetes cluster (preferably a test cluster)
//! - Valid kubeconfig file or in-cluster configuration
//! - Permissions to delete resources
//!
//! Usage:
//! herodo examples/kubernetes/pattern_deletion.rhai
print("=== SAL Kubernetes Pattern Deletion Example ===");
print("⚠️ WARNING: This example will delete resources matching patterns!");
print("⚠️ Only run this in a test environment!");
// Create a KubernetesManager for a test namespace
let test_namespace = "sal-pattern-test";
let km = kubernetes_manager_new("default");
print("\nCreating test namespace: " + test_namespace);
try {
namespace_create(km, test_namespace);
print("✓ Test namespace created");
} catch(e) {
print("Note: " + e);
}
// Switch to the test namespace
let test_km = kubernetes_manager_new(test_namespace);
print("Switched to namespace: " + namespace(test_km));
// Show current resources before any operations
print("\n--- Current Resources in Test Namespace ---");
let counts = resource_counts(test_km);
print("Resource counts before operations:");
for resource_type in counts.keys() {
print(" " + resource_type + ": " + counts[resource_type]);
}
// List current pods to see what we're working with
let current_pods = pods_list(test_km);
print("\nCurrent pods in namespace:");
if current_pods.len() == 0 {
print(" (no pods found)");
} else {
for pod in current_pods {
print(" - " + pod);
}
}
// Demonstrate pattern matching without deletion first
print("\n--- Pattern Matching Demo (Dry Run) ---");
let test_patterns = [
"test-.*", // Match anything starting with "test-"
".*-temp$", // Match anything ending with "-temp"
"demo-pod-.*", // Match demo pods
"nginx-.*", // Match nginx pods
"app-[0-9]+", // Match app-1, app-2, etc.
];
for pattern in test_patterns {
print("Testing pattern: '" + pattern + "'");
// Check which pods would match this pattern
let matching_pods = [];
for pod in current_pods {
// Simple pattern matching simulation (Rhai doesn't have regex, so this is illustrative)
if pod.contains("test") && pattern == "test-.*" {
matching_pods.push(pod);
} else if pod.contains("temp") && pattern == ".*-temp$" {
matching_pods.push(pod);
} else if pod.contains("demo") && pattern == "demo-pod-.*" {
matching_pods.push(pod);
} else if pod.contains("nginx") && pattern == "nginx-.*" {
matching_pods.push(pod);
}
}
print(" Would match " + matching_pods.len() + " pods: " + matching_pods);
}
// Example of safe deletion patterns
print("\n--- Safe Deletion Examples ---");
print("These patterns are designed to be safe for testing:");
let safe_patterns = [
"test-example-.*", // Very specific test resources
"sal-demo-.*", // SAL demo resources
"temp-resource-.*", // Temporary resources
];
for pattern in safe_patterns {
print("\nTesting safe pattern: '" + pattern + "'");
try {
// This will actually attempt deletion, but should be safe in a test environment
let deleted_count = delete(test_km, pattern);
print("✓ Pattern '" + pattern + "' matched and deleted " + deleted_count + " resources");
} catch(e) {
print("Note: Pattern '" + pattern + "' - " + e);
}
}
// Show resources after deletion attempts
print("\n--- Resources After Deletion Attempts ---");
let final_counts = resource_counts(test_km);
print("Final resource counts:");
for resource_type in final_counts.keys() {
print(" " + resource_type + ": " + final_counts[resource_type]);
}
// Example of individual resource deletion
print("\n--- Individual Resource Deletion Examples ---");
print("These functions delete specific resources by name:");
// These are examples - they will fail if the resources don't exist, which is expected
let example_deletions = [
["pod", "test-pod-example"],
["service", "test-service-example"],
["deployment", "test-deployment-example"],
];
for deletion in example_deletions {
let resource_type = deletion[0];
let resource_name = deletion[1];
print("Attempting to delete " + resource_type + ": " + resource_name);
try {
if resource_type == "pod" {
pod_delete(test_km, resource_name);
} else if resource_type == "service" {
service_delete(test_km, resource_name);
} else if resource_type == "deployment" {
deployment_delete(test_km, resource_name);
}
print("✓ Successfully deleted " + resource_type + ": " + resource_name);
} catch(e) {
print("Note: " + resource_type + " '" + resource_name + "' - " + e);
}
}
print("\n--- Best Practices for Pattern Deletion ---");
print("1. Always test patterns in a safe environment first");
print("2. Use specific patterns rather than broad ones");
print("3. Consider using dry-run approaches when possible");
print("4. Have backups or be able to recreate resources");
print("5. Use descriptive naming conventions for easier pattern matching");
print("\n--- Cleanup ---");
print("To clean up the test namespace:");
print(" kubectl delete namespace " + test_namespace);
print("\n=== Pattern deletion example completed! ===");

View File

@@ -1,33 +0,0 @@
//! Test Kubernetes module registration
//!
//! This script tests that the Kubernetes module is properly registered
//! and available in the Rhai environment.
print("=== Testing Kubernetes Module Registration ===");
// Test that we can reference the kubernetes functions
print("Testing function registration...");
// These should not error even if we can't connect to a cluster
let functions_to_test = [
"kubernetes_manager_new",
"pods_list",
"services_list",
"deployments_list",
"delete",
"namespace_create",
"namespace_exists",
"resource_counts",
"pod_delete",
"service_delete",
"deployment_delete",
"namespace"
];
for func_name in functions_to_test {
print("✓ Function '" + func_name + "' is available");
}
print("\n=== All Kubernetes functions are properly registered! ===");
print("Note: To test actual functionality, you need a running Kubernetes cluster.");
print("See other examples in this directory for real cluster operations.");

View File

@@ -1,7 +1,6 @@
// Example of using the network modules in SAL through Rhai
// Shows TCP port checking, HTTP URL validation, and SSH command execution
// Function to print section header
fn section(title) {
print("\n");
@@ -20,14 +19,14 @@ let host = "localhost";
let port = 22;
print(`Checking if port ${port} is open on ${host}...`);
let is_open = tcp.check_port(host, port);
print(`Port ${port} is ${if is_open { "open" } else { "closed" }}`);
print(`Port ${port} is ${is_open ? "open" : "closed"}`);
// Check multiple ports
let ports = [22, 80, 443];
print(`Checking multiple ports on ${host}...`);
let port_results = tcp.check_ports(host, ports);
for result in port_results {
print(`Port ${result.port} is ${if result.is_open { "open" } else { "closed" }}`);
print(`Port ${result.port} is ${result.is_open ? "open" : "closed"}`);
}
// HTTP connectivity checks
@@ -40,7 +39,7 @@ let http = net::new_http_connector();
let url = "https://www.example.com";
print(`Checking if ${url} is reachable...`);
let is_reachable = http.check_url(url);
print(`${url} is ${if is_reachable { "reachable" } else { "unreachable" }}`);
print(`${url} is ${is_reachable ? "reachable" : "unreachable"}`);
// Check the status code of a URL
print(`Checking status code of ${url}...`);
@@ -69,7 +68,7 @@ if is_open {
let ssh = net::new_ssh_builder()
.host("localhost")
.port(22)
.user(if os::get_env("USER") != () { os::get_env("USER") } else { "root" })
.user(os::get_env("USER") || "root")
.timeout(10)
.build();

View File

@@ -1,7 +1,7 @@
print("Running a basic command using run().execute()...");
print("Running a basic command using run().do()...");
// Execute a simple command
let result = run("echo Hello from run_basic!").execute();
let result = run("echo Hello from run_basic!").do();
// Print the command result
print(`Command: echo Hello from run_basic!`);
@@ -13,6 +13,6 @@ print(`Stderr:\n${result.stderr}`);
// Example of a command that might fail (if 'nonexistent_command' doesn't exist)
// This will halt execution by default because ignore_error() is not used.
// print("Running a command that will fail (and should halt)...");
// let fail_result = run("nonexistent_command").execute(); // This line will cause the script to halt if the command doesn't exist
// let fail_result = run("nonexistent_command").do(); // This line will cause the script to halt if the command doesn't exist
print("Basic run() example finished.");

View File

@@ -2,7 +2,7 @@ print("Running a command that will fail, but ignoring the error...");
// Run a command that exits with a non-zero code (will fail)
// Using .ignore_error() prevents the script from halting
let result = run("exit 1").ignore_error().execute();
let result = run("exit 1").ignore_error().do();
print(`Command finished.`);
print(`Success: ${result.success}`); // This should be false
@@ -22,7 +22,7 @@ print("\nScript continued execution after the potentially failing command.");
// Example of a command that might fail due to OS error (e.g., command not found)
// This *might* still halt depending on how the underlying Rust function handles it,
// as ignore_error() primarily prevents halting on *command* non-zero exit codes.
// let os_error_result = run("nonexistent_command_123").ignore_error().execute();
// let os_error_result = run("nonexistent_command_123").ignore_error().do();
// print(`OS Error Command Success: ${os_error_result.success}`);
// print(`OS Error Command Exit Code: ${os_error_result.code}`);

View File

@@ -1,4 +1,4 @@
print("Running a command using run().log().execute()...");
print("Running a command using run().log().do()...");
// The .log() method will print the command string to the console before execution.
// This is useful for debugging or tracing which commands are being run.

View File

@@ -1,8 +1,8 @@
print("Running a command using run().silent().execute()...\n");
print("Running a command using run().silent().do()...\n");
// This command will print to standard output and standard error
// However, because .silent() is used, the output will not appear in the console directly
let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().execute();
let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().do();
// The output is still captured in the CommandResult
print(`Command finished.`);
@@ -12,7 +12,7 @@ print(`Captured Stdout:\\n${result.stdout}`);
print(`Captured Stderr:\\n${result.stderr}`);
// Example of a silent command that fails (but won't halt because we only suppress output)
// let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().execute();
// let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().do();
// print(`Failed command finished (silent):`);
// print(`Success: ${fail_result.success}`);
// print(`Exit Code: ${fail_result.code}`);

View File

@@ -1,43 +0,0 @@
# RFS Client Rhai Examples
This folder contains Rhai examples that use the SAL RFS client wrappers registered by `sal::rhai::register(&mut engine)` and executed by the `herodo` binary.
## Quick start
Run the auth + upload + download example (uses hardcoded credentials and `/etc/hosts` as input):
```bash
cargo run -p herodo -- examples/rfsclient/auth_and_upload.rhai
```
By default, the script:
- Uses base URL `http://127.0.0.1:8080`
- Uses credentials `user` / `password`
- Uploads the file `/etc/hosts`
- Downloads to `/tmp/rfs_example_out.txt`
To customize, edit `examples/rfsclient/auth_and_upload.rhai` near the top and change `BASE_URL`, `USER`, `PASS`, and file paths.
## What the example does
- Creates the RFS client: `rfs_create_client(BASE_URL, USER, PASS, TIMEOUT)`
- Health check: `rfs_health_check()`
- Authenticates: `rfs_authenticate()`
- Uploads a file: `rfs_upload_file(local_path, chunk_size, verify)` → returns file hash
- Downloads it back: `rfs_download_file(file_id_or_hash, dest_path, verify)` → returns unit (throws on error)
See `examples/rfsclient/auth_and_upload.rhai` for details.
## Using the Rust client directly (optional)
If you want to use the Rust API (without Rhai), depend on `sal-rfs-client` and see:
- `packages/clients/rfsclient/src/client.rs` (`RfsClient`)
- `packages/clients/rfsclient/src/types.rs` (config and option types)
- `packages/clients/rfsclient/examples/` (example usage)
## Troubleshooting
- Auth failures: verify credentials and that the server requires/authenticates them.
- Connection errors: verify the base URL is reachable from your machine.

View File

@@ -1,41 +0,0 @@
// RFS Client: Auth + Upload + Download example
// Prereqs:
// - RFS server reachable at RFS_BASE_URL
// - Valid credentials in env: RFS_USER, RFS_PASS
// - Run with herodo so the SAL Rhai modules are registered
// NOTE: env_get not available in this runtime; hardcode or replace with your env loader
let BASE_URL = "http://127.0.0.1:8080";
let USER = "user";
let PASS = "password";
let TIMEOUT = 30; // seconds
if BASE_URL == "" { throw "Set BASE_URL in the script"; }
// Create client
let ok = rfs_create_client(BASE_URL, USER, PASS, TIMEOUT);
if !ok { throw "Failed to create RFS client"; }
// Optional health check
let health = rfs_health_check();
print(`RFS health: ${health}`);
// Authenticate (required for some operations)
let auth_ok = rfs_authenticate();
if !auth_ok { throw "Authentication failed"; }
// Upload a local file
// Use an existing readable file to avoid needing os_write_file module
let local_file = "/etc/hosts";
// rfs_upload_file(file_path, chunk_size, verify)
let hash = rfs_upload_file(local_file, 0, false);
print(`Uploaded file hash: ${hash}`);
// Download it back
let out_path = "/tmp/rfs_example_out.txt";
// rfs_download_file(file_id, output_path, verify) returns unit and throws on error
rfs_download_file(hash, out_path, false);
print(`Downloaded to: ${out_path}`);
true

View File

@@ -1,116 +0,0 @@
# Service Manager Examples
This directory contains examples demonstrating the SAL service manager functionality for dynamically launching and managing services across platforms.
## Overview
The service manager provides a unified interface for managing system services:
- **macOS**: Uses `launchctl` for service management
- **Linux**: Uses `zinit` for service management (systemd also available as alternative)
## Examples
### 1. Circle Worker Manager (`circle_worker_manager.rhai`)
**Primary Use Case**: Demonstrates dynamic circle worker management for freezone residents.
This example shows:
- Creating service configurations for circle workers
- Complete service lifecycle management (start, stop, restart, remove)
- Status monitoring and log retrieval
- Error handling and cleanup
```bash
# Run the circle worker management example
herodo examples/service_manager/circle_worker_manager.rhai
```
### 2. Basic Usage (`basic_usage.rhai`)
**Learning Example**: Simple demonstration of the core service manager API.
This example covers:
- Creating and configuring services
- Starting and stopping services
- Checking service status
- Listing managed services
- Retrieving service logs
```bash
# Run the basic usage example
herodo examples/service_manager/basic_usage.rhai
```
## Prerequisites
### Linux (zinit)
Make sure zinit is installed and running:
```bash
# Start zinit with default socket
zinit -s /tmp/zinit.sock init
```
### macOS (launchctl)
No additional setup required - uses the built-in launchctl system.
## Service Manager API
The service manager provides these key functions (see the sketch after this list):
- `create_service_manager()` - Create platform-appropriate service manager
- `start(manager, config)` - Start a new service
- `stop(manager, service_name)` - Stop a running service
- `restart(manager, service_name)` - Restart a service
- `status(manager, service_name)` - Get service status
- `logs(manager, service_name, lines)` - Retrieve service logs
- `list(manager)` - List all managed services
- `remove(manager, service_name)` - Remove a service
- `exists(manager, service_name)` - Check if service exists
- `start_and_confirm(manager, config, timeout)` - Start with confirmation
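A minimal end-to-end sketch using these functions (assumptions: herodo has registered the service manager module, and the service name and `/bin/echo` binary are placeholders):
```rhai
// Minimal lifecycle sketch: create, start, inspect, stop, remove.
// The configuration fields mirror the map described in the next section.
let manager = create_service_manager();

let config = #{
    name: "sketch-service",          // placeholder service name
    binary_path: "/bin/echo",        // any short-lived executable works
    args: ["hello from sketch"],
    working_directory: "/tmp",
    environment: #{},
    auto_restart: false
};

start(manager, config);
print(`Status: ${status(manager, "sketch-service")}`);
print(logs(manager, "sketch-service", 5));
stop(manager, "sketch-service");
remove(manager, "sketch-service");
```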
## Service Configuration
Services are configured using a map with these fields:
```rhai
let config = #{
name: "my-service", // Service name
binary_path: "/usr/bin/my-app", // Executable path
args: ["--config", "/etc/my-app.conf"], // Command arguments
working_directory: "/var/lib/my-app", // Working directory (optional)
environment: #{ // Environment variables
"VAR1": "value1",
"VAR2": "value2"
},
auto_restart: true // Auto-restart on failure
};
```
## Real-World Usage
The circle worker example demonstrates the exact use case requested by the team:
> "We want to be able to launch circle workers dynamically. For instance when someone registers to the freezone, we need to be able to launch a circle worker for the new resident."
The service manager enables:
1. **Dynamic service creation** - Create services on-demand for new residents
2. **Cross-platform support** - Works on both macOS and Linux
3. **Lifecycle management** - Full control over service lifecycle
4. **Monitoring and logging** - Track service status and retrieve logs
5. **Cleanup** - Proper service removal when no longer needed
## Error Handling
All service manager functions can throw errors. Use try-catch blocks for robust error handling:
```rhai
try {
sm::start(manager, config);
print("✅ Service started successfully");
} catch (error) {
print(`❌ Failed to start service: ${error}`);
}
```

View File

@@ -1,85 +0,0 @@
// Basic Service Manager Usage Example
//
// This example demonstrates the basic API of the service manager.
// It works on both macOS (launchctl) and Linux (zinit/systemd).
//
// Prerequisites:
//
// Linux: The service manager will automatically discover running zinit servers
// or fall back to systemd. To use zinit, start it with:
// zinit -s /tmp/zinit.sock init
//
// You can also specify a custom socket path:
// export ZINIT_SOCKET_PATH=/your/custom/path/zinit.sock
//
// macOS: No additional setup required (uses launchctl).
//
// Usage:
// herodo examples/service_manager/basic_usage.rhai
// Service Manager Basic Usage Example
// This example uses the SAL service manager through Rhai integration
print("🚀 Basic Service Manager Usage Example");
print("======================================");
// Create a service manager for the current platform
let manager = create_service_manager();
print("🍎 Using service manager for current platform");
// Create a simple service configuration
let config = #{
name: "example-service",
binary_path: "/bin/echo",
args: ["Hello from service manager!"],
working_directory: "/tmp",
environment: #{
"EXAMPLE_VAR": "hello_world"
},
auto_restart: false
};
print("\n📝 Service Configuration:");
print(` Name: ${config.name}`);
print(` Binary: ${config.binary_path}`);
print(` Args: ${config.args}`);
// Start the service
print("\n🚀 Starting service...");
start(manager, config);
print("✅ Service started successfully");
// Check service status
print("\n📊 Checking service status...");
let status = status(manager, "example-service");
print(`Status: ${status}`);
// List all services
print("\n📋 Listing all managed services...");
let services = list(manager);
print(`Found ${services.len()} services:`);
for service in services {
print(` - ${service}`);
}
// Get service logs
print("\n📄 Getting service logs...");
let logs = logs(manager, "example-service", 5);
if logs.trim() == "" {
print("No logs available");
} else {
print(`Logs:\n${logs}`);
}
// Stop the service
print("\n🛑 Stopping service...");
stop(manager, "example-service");
print("✅ Service stopped");
// Remove the service
print("\n🗑 Removing service...");
remove(manager, "example-service");
print("✅ Service removed");
print("\n🎉 Example completed successfully!");

View File

@@ -1,141 +0,0 @@
// Circle Worker Manager Example
//
// This example demonstrates how to use the service manager to dynamically launch
// circle workers for new freezone residents. This is the primary use case requested
// by the team.
//
// Usage:
//
// On macOS (uses launchctl):
// herodo examples/service_manager/circle_worker_manager.rhai
//
// On Linux (uses zinit - requires zinit to be running):
// First start zinit: zinit -s /tmp/zinit.sock init
// herodo examples/service_manager/circle_worker_manager.rhai
// Circle Worker Manager Example
// This example uses the SAL service manager through Rhai integration
print("🚀 Circle Worker Manager Example");
print("=================================");
// Create the appropriate service manager for the current platform
let service_manager = create_service_manager();
print("✅ Created service manager for current platform");
// Simulate a new freezone resident registration
let resident_id = "resident_12345";
let worker_name = `circle-worker-${resident_id}`;
print(`\n📝 New freezone resident registered: ${resident_id}`);
print(`🔧 Creating circle worker service: ${worker_name}`);
// Create service configuration for the circle worker
let config = #{
name: worker_name,
binary_path: "/bin/sh",
args: [
"-c",
`echo 'Circle worker for ${resident_id} starting...'; sleep 30; echo 'Circle worker for ${resident_id} completed'`
],
working_directory: "/tmp",
environment: #{
"RESIDENT_ID": resident_id,
"WORKER_TYPE": "circle",
"LOG_LEVEL": "info"
},
auto_restart: true
};
print("📋 Service configuration created:");
print(` Name: ${config.name}`);
print(` Binary: ${config.binary_path}`);
print(` Args: ${config.args}`);
print(` Auto-restart: ${config.auto_restart}`);
print(`\n🔄 Demonstrating service lifecycle for: ${worker_name}`);
// 1. Check if service already exists
print("\n1⃣ Checking if service exists...");
if exists(service_manager, worker_name) {
print("⚠️ Service already exists, removing it first...");
remove(service_manager, worker_name);
print("🗑️ Existing service removed");
} else {
print("✅ Service doesn't exist, ready to create");
}
// 2. Start the service
print("\n2⃣ Starting the circle worker service...");
start(service_manager, config);
print("✅ Service started successfully");
// 3. Check service status
print("\n3⃣ Checking service status...");
let status = status(service_manager, worker_name);
print(`📊 Service status: ${status}`);
// 4. List all services to show our service is there
print("\n4⃣ Listing all managed services...");
let services = list(service_manager);
print(`📋 Managed services (${services.len()}):`);
for service in services {
let marker = if service == worker_name { "👉" } else { " " };
print(` ${marker} ${service}`);
}
// 5. Wait a moment and check status again
print("\n5⃣ Waiting 3 seconds and checking status again...");
sleep(3000); // 3 seconds in milliseconds
let status = status(service_manager, worker_name);
print(`📊 Service status after 3s: ${status}`);
// 6. Get service logs
print("\n6⃣ Retrieving service logs...");
let logs = logs(service_manager, worker_name, 10);
if logs.trim() == "" {
print("📄 No logs available yet (this is normal for new services)");
} else {
print("📄 Recent logs:");
let log_lines = logs.split('\n');
for i in 0..5 {
if i < log_lines.len() {
print(` ${log_lines[i]}`);
}
}
}
// 7. Demonstrate start_and_confirm with timeout
print("\n7⃣ Testing start_and_confirm (should succeed quickly since already running)...");
start_and_confirm(service_manager, config, 5);
print("✅ Service confirmed running within timeout");
// 8. Stop the service
print("\n8⃣ Stopping the service...");
stop(service_manager, worker_name);
print("🛑 Service stopped");
// 9. Check status after stopping
print("\n9⃣ Checking status after stop...");
let status = status(service_manager, worker_name);
print(`📊 Service status after stop: ${status}`);
// 10. Restart the service
print("\n🔟 Restarting the service...");
restart(service_manager, worker_name);
print("🔄 Service restarted successfully");
// 11. Final cleanup
print("\n🧹 Cleaning up - removing the service...");
remove(service_manager, worker_name);
print("🗑️ Service removed successfully");
// 12. Verify removal
print("\n✅ Verifying service removal...");
if !exists(service_manager, worker_name) {
print("✅ Service successfully removed");
} else {
print("⚠️ Service still exists after removal");
}
print("\n🎉 Circle worker management demonstration complete!");

View File

@@ -1,15 +0,0 @@
[package]
name = "openrouter_example"
version = "0.1.0"
edition = "2021"
[workspace]
[[bin]]
name = "openrouter_example"
path = "openrouter_example.rs"
[dependencies]
codemonkey = { path = "../../packages/ai/codemonkey" }
openai-api-rs = "6.0.8"
tokio = { version = "1.0", features = ["full"] }

View File

@@ -1,47 +0,0 @@
use codemonkey::{create_ai_provider, AIProviderType, CompletionRequestBuilder, Message, MessageRole, Content};
use std::error::Error;
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let (mut provider, provider_type) = create_ai_provider(AIProviderType::OpenRouter)?;
let messages = vec![Message {
role: MessageRole::user,
content: Content::Text("Explain the concept of a factory design pattern in Rust.".to_string()),
name: None,
tool_calls: None,
tool_call_id: None,
}];
println!("Sending request to OpenRouter...");
let response = CompletionRequestBuilder::new(
&mut *provider,
"openai/gpt-oss-120b".to_string(), // Model name as specified by the user
messages,
provider_type, // Pass the provider_type
)
.temperature(1.0)
.max_tokens(8192)
.top_p(1.0)
.reasoning_effort("medium")
.stream(false)
.openrouter_options(|builder| {
builder.provider(
codemonkey::OpenRouterProviderOptionsBuilder::new()
.order(vec!["cerebras"])
.build(),
)
})
.completion()
.await?;
for choice in response.choices {
if let Some(content) = choice.message.content {
print!("{}", content);
}
}
println!();
Ok(())
}

View File

@@ -1,13 +0,0 @@
#!/bin/bash
set -e
# Change to directory where this script is located
cd "$(dirname "${BASH_SOURCE[0]}")"
source ../../config/myenv.sh
# Build the example
cargo build
# Run the example
cargo run --bin openrouter_example

View File

@@ -10,12 +10,12 @@ license = "Apache-2.0"
[dependencies]
# Use workspace dependencies for consistency
regex = { workspace = true }
redis = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
rhai = { workspace = true }
log = { workspace = true }
url = { workspace = true }
redis = { workspace = true }
[dev-dependencies]
tempfile = { workspace = true }

View File

@@ -1,18 +1,9 @@
# SAL Git Package (`sal-git`)
# SAL `git` Module
The `sal-git` package provides comprehensive functionalities for interacting with Git repositories. It offers both high-level abstractions for common Git workflows and a flexible executor for running arbitrary Git commands with integrated authentication.
The `git` module in SAL provides comprehensive functionalities for interacting with Git repositories. It offers both high-level abstractions for common Git workflows and a flexible executor for running arbitrary Git commands with integrated authentication.
This module is central to SAL's capabilities for managing source code, enabling automation of development tasks, and integrating with version control systems.
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-git = "0.1.0"
```
## Core Components
The module is primarily composed of two main parts:

View File

@@ -18,8 +18,8 @@ path = "src/main.rs"
env_logger = { workspace = true }
rhai = { workspace = true }
# SAL library for Rhai module registration (with all features for herodo)
sal = { path = "..", features = ["all"] }
# SAL library for Rhai module registration
sal = { path = ".." }
[dev-dependencies]
tempfile = { workspace = true }

View File

@@ -15,32 +15,14 @@ Herodo is a command-line utility that executes Rhai scripts with full access to
## Installation
### Build and Install
Build the herodo binary:
```bash
git clone https://github.com/PlanetFirst/sal.git
cd sal
./build_herodo.sh
cd herodo
cargo build --release
```
This script will:
- Build herodo in debug mode
- Install it to `~/hero/bin/herodo` (non-root) or `/usr/local/bin/herodo` (root)
- Make it available in your PATH
**Note**: If using the non-root installation, make sure `~/hero/bin` is in your PATH:
```bash
export PATH="$HOME/hero/bin:$PATH"
```
### Install from crates.io (Coming Soon)
```bash
# This will be available once herodo is published to crates.io
cargo install herodo
```
**Note**: `herodo` is not yet published to crates.io due to publishing rate limits. It will be available soon.
The executable will be available at `target/release/herodo`.
## Usage

View File

@@ -3,7 +3,7 @@
//! This library loads the Rhai engine, registers all SAL modules,
//! and executes Rhai scripts from a specified directory in sorted order.
use rhai::{Engine, Scope};
use rhai::Engine;
use std::error::Error;
use std::fs;
use std::path::{Path, PathBuf};
@@ -29,19 +29,6 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> {
// Create a new Rhai engine
let mut engine = Engine::new();
    // TODO: if we create a scope here we could clean up all the different functions and types registered with the engine
    // We should generalize the way we add things to the scope for each module separately
let mut scope = Scope::new();
// Conditionally add Hetzner client only when env config is present
if let Ok(cfg) = sal::hetzner::config::Config::from_env() {
let hetzner_client = sal::hetzner::api::Client::new(cfg);
scope.push("hetzner", hetzner_client);
}
// This makes it easy to call e.g. `hetzner.get_server()` or `mycelium.get_connected_peers()`
    // --> without the need to manually create a client for each one first
    // --> could be conditionally compiled to only use the ones we need (we only push the things to the scope that we actually need to run the script)
// Register println function for output
engine.register_fn("println", |s: &str| println!("{}", s));
@@ -91,20 +78,19 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> {
let script = fs::read_to_string(&script_file)?;
// Execute the script
// match engine.eval::<rhai::Dynamic>(&script) {
// Ok(result) => {
// println!("Script executed successfully");
// if !result.is_unit() {
// println!("Result: {}", result);
// }
// }
// Err(err) => {
// eprintln!("Error executing script: {}", err);
// // Exit with error code when a script fails
// process::exit(1);
// }
// }
engine.run_with_scope(&mut scope, &script)?;
match engine.eval::<rhai::Dynamic>(&script) {
Ok(result) => {
println!("Script executed successfully");
if !result.is_unit() {
println!("Result: {}", result);
}
}
Err(err) => {
eprintln!("Error executing script: {}", err);
// Exit with error code when a script fails
process::exit(1);
}
}
}
println!("\nAll scripts executed successfully!");

View File

@@ -9,22 +9,22 @@ license = "Apache-2.0"
[dependencies]
# HTTP client for async requests
reqwest = { workspace = true }
reqwest = { version = "0.12.15", features = ["json"] }
# JSON handling
serde_json = { workspace = true }
serde_json = "1.0"
# Base64 encoding/decoding for message payloads
base64 = { workspace = true }
base64 = "0.22.1"
# Async runtime
tokio = { workspace = true }
tokio = { version = "1.45.0", features = ["full"] }
# Rhai scripting support
rhai = { workspace = true }
rhai = { version = "1.12.0", features = ["sync"] }
# Logging
log = { workspace = true }
log = "0.4"
# URL encoding for API parameters
urlencoding = { workspace = true }
urlencoding = "2.1.3"
[dev-dependencies]
# For async testing
tokio-test = { workspace = true }
tokio-test = "0.4.4"
# For temporary files in tests
tempfile = { workspace = true }
tempfile = "3.5"

View File

@@ -1,16 +1,7 @@
# SAL Mycelium (`sal-mycelium`)
# SAL Mycelium
A Rust client library for interacting with Mycelium node's HTTP API, with Rhai scripting support.
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-mycelium = "0.1.0"
```
## Overview
SAL Mycelium provides async HTTP client functionality for managing Mycelium nodes, including:

View File

@@ -10,7 +10,7 @@ keywords = ["network", "tcp", "http", "ssh", "connectivity"]
categories = ["network-programming", "api-bindings"]
[dependencies]
anyhow = { workspace = true }
tokio = { workspace = true }
reqwest = { workspace = true, features = ["json", "blocking"] }
rhai = { workspace = true }
anyhow = "1.0.98"
tokio = { version = "1.0", features = ["full"] }
reqwest = { version = "0.12", features = ["json", "blocking"] }
rhai = "1.19.0"

View File

@@ -1,16 +1,7 @@
# SAL Network Package (`sal-net`)
# SAL Network Package
Network connectivity utilities for TCP, HTTP, and SSH operations.
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
sal-net = "0.1.0"
```
## Overview
The `sal-net` package provides a comprehensive set of network connectivity tools for the SAL (System Abstraction Layer) ecosystem. It includes utilities for TCP port checking, HTTP/HTTPS connectivity testing, and SSH command execution.
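A minimal Rhai sketch of an HTTP connectivity check (the connector constructor and `check_url` method are taken from the repository's Rhai network example; shown here as an illustrative sketch, not a complete API reference):
```rhai
// Quick reachability probe using the HTTP connector.
// The URL is illustrative; any HTTPS endpoint works.
let http = net::new_http_connector();
let url = "https://www.example.com";
let is_reachable = http.check_url(url);
print(`${url} is ${is_reachable ? "reachable" : "unreachable"}`);
```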

View File

@@ -14,8 +14,7 @@ categories = ["os", "filesystem", "api-bindings"]
dirs = { workspace = true }
glob = { workspace = true }
libc = { workspace = true }
anyhow = {workspace = true}
reqwest = {workspace = true}
# Error handling
thiserror = { workspace = true }

View File

@@ -165,18 +165,9 @@ fn test_mv() {
#[test]
fn test_which() {
// Test with a command that should exist on all systems
#[cfg(target_os = "windows")]
let existing_cmd = "cmd";
#[cfg(not(target_os = "windows"))]
let existing_cmd = "ls";
let result = fs::which(existing_cmd);
assert!(
!result.is_empty(),
"Command '{}' should exist",
existing_cmd
);
// Test with a command that should exist on most systems
let result = fs::which("ls");
assert!(!result.is_empty());
// Test with a command that shouldn't exist
let result = fs::which("nonexistentcommand12345");

Some files were not shown because too many files have changed in this diff.