Compare commits
23 Commits
eed6dbf8dc
...
developmen
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9865e601d7 | ||
|
|
7afa5ea1c0 | ||
|
|
6c2d96c9a5 | ||
|
|
b2fc0976bd | ||
|
|
e114404ca7 | ||
|
|
536779f521 | ||
|
|
c2969621b1 | ||
|
|
b39f24ca8f | ||
| f87a1d7f80 | |||
| 17e5924e0b | |||
|
|
768e3e176d | ||
|
|
aa0248ef17 | ||
|
|
aab2b6f128 | ||
|
|
d735316b7f | ||
|
|
d1c80863b8 | ||
|
|
169c62da47 | ||
|
|
33a5f24981 | ||
|
|
d7562ce466 | ||
| ca736d62f3 | |||
|
|
078c6f723b | ||
|
|
9fdb8d8845 | ||
| 8203a3b1ff | |||
| 1770ac561e |
5
.gitignore
vendored
5
.gitignore
vendored
@@ -63,4 +63,7 @@ sidebars.ts
|
||||
|
||||
tsconfig.json
|
||||
Cargo.toml.bak
|
||||
for_augment
|
||||
for_augment
|
||||
|
||||
myenv.sh
|
||||
|
||||
|
||||
21
Cargo.toml
21
Cargo.toml
@@ -16,16 +16,23 @@ members = [
|
||||
"packages/clients/postgresclient",
|
||||
"packages/clients/redisclient",
|
||||
"packages/clients/zinitclient",
|
||||
"packages/clients/rfsclient",
|
||||
"packages/core/net",
|
||||
"packages/core/text",
|
||||
"packages/crypt/vault",
|
||||
"packages/data/ourdb",
|
||||
"packages/data/radixtree",
|
||||
"packages/data/tst",
|
||||
"packages/system/git",
|
||||
"packages/system/kubernetes",
|
||||
"packages/system/os",
|
||||
"packages/system/process",
|
||||
"packages/system/virt",
|
||||
"rhai",
|
||||
"rhailib",
|
||||
"herodo",
|
||||
"packages/clients/hetznerclient",
|
||||
"packages/ai/codemonkey",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
@@ -37,6 +44,7 @@ rust-version = "1.70.0"
|
||||
# Core shared dependencies with consistent versions
|
||||
anyhow = "1.0.98"
|
||||
base64 = "0.22.1"
|
||||
bytes = "1.7.1"
|
||||
dirs = "6.0.0"
|
||||
env_logger = "0.11.8"
|
||||
futures = "0.3.30"
|
||||
@@ -47,7 +55,7 @@ log = "0.4"
|
||||
once_cell = "1.18.0"
|
||||
rand = "0.8.5"
|
||||
regex = "1.8.1"
|
||||
reqwest = { version = "0.12.15", features = ["json"] }
|
||||
reqwest = { version = "0.12.15", features = ["json", "blocking"] }
|
||||
rhai = { version = "1.12.0", features = ["sync"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
@@ -102,6 +110,8 @@ sal-git = { path = "packages/system/git" }
|
||||
sal-kubernetes = { path = "packages/system/kubernetes" }
|
||||
sal-redisclient = { path = "packages/clients/redisclient" }
|
||||
sal-mycelium = { path = "packages/clients/myceliumclient" }
|
||||
sal-hetzner = { path = "packages/clients/hetznerclient" }
|
||||
sal-rfs-client = { path = "packages/clients/rfsclient" }
|
||||
sal-text = { path = "packages/core/text" }
|
||||
sal-os = { path = "packages/system/os" }
|
||||
sal-net = { path = "packages/core/net" }
|
||||
@@ -122,6 +132,8 @@ sal-git = { workspace = true, optional = true }
|
||||
sal-kubernetes = { workspace = true, optional = true }
|
||||
sal-redisclient = { workspace = true, optional = true }
|
||||
sal-mycelium = { workspace = true, optional = true }
|
||||
sal-hetzner = { workspace = true, optional = true }
|
||||
sal-rfs-client = { workspace = true, optional = true }
|
||||
sal-text = { workspace = true, optional = true }
|
||||
sal-os = { workspace = true, optional = true }
|
||||
sal-net = { workspace = true, optional = true }
|
||||
@@ -141,6 +153,8 @@ git = ["dep:sal-git"]
|
||||
kubernetes = ["dep:sal-kubernetes"]
|
||||
redisclient = ["dep:sal-redisclient"]
|
||||
mycelium = ["dep:sal-mycelium"]
|
||||
hetzner = ["dep:sal-hetzner"]
|
||||
rfsclient = ["dep:sal-rfs-client"]
|
||||
text = ["dep:sal-text"]
|
||||
os = ["dep:sal-os"]
|
||||
net = ["dep:sal-net"]
|
||||
@@ -154,7 +168,7 @@ rhai = ["dep:sal-rhai"]
|
||||
|
||||
# Convenience feature groups
|
||||
core = ["os", "process", "text", "net"]
|
||||
clients = ["redisclient", "postgresclient", "zinit_client", "mycelium"]
|
||||
clients = ["redisclient", "postgresclient", "zinit_client", "mycelium", "hetzner", "rfsclient"]
|
||||
infrastructure = ["git", "vault", "kubernetes", "virt"]
|
||||
scripting = ["rhai"]
|
||||
all = [
|
||||
@@ -162,6 +176,8 @@ all = [
|
||||
"kubernetes",
|
||||
"redisclient",
|
||||
"mycelium",
|
||||
"hetzner",
|
||||
"rfsclient",
|
||||
"text",
|
||||
"os",
|
||||
"net",
|
||||
@@ -188,4 +204,3 @@ required-features = ["kubernetes"]
|
||||
name = "generic_cluster"
|
||||
path = "examples/kubernetes/clusters/generic.rs"
|
||||
required-features = ["kubernetes"]
|
||||
|
||||
|
||||
228
README.md
228
README.md
@@ -1,148 +1,136 @@
|
||||
# SAL (System Abstraction Layer)
|
||||
# Herocode Herolib Rust Repository
|
||||
|
||||
**Version 0.1.0** - A modular Rust library for cross-platform system operations and automation.
|
||||
## Overview
|
||||
|
||||
SAL provides a unified interface for system operations with Rhai scripting support through the `herodo` tool.
|
||||
This repository contains the **Herocode Herolib** Rust library and a collection of scripts, examples, and utilities for building, testing, and publishing the SAL (System Abstraction Layer) crates. The repository includes:
|
||||
|
||||
## Installation
|
||||
- **Rust crates** for various system components (e.g., `os`, `process`, `text`, `git`, `vault`, `kubernetes`, etc.).
|
||||
- **Rhai scripts** and test suites for each crate.
|
||||
- **Utility scripts** to automate common development tasks.
|
||||
|
||||
### Individual Packages (Recommended)
|
||||
## Scripts
|
||||
|
||||
The repository provides three primary helper scripts located in the repository root:
|
||||
|
||||
| Script | Description | Typical Usage |
|
||||
|--------|-------------|--------------|
|
||||
| `scripts/publish-all.sh` | Publishes all SAL crates to **crates.io** in the correct dependency order. Handles version bumping, dependency updates, dry‑run mode, and rate‑limiting. | `./scripts/publish-all.sh [--dry-run] [--wait <seconds>] [--version <ver>]` |
|
||||
| `build_herodo.sh` | Builds the `herodo` binary from the `herodo` package and optionally runs a specified Rhai script. | `./build_herodo.sh [script_name]` |
|
||||
| `run_rhai_tests.sh` | Executes all Rhai test suites across the repository, logging results and providing a summary. | `./run_rhai_tests.sh` |
|
||||
|
||||
Below are detailed usage instructions for each script.
|
||||
|
||||
---
|
||||
|
||||
## 1. `scripts/publish-all.sh`
|
||||
|
||||
### Purpose
|
||||
|
||||
- Publishes each SAL crate in the correct dependency order.
|
||||
- Updates crate versions (if `--version` is supplied).
|
||||
- Updates path dependencies to version dependencies before publishing.
|
||||
- Supports **dry‑run** mode to preview actions without publishing.
|
||||
- Handles rate‑limiting between crate publishes.
|
||||
|
||||
### Options
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--dry-run` | Shows what would be published without actually publishing. |
|
||||
| `--wait <seconds>` | Wait time between publishes (default: 15 s). |
|
||||
| `--version <ver>` | Set a new version for all crates (updates `Cargo.toml` files). |
|
||||
| `-h, --help` | Show help message. |
|
||||
|
||||
### Example Usage
|
||||
|
||||
```bash
|
||||
# Core functionality
|
||||
cargo add sal-os sal-process sal-text sal-net
|
||||
# Dry run – no crates will be published
|
||||
./scripts/publish-all.sh --dry-run
|
||||
|
||||
# Infrastructure
|
||||
cargo add sal-git sal-vault sal-kubernetes sal-virt
|
||||
# Publish with a custom wait time and version bump
|
||||
./scripts/publish-all.sh --wait 30 --version 1.2.3
|
||||
|
||||
# Database clients
|
||||
cargo add sal-redisclient sal-postgresclient sal-zinit-client
|
||||
|
||||
# Scripting
|
||||
cargo add sal-rhai
|
||||
# Normal publish (no dry‑run)
|
||||
./scripts/publish-all.sh
|
||||
```
|
||||
|
||||
### Meta-package with Features
|
||||
### Notes
|
||||
|
||||
- Must be run from the repository root (where `Cargo.toml` lives).
|
||||
- Requires `cargo` and a logged‑in `cargo` session (`cargo login`).
|
||||
- The script automatically updates dependencies in each crate’s `Cargo.toml` to use the new version before publishing.
|
||||
|
||||
---
|
||||
|
||||
## 2. `build_herodo.sh`
|
||||
|
||||
### Purpose
|
||||
|
||||
- Builds the `herodo` binary from the `herodo` package.
|
||||
- Copies the binary to a system‑wide location (`/usr/local/bin`) if run as root, otherwise to `~/hero/bin`.
|
||||
- Optionally runs a specified Rhai script after building.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
cargo add sal --features core # os, process, text, net
|
||||
cargo add sal --features infrastructure # git, vault, kubernetes, virt
|
||||
cargo add sal --features all # everything
|
||||
# Build only
|
||||
./build_herodo.sh
|
||||
|
||||
# Build and run a specific Rhai script (e.g., `example`):
|
||||
./build_herodo.sh example
|
||||
```
|
||||
|
||||
### Herodo Script Runner
|
||||
### Details
|
||||
|
||||
- The script changes to its own directory, builds the `herodo` crate (`cargo build`), and copies the binary.
|
||||
- If a script name is provided, it looks for the script in:
|
||||
- `src/rhaiexamples/<name>.rhai`
|
||||
- `src/herodo/scripts/<name>.rhai`
|
||||
- If the script is not found, the script exits with an error.
|
||||
|
||||
---
|
||||
|
||||
## 3. `run_rhai_tests.sh`
|
||||
|
||||
### Purpose
|
||||
|
||||
- Runs **all** Rhai test suites across the repository.
|
||||
- Supports both the legacy `rhai_tests` directory and the newer `*/tests/rhai` layout.
|
||||
- Logs output to `run_rhai_tests.log` and prints a summary.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
cargo install herodo
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Rust Library Usage
|
||||
|
||||
```rust
|
||||
use sal_os::fs;
|
||||
use sal_process::run;
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let files = fs::list_files(".")?;
|
||||
println!("Found {} files", files.len());
|
||||
|
||||
let result = run::command("echo hello")?;
|
||||
println!("Output: {}", result.stdout);
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
### Herodo Scripting
|
||||
|
||||
```bash
|
||||
# Create script
|
||||
cat > example.rhai << 'EOF'
|
||||
let files = find_files(".", "*.rs");
|
||||
print("Found " + files.len() + " Rust files");
|
||||
|
||||
let result = run("echo 'Hello from SAL!'");
|
||||
print("Output: " + result.stdout);
|
||||
EOF
|
||||
|
||||
# Run script
|
||||
herodo example.rhai
|
||||
```
|
||||
|
||||
## Available Packages
|
||||
|
||||
| Package | Description |
|
||||
|---------|-------------|
|
||||
| [`sal-os`](https://crates.io/crates/sal-os) | Operating system operations |
|
||||
| [`sal-process`](https://crates.io/crates/sal-process) | Process management |
|
||||
| [`sal-text`](https://crates.io/crates/sal-text) | Text processing |
|
||||
| [`sal-net`](https://crates.io/crates/sal-net) | Network operations |
|
||||
| [`sal-git`](https://crates.io/crates/sal-git) | Git repository management |
|
||||
| [`sal-vault`](https://crates.io/crates/sal-vault) | Cryptographic operations |
|
||||
| [`sal-kubernetes`](https://crates.io/crates/sal-kubernetes) | Kubernetes management |
|
||||
| [`sal-virt`](https://crates.io/crates/sal-virt) | Virtualization tools |
|
||||
| [`sal-redisclient`](https://crates.io/crates/sal-redisclient) | Redis client |
|
||||
| [`sal-postgresclient`](https://crates.io/crates/sal-postgresclient) | PostgreSQL client |
|
||||
| [`sal-zinit-client`](https://crates.io/crates/sal-zinit-client) | Zinit process supervisor |
|
||||
| [`sal-mycelium`](https://crates.io/crates/sal-mycelium) | Mycelium network client |
|
||||
| [`sal-service-manager`](https://crates.io/crates/sal-service-manager) | Service management |
|
||||
| [`sal-rhai`](https://crates.io/crates/sal-rhai) | Rhai scripting integration |
|
||||
| [`sal`](https://crates.io/crates/sal) | Meta-crate with features |
|
||||
| [`herodo`](https://crates.io/crates/herodo) | Script executor binary |
|
||||
|
||||
## Building & Testing
|
||||
|
||||
```bash
|
||||
# Build all packages
|
||||
cargo build --workspace
|
||||
|
||||
# Run tests
|
||||
cargo test --workspace
|
||||
|
||||
# Run Rhai integration tests
|
||||
# Run all tests
|
||||
./run_rhai_tests.sh
|
||||
```
|
||||
|
||||
## Core Features
|
||||
### Output
|
||||
|
||||
- **System Operations**: File/directory management, environment access, OS commands
|
||||
- **Process Management**: Create, monitor, and control system processes
|
||||
- **Containerization**: Buildah and nerdctl integration
|
||||
- **Version Control**: Git repository operations
|
||||
- **Database Clients**: Redis and PostgreSQL support
|
||||
- **Networking**: HTTP, TCP, SSH connectivity utilities
|
||||
- **Cryptography**: Key management, encryption, digital signatures
|
||||
- **Text Processing**: String manipulation and templating
|
||||
- **Scripting**: Rhai script execution via `herodo`
|
||||
- Colored console output for readability.
|
||||
- Log file (`run_rhai_tests.log`) contains full output for later review.
|
||||
- Summary includes total modules, passed, and failed counts.
|
||||
- Exit code `0` if all tests pass, `1` otherwise.
|
||||
|
||||
## Herodo Scripting
|
||||
---
|
||||
|
||||
`herodo` executes Rhai scripts with access to all SAL modules:
|
||||
## General Development Workflow
|
||||
|
||||
```bash
|
||||
herodo script.rhai # Run single script
|
||||
herodo script.rhai arg1 arg2 # With arguments
|
||||
herodo /path/to/scripts/ # Run all .rhai files in directory
|
||||
```
|
||||
1. **Build**: Use `build_herodo.sh` to compile the `herodo` binary.
|
||||
2. **Test**: Run `run_rhai_tests.sh` to ensure all Rhai scripts pass.
|
||||
3. **Publish**: When ready to release, use `scripts/publish-all.sh` (with `--dry-run` first to verify).
|
||||
|
||||
### Example Script
|
||||
## Prerequisites
|
||||
|
||||
```rhai
|
||||
// File operations
|
||||
let files = find_files(".", "*.rs");
|
||||
print("Found " + files.len() + " Rust files");
|
||||
|
||||
// Process execution
|
||||
let result = run("echo 'Hello SAL!'");
|
||||
print("Output: " + result.stdout);
|
||||
|
||||
// Redis operations
|
||||
redis_set("status", "running");
|
||||
let status = redis_get("status");
|
||||
print("Status: " + status);
|
||||
```
|
||||
- **Rust toolchain** (`cargo`, `rustc`) installed.
|
||||
- **Rhai** interpreter (`herodo`) built and available.
|
||||
- **Git** for version control.
|
||||
- **Cargo login** for publishing to crates.io.
|
||||
|
||||
## License
|
||||
|
||||
Licensed under the Apache License 2.0. See [LICENSE](LICENSE) for details.
|
||||
See `LICENSE` for details.
|
||||
|
||||
---
|
||||
|
||||
**Happy coding!**
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
[package]
|
||||
name = "sal-service-manager"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["PlanetFirst <info@incubaid.com>"]
|
||||
description = "SAL Service Manager - Cross-platform service management for dynamic worker deployment"
|
||||
repository = "https://git.threefold.info/herocode/sal"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
# Use workspace dependencies for consistency
|
||||
thiserror = "1.0"
|
||||
tokio = { workspace = true }
|
||||
log = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
once_cell = { workspace = true }
|
||||
# Use base zinit-client instead of SAL wrapper
|
||||
zinit-client = { version = "0.4.0" }
|
||||
# Optional Rhai integration
|
||||
rhai = { workspace = true, optional = true }
|
||||
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dependencies]
|
||||
# macOS-specific dependencies for launchctl
|
||||
plist = "1.6"
|
||||
|
||||
[features]
|
||||
default = ["zinit"]
|
||||
zinit = []
|
||||
rhai = ["dep:rhai"]
|
||||
|
||||
# Enable zinit feature for tests
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
rhai = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
env_logger = "0.10"
|
||||
|
||||
[[test]]
|
||||
name = "zinit_integration_tests"
|
||||
required-features = ["zinit"]
|
||||
@@ -1,198 +0,0 @@
|
||||
# SAL Service Manager
|
||||
|
||||
[](https://crates.io/crates/sal-service-manager)
|
||||
[](https://docs.rs/sal-service-manager)
|
||||
|
||||
A cross-platform service management library for the System Abstraction Layer (SAL). This crate provides a unified interface for managing system services across different platforms, enabling dynamic deployment of workers and services.
|
||||
|
||||
## Features
|
||||
|
||||
- **Cross-platform service management** - Unified API across macOS and Linux
|
||||
- **Dynamic worker deployment** - Perfect for circle workers and on-demand services
|
||||
- **Platform-specific implementations**:
|
||||
- **macOS**: Uses `launchctl` with plist management
|
||||
- **Linux**: Uses `zinit` for lightweight service management (systemd also available)
|
||||
- **Complete lifecycle management** - Start, stop, restart, status monitoring, and log retrieval
|
||||
- **Service configuration** - Environment variables, working directories, auto-restart
|
||||
- **Production-ready** - Comprehensive error handling and resource management
|
||||
|
||||
## Usage
|
||||
|
||||
Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
sal-service-manager = "0.1.0"
|
||||
```
|
||||
|
||||
Or use it as part of the SAL ecosystem:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
sal = { version = "0.1.0", features = ["service_manager"] }
|
||||
```
|
||||
|
||||
## Primary Use Case: Dynamic Circle Worker Management
|
||||
|
||||
This service manager was designed specifically for dynamic deployment of circle workers in freezone environments. When a new resident registers, you can instantly launch a dedicated circle worker:
|
||||
|
||||
```rust,no_run
|
||||
use sal_service_manager::{create_service_manager, ServiceConfig};
|
||||
use std::collections::HashMap;
|
||||
|
||||
// New resident registration triggers worker creation
|
||||
fn deploy_circle_worker(resident_id: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let manager = create_service_manager();
|
||||
|
||||
let mut env = HashMap::new();
|
||||
env.insert("RESIDENT_ID".to_string(), resident_id.to_string());
|
||||
env.insert("WORKER_TYPE".to_string(), "circle".to_string());
|
||||
|
||||
let config = ServiceConfig {
|
||||
name: format!("circle-worker-{}", resident_id),
|
||||
binary_path: "/usr/bin/circle-worker".to_string(),
|
||||
args: vec!["--resident".to_string(), resident_id.to_string()],
|
||||
working_directory: Some("/var/lib/circle-workers".to_string()),
|
||||
environment: env,
|
||||
auto_restart: true,
|
||||
};
|
||||
|
||||
// Deploy the worker
|
||||
manager.start(&config)?;
|
||||
println!("✅ Circle worker deployed for resident: {}", resident_id);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Basic Usage Example
|
||||
|
||||
Here is an example of the core service management API:
|
||||
|
||||
```rust,no_run
|
||||
use sal_service_manager::{create_service_manager, ServiceConfig};
|
||||
use std::collections::HashMap;
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let service_manager = create_service_manager();
|
||||
|
||||
let config = ServiceConfig {
|
||||
name: "my-service".to_string(),
|
||||
binary_path: "/usr/local/bin/my-service-executable".to_string(),
|
||||
args: vec!["--config".to_string(), "/etc/my-service.conf".to_string()],
|
||||
working_directory: Some("/var/tmp".to_string()),
|
||||
environment: HashMap::new(),
|
||||
auto_restart: true,
|
||||
};
|
||||
|
||||
// Start a new service
|
||||
service_manager.start(&config)?;
|
||||
|
||||
// Get the status of the service
|
||||
let status = service_manager.status("my-service")?;
|
||||
println!("Service status: {:?}", status);
|
||||
|
||||
// Stop the service
|
||||
service_manager.stop("my-service")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
Comprehensive examples are available in the SAL examples directory:
|
||||
|
||||
### Circle Worker Manager Example
|
||||
|
||||
The primary use case - dynamically launching circle workers for new freezone residents:
|
||||
|
||||
```bash
|
||||
# Run the circle worker management example
|
||||
herodo examples/service_manager/circle_worker_manager.rhai
|
||||
```
|
||||
|
||||
This example demonstrates:
|
||||
- Creating service configurations for circle workers
|
||||
- Complete service lifecycle management
|
||||
- Error handling and status monitoring
|
||||
- Service cleanup and removal
|
||||
|
||||
### Basic Usage Example
|
||||
|
||||
A simpler example showing the core API:
|
||||
|
||||
```bash
|
||||
# Run the basic usage example
|
||||
herodo examples/service_manager/basic_usage.rhai
|
||||
```
|
||||
|
||||
See `examples/service_manager/README.md` for detailed documentation.
|
||||
|
||||
## Testing
|
||||
|
||||
Run the test suite:
|
||||
|
||||
```bash
|
||||
cargo test -p sal-service-manager
|
||||
```
|
||||
|
||||
For Rhai integration tests:
|
||||
|
||||
```bash
|
||||
cargo test -p sal-service-manager --features rhai
|
||||
```
|
||||
|
||||
### Testing with Herodo
|
||||
|
||||
To test the service manager with real Rhai scripts using herodo, first build herodo:
|
||||
|
||||
```bash
|
||||
./build_herodo.sh
|
||||
```
|
||||
|
||||
Then run Rhai scripts that use the service manager:
|
||||
|
||||
```bash
|
||||
herodo your_service_script.rhai
|
||||
```
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Linux (zinit/systemd)
|
||||
|
||||
The service manager automatically discovers running zinit servers and falls back to systemd if none are found.
|
||||
|
||||
**For zinit (recommended):**
|
||||
|
||||
```bash
|
||||
# Start zinit with default socket
|
||||
zinit -s /tmp/zinit.sock init
|
||||
|
||||
# Or with a custom socket path
|
||||
zinit -s /var/run/zinit.sock init
|
||||
```
|
||||
|
||||
**Socket Discovery:**
|
||||
The service manager will automatically find running zinit servers by checking:
|
||||
1. `ZINIT_SOCKET_PATH` environment variable (if set)
|
||||
2. Common socket locations: `/var/run/zinit.sock`, `/tmp/zinit.sock`, `/run/zinit.sock`, `./zinit.sock`
|
||||
|
||||
**Custom socket path:**
|
||||
```bash
|
||||
# Set custom socket path
|
||||
export ZINIT_SOCKET_PATH=/your/custom/path/zinit.sock
|
||||
```
|
||||
|
||||
**Systemd fallback:**
|
||||
If no zinit server is detected, the service manager automatically falls back to systemd.
|
||||
|
||||
### macOS (launchctl)
|
||||
|
||||
No additional setup required - uses the built-in launchctl system.
|
||||
|
||||
## Platform Support
|
||||
|
||||
- **macOS**: Full support using `launchctl` for service management
|
||||
- **Linux**: Full support using `zinit` for service management (systemd also available as alternative)
|
||||
- **Windows**: Not currently supported
|
||||
@@ -1,47 +0,0 @@
|
||||
# Service Manager Examples
|
||||
|
||||
This directory contains examples demonstrating the usage of the `sal-service-manager` crate.
|
||||
|
||||
## Running Examples
|
||||
|
||||
To run any example, use the following command structure from the `service_manager` crate's root directory:
|
||||
|
||||
```sh
|
||||
cargo run --example <EXAMPLE_NAME>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 1. `simple_service`
|
||||
|
||||
This example demonstrates the ideal, clean lifecycle of a service using the separated `create` and `start` steps.
|
||||
|
||||
**Behavior:**
|
||||
1. Creates a new service definition.
|
||||
2. Starts the newly created service.
|
||||
3. Checks its status to confirm it's running.
|
||||
4. Stops the service.
|
||||
5. Checks its status again to confirm it's stopped.
|
||||
6. Removes the service definition.
|
||||
|
||||
**Run it:**
|
||||
```sh
|
||||
cargo run --example simple_service
|
||||
```
|
||||
|
||||
### 2. `service_spaghetti`
|
||||
|
||||
This example demonstrates how the service manager handles "messy" or improper sequences of operations, showcasing its error handling and robustness.
|
||||
|
||||
**Behavior:**
|
||||
1. Creates a service.
|
||||
2. Starts the service.
|
||||
3. Tries to start the **same service again** (which should fail as it's already running).
|
||||
4. Removes the service **without stopping it first** (the manager should handle this gracefully).
|
||||
5. Tries to stop the **already removed** service (which should fail).
|
||||
6. Tries to remove the service **again** (which should also fail).
|
||||
|
||||
**Run it:**
|
||||
```sh
|
||||
cargo run --example service_spaghetti
|
||||
```
|
||||
@@ -1,109 +0,0 @@
|
||||
//! service_spaghetti - An example of messy service management.
|
||||
//!
|
||||
//! This example demonstrates how the service manager behaves when commands
|
||||
//! are issued in a less-than-ideal order, such as starting a service that's
|
||||
//! already running or removing a service that hasn't been stopped.
|
||||
|
||||
use sal_service_manager::{create_service_manager, ServiceConfig};
|
||||
use std::collections::HashMap;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
fn main() {
|
||||
// Initialize logging to see socket discovery in action
|
||||
env_logger::init();
|
||||
|
||||
let manager = match create_service_manager() {
|
||||
Ok(manager) => manager,
|
||||
Err(e) => {
|
||||
eprintln!("Error: Failed to create service manager: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
let service_name = "com.herocode.examples.spaghetti";
|
||||
|
||||
let service_config = ServiceConfig {
|
||||
name: service_name.to_string(),
|
||||
binary_path: "/bin/sh".to_string(),
|
||||
args: vec![
|
||||
"-c".to_string(),
|
||||
"while true; do echo 'Spaghetti service is running...'; sleep 5; done".to_string(),
|
||||
],
|
||||
working_directory: None,
|
||||
environment: HashMap::new(),
|
||||
auto_restart: false,
|
||||
};
|
||||
|
||||
println!("--- Service Spaghetti Example ---");
|
||||
println!("This example demonstrates messy, error-prone service management.");
|
||||
|
||||
// Cleanup from previous runs to ensure a clean slate
|
||||
if let Ok(true) = manager.exists(service_name) {
|
||||
println!(
|
||||
"\nService '{}' found from a previous run. Cleaning up first.",
|
||||
service_name
|
||||
);
|
||||
let _ = manager.stop(service_name);
|
||||
let _ = manager.remove(service_name);
|
||||
println!("Cleanup complete.");
|
||||
}
|
||||
|
||||
// 1. Start the service (creates and starts in one step)
|
||||
println!("\n1. Starting the service for the first time...");
|
||||
match manager.start(&service_config) {
|
||||
Ok(()) => println!(" -> Success: Service '{}' started.", service_name),
|
||||
Err(e) => {
|
||||
eprintln!(
|
||||
" -> Error: Failed to start service: {}. Halting example.",
|
||||
e
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
thread::sleep(Duration::from_secs(2));
|
||||
|
||||
// 2. Try to start the service again while it's already running
|
||||
println!("\n2. Trying to start the *same service* again...");
|
||||
match manager.start(&service_config) {
|
||||
Ok(()) => println!(" -> Unexpected Success: Service started again."),
|
||||
Err(e) => eprintln!(
|
||||
" -> Expected Error: {}. The manager should detect it is already running.",
|
||||
e
|
||||
),
|
||||
}
|
||||
|
||||
// 3. Let it run for a bit
|
||||
println!("\n3. Letting the service run for 5 seconds...");
|
||||
thread::sleep(Duration::from_secs(5));
|
||||
|
||||
// 4. Remove the service without stopping it first
|
||||
// The `remove` function is designed to stop the service if it's running.
|
||||
println!("\n4. Removing the service without explicitly stopping it first...");
|
||||
match manager.remove(service_name) {
|
||||
Ok(()) => println!(" -> Success: Service was stopped and removed."),
|
||||
Err(e) => eprintln!(" -> Error: Failed to remove service: {}", e),
|
||||
}
|
||||
|
||||
// 5. Try to stop the service after it has been removed
|
||||
println!("\n5. Trying to stop the service that was just removed...");
|
||||
match manager.stop(service_name) {
|
||||
Ok(()) => println!(" -> Unexpected Success: Stopped a removed service."),
|
||||
Err(e) => eprintln!(
|
||||
" -> Expected Error: {}. The manager knows the service is gone.",
|
||||
e
|
||||
),
|
||||
}
|
||||
|
||||
// 6. Try to remove the service again
|
||||
println!("\n6. Trying to remove the service again...");
|
||||
match manager.remove(service_name) {
|
||||
Ok(()) => println!(" -> Unexpected Success: Removed a non-existent service."),
|
||||
Err(e) => eprintln!(
|
||||
" -> Expected Error: {}. The manager correctly reports it's not found.",
|
||||
e
|
||||
),
|
||||
}
|
||||
|
||||
println!("\n--- Spaghetti Example Finished ---");
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
use sal_service_manager::{create_service_manager, ServiceConfig};
|
||||
use std::collections::HashMap;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
fn main() {
|
||||
// Initialize logging to see socket discovery in action
|
||||
env_logger::init();
|
||||
|
||||
// 1. Create a service manager for the current platform
|
||||
let manager = match create_service_manager() {
|
||||
Ok(manager) => manager,
|
||||
Err(e) => {
|
||||
eprintln!("Error: Failed to create service manager: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// 2. Define the configuration for our new service
|
||||
let service_name = "com.herocode.examples.simpleservice";
|
||||
let service_config = ServiceConfig {
|
||||
name: service_name.to_string(),
|
||||
// A simple command that runs in a loop
|
||||
binary_path: "/bin/sh".to_string(),
|
||||
args: vec![
|
||||
"-c".to_string(),
|
||||
"while true; do echo 'Simple service is running...'; date; sleep 5; done".to_string(),
|
||||
],
|
||||
working_directory: None,
|
||||
environment: HashMap::new(),
|
||||
auto_restart: false,
|
||||
};
|
||||
|
||||
println!("--- Service Manager Example ---");
|
||||
|
||||
// Cleanup from previous runs, if necessary
|
||||
if let Ok(true) = manager.exists(service_name) {
|
||||
println!(
|
||||
"Service '{}' already exists. Cleaning up before starting.",
|
||||
service_name
|
||||
);
|
||||
if let Err(e) = manager.stop(service_name) {
|
||||
println!(
|
||||
"Note: could not stop existing service (it might not be running): {}",
|
||||
e
|
||||
);
|
||||
}
|
||||
if let Err(e) = manager.remove(service_name) {
|
||||
eprintln!("Error: failed to remove existing service: {}", e);
|
||||
return;
|
||||
}
|
||||
println!("Cleanup complete.");
|
||||
}
|
||||
|
||||
// 3. Start the service (creates and starts in one step)
|
||||
println!("\n1. Starting service: '{}'", service_name);
|
||||
match manager.start(&service_config) {
|
||||
Ok(()) => println!("Service '{}' started successfully.", service_name),
|
||||
Err(e) => {
|
||||
eprintln!("Error: Failed to start service '{}': {}", service_name, e);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Give it a moment to run
|
||||
println!("\nWaiting for 2 seconds for the service to initialize...");
|
||||
thread::sleep(Duration::from_secs(2));
|
||||
|
||||
// 4. Check the status of the service
|
||||
println!("\n2. Checking service status...");
|
||||
match manager.status(service_name) {
|
||||
Ok(status) => println!("Service status: {:?}", status),
|
||||
Err(e) => eprintln!(
|
||||
"Error: Failed to get status for service '{}': {}",
|
||||
service_name, e
|
||||
),
|
||||
}
|
||||
|
||||
println!("\nLetting the service run for 10 seconds. Check logs if you can.");
|
||||
thread::sleep(Duration::from_secs(10));
|
||||
|
||||
// 5. Stop the service
|
||||
println!("\n3. Stopping service: '{}'", service_name);
|
||||
match manager.stop(service_name) {
|
||||
Ok(()) => println!("Service '{}' stopped successfully.", service_name),
|
||||
Err(e) => eprintln!("Error: Failed to stop service '{}': {}", service_name, e),
|
||||
}
|
||||
|
||||
println!("\nWaiting for 2 seconds for the service to stop...");
|
||||
thread::sleep(Duration::from_secs(2));
|
||||
|
||||
// Check status again
|
||||
println!("\n4. Checking status after stopping...");
|
||||
match manager.status(service_name) {
|
||||
Ok(status) => println!("Service status: {:?}", status),
|
||||
Err(e) => eprintln!(
|
||||
"Error: Failed to get status for service '{}': {}",
|
||||
service_name, e
|
||||
),
|
||||
}
|
||||
|
||||
// 6. Remove the service
|
||||
println!("\n5. Removing service: '{}'", service_name);
|
||||
match manager.remove(service_name) {
|
||||
Ok(()) => println!("Service '{}' removed successfully.", service_name),
|
||||
Err(e) => eprintln!("Error: Failed to remove service '{}': {}", service_name, e),
|
||||
}
|
||||
|
||||
println!("\n--- Example Finished ---");
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
//! Socket Discovery Test
|
||||
//!
|
||||
//! This example demonstrates the zinit socket discovery functionality.
|
||||
//! It shows how the service manager finds available zinit sockets.
|
||||
|
||||
use sal_service_manager::create_service_manager;
|
||||
|
||||
fn main() {
|
||||
// Initialize logging to see socket discovery in action
|
||||
env_logger::init();
|
||||
|
||||
println!("=== Zinit Socket Discovery Test ===");
|
||||
println!("This test demonstrates how the service manager discovers zinit sockets.");
|
||||
println!();
|
||||
|
||||
// Test environment variable
|
||||
if let Ok(socket_path) = std::env::var("ZINIT_SOCKET_PATH") {
|
||||
println!("🔍 ZINIT_SOCKET_PATH environment variable set to: {}", socket_path);
|
||||
} else {
|
||||
println!("🔍 ZINIT_SOCKET_PATH environment variable not set");
|
||||
}
|
||||
println!();
|
||||
|
||||
println!("🚀 Creating service manager...");
|
||||
match create_service_manager() {
|
||||
Ok(_manager) => {
|
||||
println!("✅ Service manager created successfully!");
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
println!("📱 Platform: macOS - Using launchctl");
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
println!("🐧 Platform: Linux - Check logs above for socket discovery details");
|
||||
}
|
||||
Err(e) => {
|
||||
println!("❌ Failed to create service manager: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
println!();
|
||||
println!("=== Test Complete ===");
|
||||
println!();
|
||||
println!("To test zinit socket discovery on Linux:");
|
||||
println!("1. Start zinit: zinit -s /tmp/zinit.sock init");
|
||||
println!("2. Run with logging: RUST_LOG=debug cargo run --example socket_discovery_test -p sal-service-manager");
|
||||
println!("3. Or set custom path: ZINIT_SOCKET_PATH=/custom/path.sock RUST_LOG=debug cargo run --example socket_discovery_test -p sal-service-manager");
|
||||
}
|
||||
@@ -1,492 +0,0 @@
|
||||
use crate::{ServiceConfig, ServiceManager, ServiceManagerError, ServiceStatus};
|
||||
use once_cell::sync::Lazy;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use tokio::process::Command;
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
// Shared runtime for async operations - production-safe initialization
|
||||
static ASYNC_RUNTIME: Lazy<Option<Runtime>> = Lazy::new(|| Runtime::new().ok());
|
||||
|
||||
/// Get the async runtime, creating a temporary one if the static runtime failed
|
||||
fn get_runtime() -> Result<Runtime, ServiceManagerError> {
|
||||
// Try to use the static runtime first
|
||||
if let Some(_runtime) = ASYNC_RUNTIME.as_ref() {
|
||||
// We can't return a reference to the static runtime because we need ownership
|
||||
// for block_on, so we create a new one. This is a reasonable trade-off for safety.
|
||||
Runtime::new().map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to create async runtime: {}", e))
|
||||
})
|
||||
} else {
|
||||
// Static runtime failed, try to create a new one
|
||||
Runtime::new().map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to create async runtime: {}", e))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// macOS service manager backed by `launchctl` and per-user LaunchAgents.
#[derive(Debug)]
pub struct LaunchctlServiceManager {
    // Reverse-DNS prefix prepended to every service name when building the
    // launchd label (see `get_service_label`), e.g. "tf.ourworld.circles.<name>".
    service_prefix: String,
}
|
||||
|
||||
/// Serializable model of a launchd property-list (plist) file.
///
/// Field renames map Rust snake_case onto launchd's expected PascalCase plist
/// keys; `Option` fields are omitted from the generated XML when `None`.
#[derive(Serialize, Deserialize)]
struct LaunchDaemon {
    // Unique launchd label, e.g. "tf.ourworld.circles.<service>".
    #[serde(rename = "Label")]
    label: String,
    // argv[0] (binary path) followed by its arguments.
    #[serde(rename = "ProgramArguments")]
    program_arguments: Vec<String>,
    #[serde(rename = "WorkingDirectory", skip_serializing_if = "Option::is_none")]
    working_directory: Option<String>,
    #[serde(
        rename = "EnvironmentVariables",
        skip_serializing_if = "Option::is_none"
    )]
    environment_variables: Option<HashMap<String, String>>,
    // Some(true) asks launchd to restart the process when it exits
    // (set from ServiceConfig::auto_restart in create_plist).
    #[serde(rename = "KeepAlive", skip_serializing_if = "Option::is_none")]
    keep_alive: Option<bool>,
    // Always true here: start the job as soon as it is loaded.
    #[serde(rename = "RunAtLoad")]
    run_at_load: bool,
    // stdout/stderr are pointed at the same log file (see get_log_path).
    #[serde(rename = "StandardOutPath", skip_serializing_if = "Option::is_none")]
    standard_out_path: Option<String>,
    #[serde(rename = "StandardErrorPath", skip_serializing_if = "Option::is_none")]
    standard_error_path: Option<String>,
}
|
||||
|
||||
impl LaunchctlServiceManager {
    /// Construct a manager using the fixed "tf.ourworld.circles" label prefix.
    pub fn new() -> Self {
        Self {
            service_prefix: "tf.ourworld.circles".to_string(),
        }
    }

    /// Full launchd label for a service: "<prefix>.<service_name>".
    fn get_service_label(&self, service_name: &str) -> String {
        format!("{}.{}", self.service_prefix, service_name)
    }

    /// Path of the per-user LaunchAgent plist:
    /// $HOME/Library/LaunchAgents/<label>.plist (falls back to /tmp if HOME unset).
    fn get_plist_path(&self, service_name: &str) -> PathBuf {
        let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".to_string());
        PathBuf::from(home)
            .join("Library")
            .join("LaunchAgents")
            .join(format!("{}.plist", self.get_service_label(service_name)))
    }

    /// Path of the combined stdout/stderr log:
    /// $HOME/Library/Logs/circles/<service_name>.log (falls back to /tmp if HOME unset).
    fn get_log_path(&self, service_name: &str) -> PathBuf {
        let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".to_string());
        PathBuf::from(home)
            .join("Library")
            .join("Logs")
            .join("circles")
            .join(format!("{}.log", service_name))
    }

    /// Serialize `config` into a launchd plist on disk, creating the
    /// LaunchAgents and log directories as needed. Does not load the service.
    async fn create_plist(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError> {
        let label = self.get_service_label(&config.name);
        let plist_path = self.get_plist_path(&config.name);
        let log_path = self.get_log_path(&config.name);

        // Ensure the LaunchAgents directory exists
        if let Some(parent) = plist_path.parent() {
            tokio::fs::create_dir_all(parent).await?;
        }

        // Ensure the logs directory exists
        if let Some(parent) = log_path.parent() {
            tokio::fs::create_dir_all(parent).await?;
        }

        // launchd's ProgramArguments includes the binary itself as argv[0].
        let mut program_arguments = vec![config.binary_path.clone()];
        program_arguments.extend(config.args.clone());

        let launch_daemon = LaunchDaemon {
            label: label.clone(),
            program_arguments,
            working_directory: config.working_directory.clone(),
            // Omit the key entirely when there are no env vars, rather than
            // emitting an empty dict.
            environment_variables: if config.environment.is_empty() {
                None
            } else {
                Some(config.environment.clone())
            },
            // KeepAlive=true makes launchd restart the process on exit.
            keep_alive: if config.auto_restart {
                Some(true)
            } else {
                None
            },
            run_at_load: true,
            // stdout and stderr share one log file.
            standard_out_path: Some(log_path.to_string_lossy().to_string()),
            standard_error_path: Some(log_path.to_string_lossy().to_string()),
        };

        let mut plist_content = Vec::new();
        plist::to_writer_xml(&mut plist_content, &launch_daemon)
            .map_err(|e| ServiceManagerError::Other(format!("Failed to serialize plist: {}", e)))?;
        let plist_content = String::from_utf8(plist_content).map_err(|e| {
            ServiceManagerError::Other(format!("Failed to convert plist to string: {}", e))
        })?;

        tokio::fs::write(&plist_path, plist_content).await?;

        Ok(())
    }

    /// Run `launchctl <args>` and return its stdout; a non-zero exit status is
    /// mapped to `ServiceManagerError::Other` carrying stderr.
    async fn run_launchctl(&self, args: &[&str]) -> Result<String, ServiceManagerError> {
        let output = Command::new("launchctl").args(args).output().await?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            return Err(ServiceManagerError::Other(format!(
                "launchctl command failed: {}",
                stderr
            )));
        }

        Ok(String::from_utf8_lossy(&output.stdout).to_string())
    }

    /// Poll `status()` every 500ms until the service is Running, has Failed
    /// (in which case recent log lines are folded into the error), or
    /// `timeout_secs` elapses.
    ///
    /// NOTE(review): this async fn calls the *blocking* `self.status()` /
    /// `self.logs()` trait methods, which themselves call
    /// `get_runtime()?.block_on(...)`. Calling `block_on` from inside an async
    /// context is expected to panic under Tokio — confirm and consider async
    /// variants of status/logs.
    async fn wait_for_service_status(
        &self,
        service_name: &str,
        timeout_secs: u64,
    ) -> Result<(), ServiceManagerError> {
        use tokio::time::{sleep, timeout, Duration};

        let timeout_duration = Duration::from_secs(timeout_secs);
        let poll_interval = Duration::from_millis(500);

        let result = timeout(timeout_duration, async {
            loop {
                match self.status(service_name) {
                    Ok(ServiceStatus::Running) => {
                        return Ok(());
                    }
                    Ok(ServiceStatus::Failed) => {
                        // Service failed, get error details from logs
                        let logs = self.logs(service_name, Some(20)).unwrap_or_default();
                        let error_msg = if logs.is_empty() {
                            "Service failed to start (no logs available)".to_string()
                        } else {
                            // Extract up to 3 lines mentioning "error"/"failed".
                            let error_lines: Vec<&str> = logs
                                .lines()
                                .filter(|line| {
                                    line.to_lowercase().contains("error")
                                        || line.to_lowercase().contains("failed")
                                })
                                .take(3)
                                .collect();

                            if error_lines.is_empty() {
                                // No obvious error lines: fall back to the last
                                // 5 log lines in original order (rev/take/rev).
                                format!(
                                    "Service failed to start. Recent logs:\n{}",
                                    logs.lines()
                                        .rev()
                                        .take(5)
                                        .collect::<Vec<_>>()
                                        .into_iter()
                                        .rev()
                                        .collect::<Vec<_>>()
                                        .join("\n")
                                )
                            } else {
                                format!(
                                    "Service failed to start. Errors:\n{}",
                                    error_lines.join("\n")
                                )
                            }
                        };
                        return Err(ServiceManagerError::StartFailed(
                            service_name.to_string(),
                            error_msg,
                        ));
                    }
                    Ok(ServiceStatus::Stopped) | Ok(ServiceStatus::Unknown) => {
                        // Still starting, continue polling
                        sleep(poll_interval).await;
                    }
                    Err(ServiceManagerError::ServiceNotFound(_)) => {
                        return Err(ServiceManagerError::ServiceNotFound(
                            service_name.to_string(),
                        ));
                    }
                    Err(e) => {
                        return Err(e);
                    }
                }
            }
        })
        .await;

        // Outer Err means the timeout elapsed; inner results pass through.
        match result {
            Ok(Ok(())) => Ok(()),
            Ok(Err(e)) => Err(e),
            Err(_) => Err(ServiceManagerError::StartFailed(
                service_name.to_string(),
                format!("Service did not start within {} seconds", timeout_secs),
            )),
        }
    }
}
|
||||
|
||||
impl ServiceManager for LaunchctlServiceManager {
    /// A service "exists" iff its plist file is present on disk.
    fn exists(&self, service_name: &str) -> Result<bool, ServiceManagerError> {
        let plist_path = self.get_plist_path(service_name);
        Ok(plist_path.exists())
    }

    /// Create the plist from `config` and `launchctl load` it.
    /// Errors with ServiceAlreadyExists if the label already appears in
    /// `launchctl list`.
    fn start(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError> {
        // Use production-safe runtime for async operations
        let runtime = get_runtime()?;
        runtime.block_on(async {
            let label = self.get_service_label(&config.name);

            // Check if service is already loaded
            let list_output = self.run_launchctl(&["list"]).await?;
            if list_output.contains(&label) {
                return Err(ServiceManagerError::ServiceAlreadyExists(
                    config.name.clone(),
                ));
            }

            // Create the plist file
            self.create_plist(config).await?;

            // Load the service (RunAtLoad=true makes load also start it)
            let plist_path = self.get_plist_path(&config.name);
            self.run_launchctl(&["load", &plist_path.to_string_lossy()])
                .await
                .map_err(|e| {
                    ServiceManagerError::StartFailed(config.name.clone(), e.to_string())
                })?;

            Ok(())
        })
    }

    /// Start a service whose plist already exists: load it if unloaded,
    /// `launchctl start` it if loaded but not running, no-op if running.
    fn start_existing(&self, service_name: &str) -> Result<(), ServiceManagerError> {
        let runtime = get_runtime()?;
        runtime.block_on(async {
            let label = self.get_service_label(service_name);
            let plist_path = self.get_plist_path(service_name);

            // Check if plist file exists
            if !plist_path.exists() {
                return Err(ServiceManagerError::ServiceNotFound(
                    service_name.to_string(),
                ));
            }

            // Check if service is already loaded and running
            let list_output = self.run_launchctl(&["list"]).await?;
            if list_output.contains(&label) {
                // NOTE(review): self.status() internally calls
                // get_runtime()?.block_on(...); invoking it here, inside an
                // async block already driven by block_on, is expected to
                // panic under Tokio — confirm.
                match self.status(service_name)? {
                    ServiceStatus::Running => {
                        return Ok(()); // Already running, nothing to do
                    }
                    _ => {
                        // Service is loaded but not running, try to start it
                        self.run_launchctl(&["start", &label]).await.map_err(|e| {
                            ServiceManagerError::StartFailed(
                                service_name.to_string(),
                                e.to_string(),
                            )
                        })?;
                        return Ok(());
                    }
                }
            }

            // Service is not loaded, load it
            self.run_launchctl(&["load", &plist_path.to_string_lossy()])
                .await
                .map_err(|e| {
                    ServiceManagerError::StartFailed(service_name.to_string(), e.to_string())
                })?;

            Ok(())
        })
    }

    /// `start` followed by polling until Running/Failed/timeout.
    fn start_and_confirm(
        &self,
        config: &ServiceConfig,
        timeout_secs: u64,
    ) -> Result<(), ServiceManagerError> {
        // First start the service
        self.start(config)?;

        // Then wait for confirmation using production-safe runtime
        let runtime = get_runtime()?;
        runtime.block_on(async {
            self.wait_for_service_status(&config.name, timeout_secs)
                .await
        })
    }

    /// `start_existing` followed by polling until Running/Failed/timeout.
    fn start_existing_and_confirm(
        &self,
        service_name: &str,
        timeout_secs: u64,
    ) -> Result<(), ServiceManagerError> {
        // First start the existing service
        self.start_existing(service_name)?;

        // Then wait for confirmation using production-safe runtime
        let runtime = get_runtime()?;
        runtime.block_on(async {
            self.wait_for_service_status(service_name, timeout_secs)
                .await
        })
    }

    /// Stop by unloading the plist from launchd (the plist file remains).
    fn stop(&self, service_name: &str) -> Result<(), ServiceManagerError> {
        let runtime = get_runtime()?;
        runtime.block_on(async {
            let _label = self.get_service_label(service_name);
            let plist_path = self.get_plist_path(service_name);

            // Unload the service
            self.run_launchctl(&["unload", &plist_path.to_string_lossy()])
                .await
                .map_err(|e| {
                    ServiceManagerError::StopFailed(service_name.to_string(), e.to_string())
                })?;

            Ok(())
        })
    }

    /// Stops the service, then always returns RestartFailed: restarting needs
    /// the original ServiceConfig, which this manager does not persist.
    fn restart(&self, service_name: &str) -> Result<(), ServiceManagerError> {
        // For launchctl, we stop and start
        if let Err(e) = self.stop(service_name) {
            // If stop fails because service doesn't exist, that's ok for restart
            if !matches!(e, ServiceManagerError::ServiceNotFound(_)) {
                return Err(ServiceManagerError::RestartFailed(
                    service_name.to_string(),
                    e.to_string(),
                ));
            }
        }

        // We need the config to restart, but we don't have it stored
        // For now, return an error - in a real implementation we might store configs
        Err(ServiceManagerError::RestartFailed(
            service_name.to_string(),
            "Restart requires re-providing service configuration".to_string(),
        ))
    }

    /// Derive status from `launchctl list`: a "PID" entry means Running, a
    /// "LastExitStatus" entry means Failed, otherwise Unknown. Missing plist
    /// is ServiceNotFound; label absent from the list is Stopped.
    fn status(&self, service_name: &str) -> Result<ServiceStatus, ServiceManagerError> {
        let runtime = get_runtime()?;
        runtime.block_on(async {
            let label = self.get_service_label(service_name);
            let plist_path = self.get_plist_path(service_name);

            // First check if the plist file exists
            if !plist_path.exists() {
                return Err(ServiceManagerError::ServiceNotFound(
                    service_name.to_string(),
                ));
            }

            let list_output = self.run_launchctl(&["list"]).await?;

            if !list_output.contains(&label) {
                return Ok(ServiceStatus::Stopped);
            }

            // Get detailed status by scraping `launchctl list <label>` output.
            match self.run_launchctl(&["list", &label]).await {
                Ok(output) => {
                    if output.contains("\"PID\" = ") {
                        Ok(ServiceStatus::Running)
                    } else if output.contains("\"LastExitStatus\" = ") {
                        Ok(ServiceStatus::Failed)
                    } else {
                        Ok(ServiceStatus::Unknown)
                    }
                }
                Err(_) => Ok(ServiceStatus::Stopped),
            }
        })
    }

    /// Read the service's log file; `lines = Some(n)` shells out to `tail -n`,
    /// `None` reads the whole file. Missing log file yields an empty string.
    fn logs(
        &self,
        service_name: &str,
        lines: Option<usize>,
    ) -> Result<String, ServiceManagerError> {
        let runtime = get_runtime()?;
        runtime.block_on(async {
            let log_path = self.get_log_path(service_name);

            if !log_path.exists() {
                return Ok(String::new());
            }

            match lines {
                Some(n) => {
                    let output = Command::new("tail")
                        .args(&["-n", &n.to_string(), &log_path.to_string_lossy()])
                        .output()
                        .await?;
                    Ok(String::from_utf8_lossy(&output.stdout).to_string())
                }
                None => {
                    let content = tokio::fs::read_to_string(&log_path).await?;
                    Ok(content)
                }
            }
        })
    }

    /// List service names managed by this prefix: scan `launchctl list` for
    /// lines containing the prefix and strip "<prefix>." from the label.
    fn list(&self) -> Result<Vec<String>, ServiceManagerError> {
        let runtime = get_runtime()?;
        runtime.block_on(async {
            let list_output = self.run_launchctl(&["list"]).await?;

            let services: Vec<String> = list_output
                .lines()
                .filter_map(|line| {
                    if line.contains(&self.service_prefix) {
                        // Extract service name from label (last whitespace-
                        // separated column of `launchctl list` output).
                        line.split_whitespace()
                            .last()
                            .and_then(|label| {
                                label.strip_prefix(&format!("{}.", self.service_prefix))
                            })
                            .map(|s| s.to_string())
                    } else {
                        None
                    }
                })
                .collect();

            Ok(services)
        })
    }

    /// Best-effort stop, then delete the plist file if present.
    fn remove(&self, service_name: &str) -> Result<(), ServiceManagerError> {
        // Try to stop the service first, but don't fail if it's already stopped or doesn't exist
        if let Err(e) = self.stop(service_name) {
            // Log the error but continue with removal
            log::warn!(
                "Failed to stop service '{}' before removal: {}",
                service_name,
                e
            );
        }

        // Remove the plist file using production-safe runtime
        let runtime = get_runtime()?;
        runtime.block_on(async {
            let plist_path = self.get_plist_path(service_name);
            if plist_path.exists() {
                tokio::fs::remove_file(&plist_path).await?;
            }
            Ok(())
        })
    }
}
|
||||
@@ -1,301 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors produced by all `ServiceManager` backends.
///
/// The `#[error]` attributes (thiserror) double as the Display messages;
/// two-field variants carry (service_name, underlying error text).
#[derive(Error, Debug)]
pub enum ServiceManagerError {
    #[error("Service '{0}' not found")]
    ServiceNotFound(String),
    #[error("Service '{0}' already exists")]
    ServiceAlreadyExists(String),
    #[error("Failed to start service '{0}': {1}")]
    StartFailed(String, String),
    #[error("Failed to stop service '{0}': {1}")]
    StopFailed(String, String),
    #[error("Failed to restart service '{0}': {1}")]
    RestartFailed(String, String),
    #[error("Failed to get logs for service '{0}': {1}")]
    LogsFailed(String, String),
    // `#[from]` lets `?` convert std::io::Error automatically.
    #[error("IO error: {0}")]
    IoError(#[from] std::io::Error),
    // Catch-all for backend-specific failures (launchctl/zinit/systemd).
    #[error("Service manager error: {0}")]
    Other(String),
}
|
||||
|
||||
/// Declarative description of a service to be started by a `ServiceManager`.
#[derive(Debug, Clone)]
pub struct ServiceConfig {
    // Logical service name; backends derive labels/paths from it.
    pub name: String,
    // Absolute path of the executable to run.
    pub binary_path: String,
    // Arguments passed after the binary path.
    pub args: Vec<String>,
    // Optional working directory for the process.
    pub working_directory: Option<String>,
    // Environment variables for the process; empty map means "none".
    pub environment: HashMap<String, String>,
    // When true, the backend asks its init system to restart the process
    // on exit (e.g. launchd KeepAlive).
    pub auto_restart: bool,
}
|
||||
|
||||
/// Coarse lifecycle state of a managed service, as reported by `status()`.
#[derive(Debug, Clone, PartialEq)]
pub enum ServiceStatus {
    Running,
    Stopped,
    Failed,
    // Backend could not classify the state from available information.
    Unknown,
}
|
||||
|
||||
/// Platform-agnostic contract for managing long-lived services
/// (launchctl on macOS; zinit or systemd on Linux).
///
/// `Send + Sync` is required so a `Box<dyn ServiceManager>` can be shared
/// across threads.
pub trait ServiceManager: Send + Sync {
    /// Check if a service exists (i.e. its configuration is known to the backend)
    fn exists(&self, service_name: &str) -> Result<bool, ServiceManagerError>;

    /// Start a service with the given configuration (creates it if needed)
    fn start(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError>;

    /// Start an existing service by name (load existing plist/config)
    fn start_existing(&self, service_name: &str) -> Result<(), ServiceManagerError>;

    /// Start a service and wait up to `timeout_secs` for confirmation that
    /// it's running or failed
    fn start_and_confirm(
        &self,
        config: &ServiceConfig,
        timeout_secs: u64,
    ) -> Result<(), ServiceManagerError>;

    /// Start an existing service and wait up to `timeout_secs` for
    /// confirmation that it's running or failed
    fn start_existing_and_confirm(
        &self,
        service_name: &str,
        timeout_secs: u64,
    ) -> Result<(), ServiceManagerError>;

    /// Stop a service by name
    fn stop(&self, service_name: &str) -> Result<(), ServiceManagerError>;

    /// Restart a service by name
    fn restart(&self, service_name: &str) -> Result<(), ServiceManagerError>;

    /// Get the status of a service
    fn status(&self, service_name: &str) -> Result<ServiceStatus, ServiceManagerError>;

    /// Get logs for a service; `lines = Some(n)` limits to the last n lines,
    /// `None` returns everything available
    fn logs(&self, service_name: &str, lines: Option<usize>)
        -> Result<String, ServiceManagerError>;

    /// List all managed services
    fn list(&self) -> Result<Vec<String>, ServiceManagerError>;

    /// Remove a service configuration (stopping it first if running)
    fn remove(&self, service_name: &str) -> Result<(), ServiceManagerError>;
}
|
||||
|
||||
// Platform-specific implementations
|
||||
#[cfg(target_os = "macos")]
|
||||
mod launchctl;
|
||||
#[cfg(target_os = "macos")]
|
||||
pub use launchctl::LaunchctlServiceManager;
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
mod systemd;
|
||||
#[cfg(target_os = "linux")]
|
||||
pub use systemd::SystemdServiceManager;
|
||||
|
||||
mod zinit;
|
||||
pub use zinit::ZinitServiceManager;
|
||||
|
||||
#[cfg(feature = "rhai")]
|
||||
pub mod rhai;
|
||||
|
||||
/// Discover available zinit socket paths
|
||||
///
|
||||
/// This function checks for zinit sockets in the following order:
|
||||
/// 1. Environment variable ZINIT_SOCKET_PATH (if set)
|
||||
/// 2. Common socket locations with connectivity testing
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// Returns the first working socket path found, or None if no working zinit server is detected.
|
||||
#[cfg(target_os = "linux")]
|
||||
fn discover_zinit_socket() -> Option<String> {
|
||||
// First check environment variable
|
||||
if let Ok(env_socket_path) = std::env::var("ZINIT_SOCKET_PATH") {
|
||||
log::debug!("Checking ZINIT_SOCKET_PATH: {}", env_socket_path);
|
||||
if test_zinit_socket(&env_socket_path) {
|
||||
log::info!(
|
||||
"Using zinit socket from ZINIT_SOCKET_PATH: {}",
|
||||
env_socket_path
|
||||
);
|
||||
return Some(env_socket_path);
|
||||
} else {
|
||||
log::warn!(
|
||||
"ZINIT_SOCKET_PATH specified but socket is not accessible: {}",
|
||||
env_socket_path
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Try common socket locations
|
||||
let common_paths = [
|
||||
"/var/run/zinit.sock",
|
||||
"/tmp/zinit.sock",
|
||||
"/run/zinit.sock",
|
||||
"./zinit.sock",
|
||||
];
|
||||
|
||||
log::debug!("Discovering zinit socket from common locations...");
|
||||
for path in &common_paths {
|
||||
log::debug!("Testing socket path: {}", path);
|
||||
if test_zinit_socket(path) {
|
||||
log::info!("Found working zinit socket at: {}", path);
|
||||
return Some(path.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
log::debug!("No working zinit socket found");
|
||||
None
|
||||
}
|
||||
|
||||
/// Test if a zinit socket is accessible and responsive
|
||||
///
|
||||
/// This function attempts to create a ZinitServiceManager and perform a basic
|
||||
/// connectivity test by listing services.
|
||||
#[cfg(target_os = "linux")]
|
||||
fn test_zinit_socket(socket_path: &str) -> bool {
|
||||
// Check if socket file exists first
|
||||
if !std::path::Path::new(socket_path).exists() {
|
||||
log::debug!("Socket file does not exist: {}", socket_path);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Try to create a manager and test basic connectivity
|
||||
match ZinitServiceManager::new(socket_path) {
|
||||
Ok(manager) => {
|
||||
// Test basic connectivity by trying to list services
|
||||
match manager.list() {
|
||||
Ok(_) => {
|
||||
log::debug!("Socket {} is responsive", socket_path);
|
||||
true
|
||||
}
|
||||
Err(e) => {
|
||||
log::debug!("Socket {} exists but not responsive: {}", socket_path, e);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
log::debug!("Failed to create manager for socket {}: {}", socket_path, e);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a service manager appropriate for the current platform
|
||||
///
|
||||
/// - On macOS: Uses launchctl for service management
|
||||
/// - On Linux: Uses zinit for service management with systemd fallback
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// Returns a Result containing the service manager or an error if initialization fails.
|
||||
/// On Linux, it first tries to discover a working zinit socket. If no zinit server is found,
|
||||
/// it will fall back to systemd.
|
||||
///
|
||||
/// # Environment Variables
|
||||
///
|
||||
/// - `ZINIT_SOCKET_PATH`: Specifies the zinit socket path (Linux only)
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns `ServiceManagerError` if:
|
||||
/// - The platform is not supported (Windows, etc.)
|
||||
/// - Service manager initialization fails on all available backends
|
||||
pub fn create_service_manager() -> Result<Box<dyn ServiceManager>, ServiceManagerError> {
    #[cfg(target_os = "macos")]
    {
        // macOS always uses launchctl; no discovery step needed.
        Ok(Box::new(LaunchctlServiceManager::new()))
    }
    #[cfg(target_os = "linux")]
    {
        // Prefer zinit when a responsive socket is discovered; otherwise fall
        // through to the systemd fallback below.
        if let Some(socket_path) = discover_zinit_socket() {
            match ZinitServiceManager::new(&socket_path) {
                Ok(zinit_manager) => {
                    log::info!("Using zinit service manager with socket: {}", socket_path);
                    return Ok(Box::new(zinit_manager));
                }
                Err(zinit_error) => {
                    // Discovery succeeded but construction failed: warn and
                    // fall back rather than erroring out.
                    log::warn!(
                        "Failed to create zinit manager for discovered socket {}: {}",
                        socket_path,
                        zinit_error
                    );
                }
            }
        } else {
            log::info!("No running zinit server detected. To use zinit, start it with: zinit -s /tmp/zinit.sock init");
        }

        // Fallback to systemd
        log::info!("Falling back to systemd service manager");
        Ok(Box::new(SystemdServiceManager::new()))
    }
    #[cfg(not(any(target_os = "macos", target_os = "linux")))]
    {
        // No backend implemented for other platforms (e.g. Windows).
        Err(ServiceManagerError::Other(
            "Service manager not implemented for this platform".to_string(),
        ))
    }
}
|
||||
|
||||
/// Create a service manager for zinit with a custom socket path
|
||||
///
|
||||
/// This is useful when zinit is running with a non-default socket path
|
||||
pub fn create_zinit_service_manager(
|
||||
socket_path: &str,
|
||||
) -> Result<Box<dyn ServiceManager>, ServiceManagerError> {
|
||||
Ok(Box::new(ZinitServiceManager::new(socket_path)?))
|
||||
}
|
||||
|
||||
/// Create a service manager for systemd (Linux alternative)
|
||||
///
|
||||
/// This creates a systemd-based service manager as an alternative to zinit on Linux
|
||||
#[cfg(target_os = "linux")]
|
||||
pub fn create_systemd_service_manager() -> Box<dyn ServiceManager> {
|
||||
Box::new(SystemdServiceManager::new())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: the platform factory must produce a manager without panicking.
    #[test]
    fn test_create_service_manager() {
        // This test ensures the service manager can be created without panicking
        let result = create_service_manager();
        assert!(result.is_ok(), "Service manager creation should succeed");
    }

    #[cfg(target_os = "linux")]
    #[test]
    fn test_socket_discovery_with_env_var() {
        // Test that environment variable is respected
        // NOTE(review): mutating process-wide env vars races with other tests
        // when `cargo test` runs in parallel threads — consider a serial guard.
        std::env::set_var("ZINIT_SOCKET_PATH", "/test/path.sock");

        // The discover function should check the env var first
        // Since the socket doesn't exist, it should return None, but we can't test
        // the actual discovery logic without a real socket
        // NOTE(review): this test currently performs no assertion at all.

        std::env::remove_var("ZINIT_SOCKET_PATH");
    }

    #[cfg(target_os = "linux")]
    #[test]
    fn test_socket_discovery_without_env_var() {
        // Ensure env var is not set
        std::env::remove_var("ZINIT_SOCKET_PATH");

        // The discover function should try common paths
        // Since no zinit is running, it should return None
        // NOTE(review): this assumes no zinit socket exists on the test host
        // (e.g. /tmp/zinit.sock) — it will fail on machines running zinit.
        let result = discover_zinit_socket();

        // This is expected to be None in test environment
        assert!(
            result.is_none(),
            "Should return None when no zinit server is running"
        );
    }
}
|
||||
@@ -1,256 +0,0 @@
|
||||
//! Rhai integration for the service manager module
|
||||
//!
|
||||
//! This module provides Rhai scripting support for service management operations.
|
||||
|
||||
use crate::{create_service_manager, ServiceConfig, ServiceManager};
|
||||
use rhai::{Engine, EvalAltResult, Map};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// A Rhai-friendly wrapper around a boxed `ServiceManager`.
///
/// Rhai requires registered types to be `Clone`, so the trait object is held
/// behind an `Arc`; clones share the same underlying manager.
#[derive(Clone)]
pub struct RhaiServiceManager {
    inner: Arc<Box<dyn ServiceManager>>,
}
|
||||
|
||||
impl RhaiServiceManager {
|
||||
pub fn new() -> Result<Self, Box<EvalAltResult>> {
|
||||
let manager = create_service_manager()
|
||||
.map_err(|e| format!("Failed to create service manager: {}", e))?;
|
||||
Ok(Self {
|
||||
inner: Arc::new(manager),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Register the service manager module with a Rhai engine.
///
/// Exposes `RhaiServiceManager` as a Rhai type and registers the
/// `create_service_manager` factory plus the service lifecycle functions:
/// `start`, `stop`, `restart`, `status`, `logs`, `list`, `remove`, `exists`,
/// `start_and_confirm`, and `start_existing_and_confirm`.
///
/// Errors from the underlying `ServiceManager` are stringified and converted
/// into Rhai errors via `.into()`.
pub fn register_service_manager_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    // Factory function to create service manager
    engine.register_type::<RhaiServiceManager>();
    engine.register_fn(
        "create_service_manager",
        || -> Result<RhaiServiceManager, Box<EvalAltResult>> { RhaiServiceManager::new() },
    );

    // Service management functions

    // start(config_map): create/start a service from a config map
    // (see `map_to_service_config` for the expected keys).
    engine.register_fn(
        "start",
        |manager: &mut RhaiServiceManager, config: Map| -> Result<(), Box<EvalAltResult>> {
            let service_config = map_to_service_config(config)?;
            manager
                .inner
                .start(&service_config)
                .map_err(|e| format!("Failed to start service: {}", e).into())
        },
    );

    // stop(name): stop a running service by name.
    engine.register_fn(
        "stop",
        |manager: &mut RhaiServiceManager,
         service_name: String|
         -> Result<(), Box<EvalAltResult>> {
            manager
                .inner
                .stop(&service_name)
                .map_err(|e| format!("Failed to stop service: {}", e).into())
        },
    );

    // restart(name): restart a service by name.
    engine.register_fn(
        "restart",
        |manager: &mut RhaiServiceManager,
         service_name: String|
         -> Result<(), Box<EvalAltResult>> {
            manager
                .inner
                .restart(&service_name)
                .map_err(|e| format!("Failed to restart service: {}", e).into())
        },
    );

    // status(name): return the Debug rendering of the service status.
    engine.register_fn(
        "status",
        |manager: &mut RhaiServiceManager,
         service_name: String|
         -> Result<String, Box<EvalAltResult>> {
            let status = manager
                .inner
                .status(&service_name)
                .map_err(|e| format!("Failed to get service status: {}", e))?;
            Ok(format!("{:?}", status))
        },
    );

    // logs(name, lines): fetch service logs; a non-positive `lines`
    // means "no line limit" (None is passed to the manager).
    engine.register_fn(
        "logs",
        |manager: &mut RhaiServiceManager,
         service_name: String,
         lines: i64|
         -> Result<String, Box<EvalAltResult>> {
            let lines_opt = if lines > 0 {
                Some(lines as usize)
            } else {
                None
            };
            manager
                .inner
                .logs(&service_name, lines_opt)
                .map_err(|e| format!("Failed to get service logs: {}", e).into())
        },
    );

    // list(): names of all managed services.
    engine.register_fn(
        "list",
        |manager: &mut RhaiServiceManager| -> Result<Vec<String>, Box<EvalAltResult>> {
            manager
                .inner
                .list()
                .map_err(|e| format!("Failed to list services: {}", e).into())
        },
    );

    // remove(name): stop (best effort) and remove a service.
    engine.register_fn(
        "remove",
        |manager: &mut RhaiServiceManager,
         service_name: String|
         -> Result<(), Box<EvalAltResult>> {
            manager
                .inner
                .remove(&service_name)
                .map_err(|e| format!("Failed to remove service: {}", e).into())
        },
    );

    // exists(name): whether a service is known to the manager.
    engine.register_fn(
        "exists",
        |manager: &mut RhaiServiceManager,
         service_name: String|
         -> Result<bool, Box<EvalAltResult>> {
            manager
                .inner
                .exists(&service_name)
                .map_err(|e| format!("Failed to check if service exists: {}", e).into())
        },
    );

    // start_and_confirm(config_map, timeout): start and wait until running.
    // A non-positive timeout falls back to 30 seconds.
    engine.register_fn(
        "start_and_confirm",
        |manager: &mut RhaiServiceManager,
         config: Map,
         timeout_secs: i64|
         -> Result<(), Box<EvalAltResult>> {
            let service_config = map_to_service_config(config)?;
            let timeout = if timeout_secs > 0 {
                timeout_secs as u64
            } else {
                30
            };
            manager
                .inner
                .start_and_confirm(&service_config, timeout)
                .map_err(|e| format!("Failed to start and confirm service: {}", e).into())
        },
    );

    // start_existing_and_confirm(name, timeout): start an already-registered
    // service and wait until running. Non-positive timeout -> 30 seconds.
    engine.register_fn(
        "start_existing_and_confirm",
        |manager: &mut RhaiServiceManager,
         service_name: String,
         timeout_secs: i64|
         -> Result<(), Box<EvalAltResult>> {
            let timeout = if timeout_secs > 0 {
                timeout_secs as u64
            } else {
                30
            };
            manager
                .inner
                .start_existing_and_confirm(&service_name, timeout)
                .map_err(|e| format!("Failed to start existing service and confirm: {}", e).into())
        },
    );

    Ok(())
}
|
||||
|
||||
/// Convert a Rhai Map to a ServiceConfig
|
||||
fn map_to_service_config(map: Map) -> Result<ServiceConfig, Box<EvalAltResult>> {
|
||||
let name = map
|
||||
.get("name")
|
||||
.and_then(|v| v.clone().into_string().ok())
|
||||
.ok_or("Service config must have a 'name' field")?;
|
||||
|
||||
let binary_path = map
|
||||
.get("binary_path")
|
||||
.and_then(|v| v.clone().into_string().ok())
|
||||
.ok_or("Service config must have a 'binary_path' field")?;
|
||||
|
||||
let args = map
|
||||
.get("args")
|
||||
.and_then(|v| v.clone().try_cast::<rhai::Array>())
|
||||
.map(|arr| {
|
||||
arr.into_iter()
|
||||
.filter_map(|v| v.into_string().ok())
|
||||
.collect::<Vec<String>>()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
let working_directory = map
|
||||
.get("working_directory")
|
||||
.and_then(|v| v.clone().into_string().ok());
|
||||
|
||||
let environment = map
|
||||
.get("environment")
|
||||
.and_then(|v| v.clone().try_cast::<Map>())
|
||||
.map(|env_map| {
|
||||
env_map
|
||||
.into_iter()
|
||||
.filter_map(|(k, v)| v.into_string().ok().map(|val| (k.to_string(), val)))
|
||||
.collect::<HashMap<String, String>>()
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
let auto_restart = map
|
||||
.get("auto_restart")
|
||||
.and_then(|v| v.as_bool().ok())
|
||||
.unwrap_or(false);
|
||||
|
||||
Ok(ServiceConfig {
|
||||
name,
|
||||
binary_path,
|
||||
args,
|
||||
working_directory,
|
||||
environment,
|
||||
auto_restart,
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use rhai::{Engine, Map};

    /// Registration must succeed without panicking. Rhai exposes no public
    /// API to inspect registered functions, so "did not error" is the
    /// assertion here. (The former trailing `assert!(true)` was a no-op and
    /// has been removed — clippy::assertions_on_constants.)
    #[test]
    fn test_register_service_manager_module() {
        let mut engine = Engine::new();
        register_service_manager_module(&mut engine).unwrap();
    }

    /// A minimal valid config map (required fields plus one optional flag)
    /// round-trips into a `ServiceConfig` with the expected values.
    #[test]
    fn test_map_to_service_config() {
        let mut map = Map::new();
        map.insert("name".into(), "test-service".into());
        map.insert("binary_path".into(), "/bin/echo".into());
        map.insert("auto_restart".into(), true.into());

        let config = map_to_service_config(map).unwrap();
        assert_eq!(config.name, "test-service");
        assert_eq!(config.binary_path, "/bin/echo");
        assert_eq!(config.auto_restart, true);
    }
}
|
||||
@@ -1,434 +0,0 @@
|
||||
use crate::{ServiceConfig, ServiceManager, ServiceManagerError, ServiceStatus};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
|
||||
/// Service manager backed by systemd unit files and `systemctl`.
#[derive(Debug)]
pub struct SystemdServiceManager {
    // Prefix prepended to every managed unit: "<prefix>-<name>.service".
    service_prefix: String,
    // When true, operate on the per-user systemd instance (`systemctl --user`).
    user_mode: bool,
}
|
||||
|
||||
impl SystemdServiceManager {
    /// Create a manager operating on per-user services (`systemctl --user`).
    pub fn new() -> Self {
        Self {
            service_prefix: "sal".to_string(),
            user_mode: true, // Default to user services for safety
        }
    }

    /// Create a manager operating on system-wide services (requires root).
    pub fn new_system() -> Self {
        Self {
            service_prefix: "sal".to_string(),
            user_mode: false, // System-wide services (requires root)
        }
    }

    /// Full systemd unit name for a logical service:
    /// "<prefix>-<service_name>.service".
    fn get_service_name(&self, service_name: &str) -> String {
        format!("{}-{}.service", self.service_prefix, service_name)
    }

    /// Filesystem path of the unit file for `service_name`, depending on
    /// user/system mode.
    fn get_unit_file_path(&self, service_name: &str) -> PathBuf {
        let service_file = self.get_service_name(service_name);
        if self.user_mode {
            // User service directory
            // NOTE(review): falls back to /tmp when $HOME is unset — confirm
            // this is acceptable for the deployment environment.
            let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".to_string());
            PathBuf::from(home)
                .join(".config")
                .join("systemd")
                .join("user")
                .join(service_file)
        } else {
            // System service directory
            PathBuf::from("/etc/systemd/system").join(service_file)
        }
    }

    /// Run `systemctl` with the given arguments (adding `--user` in user
    /// mode) and return captured stdout.
    ///
    /// A non-zero exit status is turned into `ServiceManagerError::Other`
    /// carrying the captured stderr.
    fn run_systemctl(&self, args: &[&str]) -> Result<String, ServiceManagerError> {
        let mut cmd = Command::new("systemctl");

        if self.user_mode {
            cmd.arg("--user");
        }

        cmd.args(args);

        let output = cmd
            .output()
            .map_err(|e| ServiceManagerError::Other(format!("Failed to run systemctl: {}", e)))?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            return Err(ServiceManagerError::Other(format!(
                "systemctl command failed: {}",
                stderr
            )));
        }

        Ok(String::from_utf8_lossy(&output.stdout).to_string())
    }

    /// Generate and write the unit file for `config`, creating parent
    /// directories as needed, then `daemon-reload` so systemd sees it.
    fn create_unit_file(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError> {
        let unit_path = self.get_unit_file_path(&config.name);

        // Ensure the directory exists
        if let Some(parent) = unit_path.parent() {
            fs::create_dir_all(parent).map_err(|e| {
                ServiceManagerError::Other(format!("Failed to create unit directory: {}", e))
            })?;
        }

        // Create the unit file content
        let mut unit_content = String::new();
        unit_content.push_str("[Unit]\n");
        unit_content.push_str(&format!("Description={} service\n", config.name));
        unit_content.push_str("After=network.target\n\n");

        unit_content.push_str("[Service]\n");
        unit_content.push_str("Type=simple\n");

        // Build the ExecStart command
        // NOTE(review): args are joined with spaces and not quoted; an
        // argument containing whitespace will be word-split by systemd.
        // Confirm callers never pass such arguments.
        let mut exec_start = config.binary_path.clone();
        for arg in &config.args {
            exec_start.push(' ');
            exec_start.push_str(arg);
        }
        unit_content.push_str(&format!("ExecStart={}\n", exec_start));

        if let Some(working_dir) = &config.working_directory {
            unit_content.push_str(&format!("WorkingDirectory={}\n", working_dir));
        }

        // Add environment variables
        for (key, value) in &config.environment {
            unit_content.push_str(&format!("Environment=\"{}={}\"\n", key, value));
        }

        // auto_restart maps to systemd's Restart=always with a 5s back-off.
        if config.auto_restart {
            unit_content.push_str("Restart=always\n");
            unit_content.push_str("RestartSec=5\n");
        }

        unit_content.push_str("\n[Install]\n");
        unit_content.push_str("WantedBy=default.target\n");

        // Write the unit file
        fs::write(&unit_path, unit_content)
            .map_err(|e| ServiceManagerError::Other(format!("Failed to write unit file: {}", e)))?;

        // Reload systemd to pick up the new unit file
        self.run_systemctl(&["daemon-reload"])?;

        Ok(())
    }
}
|
||||
|
||||
impl ServiceManager for SystemdServiceManager {
    /// A service "exists" iff its unit file is present on disk.
    fn exists(&self, service_name: &str) -> Result<bool, ServiceManagerError> {
        let unit_path = self.get_unit_file_path(service_name);
        Ok(unit_path.exists())
    }

    /// Create (if needed), enable and start a service.
    ///
    /// Returns `ServiceAlreadyExists` when the unit exists and is already
    /// running; re-uses an existing unit file when the service exists but is
    /// stopped.
    fn start(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError> {
        let service_name = self.get_service_name(&config.name);

        // Check if service already exists and is running
        if self.exists(&config.name)? {
            match self.status(&config.name)? {
                ServiceStatus::Running => {
                    return Err(ServiceManagerError::ServiceAlreadyExists(
                        config.name.clone(),
                    ));
                }
                _ => {
                    // Service exists but not running, we can start it
                }
            }
        } else {
            // Create the unit file
            self.create_unit_file(config)?;
        }

        // Enable and start the service
        self.run_systemctl(&["enable", &service_name])
            .map_err(|e| ServiceManagerError::StartFailed(config.name.clone(), e.to_string()))?;

        self.run_systemctl(&["start", &service_name])
            .map_err(|e| ServiceManagerError::StartFailed(config.name.clone(), e.to_string()))?;

        Ok(())
    }

    /// Start a service whose unit file already exists. No-op when the
    /// service is already running.
    fn start_existing(&self, service_name: &str) -> Result<(), ServiceManagerError> {
        let service_unit = self.get_service_name(service_name);

        // Check if unit file exists
        if !self.exists(service_name)? {
            return Err(ServiceManagerError::ServiceNotFound(
                service_name.to_string(),
            ));
        }

        // Check if already running
        match self.status(service_name)? {
            ServiceStatus::Running => {
                return Ok(()); // Already running, nothing to do
            }
            _ => {
                // Start the service
                self.run_systemctl(&["start", &service_unit]).map_err(|e| {
                    ServiceManagerError::StartFailed(service_name.to_string(), e.to_string())
                })?;
            }
        }

        Ok(())
    }

    /// Start a service and poll its status every 100ms until it is Running,
    /// it reports Failed, or `timeout_secs` elapses.
    fn start_and_confirm(
        &self,
        config: &ServiceConfig,
        timeout_secs: u64,
    ) -> Result<(), ServiceManagerError> {
        // Start the service first
        self.start(config)?;

        // Wait for confirmation with timeout
        let start_time = std::time::Instant::now();
        let timeout_duration = std::time::Duration::from_secs(timeout_secs);

        while start_time.elapsed() < timeout_duration {
            match self.status(&config.name) {
                Ok(ServiceStatus::Running) => return Ok(()),
                Ok(ServiceStatus::Failed) => {
                    return Err(ServiceManagerError::StartFailed(
                        config.name.clone(),
                        "Service failed to start".to_string(),
                    ));
                }
                Ok(_) => {
                    // Still starting, wait a bit
                    std::thread::sleep(std::time::Duration::from_millis(100));
                }
                Err(_) => {
                    // Service might not exist yet, wait a bit
                    std::thread::sleep(std::time::Duration::from_millis(100));
                }
            }
        }

        Err(ServiceManagerError::StartFailed(
            config.name.clone(),
            format!("Service did not start within {} seconds", timeout_secs),
        ))
    }

    /// Like `start_and_confirm`, but for a service whose unit file already
    /// exists (delegates the start to `start_existing`).
    fn start_existing_and_confirm(
        &self,
        service_name: &str,
        timeout_secs: u64,
    ) -> Result<(), ServiceManagerError> {
        // Start the existing service first
        self.start_existing(service_name)?;

        // Wait for confirmation with timeout
        let start_time = std::time::Instant::now();
        let timeout_duration = std::time::Duration::from_secs(timeout_secs);

        while start_time.elapsed() < timeout_duration {
            match self.status(service_name) {
                Ok(ServiceStatus::Running) => return Ok(()),
                Ok(ServiceStatus::Failed) => {
                    return Err(ServiceManagerError::StartFailed(
                        service_name.to_string(),
                        "Service failed to start".to_string(),
                    ));
                }
                Ok(_) => {
                    // Still starting, wait a bit
                    std::thread::sleep(std::time::Duration::from_millis(100));
                }
                Err(_) => {
                    // Service might not exist yet, wait a bit
                    std::thread::sleep(std::time::Duration::from_millis(100));
                }
            }
        }

        Err(ServiceManagerError::StartFailed(
            service_name.to_string(),
            format!("Service did not start within {} seconds", timeout_secs),
        ))
    }

    /// Stop a service via `systemctl stop`; errors if the unit is unknown.
    fn stop(&self, service_name: &str) -> Result<(), ServiceManagerError> {
        let service_unit = self.get_service_name(service_name);

        // Check if service exists
        if !self.exists(service_name)? {
            return Err(ServiceManagerError::ServiceNotFound(
                service_name.to_string(),
            ));
        }

        // Stop the service
        self.run_systemctl(&["stop", &service_unit]).map_err(|e| {
            ServiceManagerError::StopFailed(service_name.to_string(), e.to_string())
        })?;

        Ok(())
    }

    /// Restart a service via `systemctl restart`; errors if the unit is unknown.
    fn restart(&self, service_name: &str) -> Result<(), ServiceManagerError> {
        let service_unit = self.get_service_name(service_name);

        // Check if service exists
        if !self.exists(service_name)? {
            return Err(ServiceManagerError::ServiceNotFound(
                service_name.to_string(),
            ));
        }

        // Restart the service
        self.run_systemctl(&["restart", &service_unit])
            .map_err(|e| {
                ServiceManagerError::RestartFailed(service_name.to_string(), e.to_string())
            })?;

        Ok(())
    }

    /// Map `systemctl is-active` output to a `ServiceStatus`.
    ///
    /// A failing `is-active` invocation is treated as "unknown" rather than
    /// an error (is-active exits non-zero for inactive units).
    fn status(&self, service_name: &str) -> Result<ServiceStatus, ServiceManagerError> {
        let service_unit = self.get_service_name(service_name);

        // Check if service exists
        if !self.exists(service_name)? {
            return Err(ServiceManagerError::ServiceNotFound(
                service_name.to_string(),
            ));
        }

        // Get service status
        let output = self
            .run_systemctl(&["is-active", &service_unit])
            .unwrap_or_else(|_| "unknown".to_string());

        let status = match output.trim() {
            "active" => ServiceStatus::Running,
            "inactive" => ServiceStatus::Stopped,
            "failed" => ServiceStatus::Failed,
            _ => ServiceStatus::Unknown,
        };

        Ok(status)
    }

    /// Fetch service logs via `journalctl --unit <unit> --no-pager`,
    /// optionally limited to the last `lines` entries.
    fn logs(
        &self,
        service_name: &str,
        lines: Option<usize>,
    ) -> Result<String, ServiceManagerError> {
        let service_unit = self.get_service_name(service_name);

        // Check if service exists
        if !self.exists(service_name)? {
            return Err(ServiceManagerError::ServiceNotFound(
                service_name.to_string(),
            ));
        }

        // Build journalctl command
        // `lines_arg` must outlive `args` because args holds a borrow of it.
        let mut args = vec!["--unit", &service_unit, "--no-pager"];
        let lines_arg;
        if let Some(n) = lines {
            lines_arg = format!("--lines={}", n);
            args.push(&lines_arg);
        }

        // Use journalctl to get logs
        let mut cmd = std::process::Command::new("journalctl");
        if self.user_mode {
            cmd.arg("--user");
        }
        cmd.args(&args);

        let output = cmd.output().map_err(|e| {
            ServiceManagerError::LogsFailed(
                service_name.to_string(),
                format!("Failed to run journalctl: {}", e),
            )
        })?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            return Err(ServiceManagerError::LogsFailed(
                service_name.to_string(),
                format!("journalctl command failed: {}", stderr),
            ));
        }

        Ok(String::from_utf8_lossy(&output.stdout).to_string())
    }

    /// List the logical names of all units carrying this manager's prefix,
    /// parsed from `systemctl list-units` output.
    fn list(&self) -> Result<Vec<String>, ServiceManagerError> {
        // List all services with our prefix
        let output =
            self.run_systemctl(&["list-units", "--type=service", "--all", "--no-pager"])?;

        let mut services = Vec::new();
        for line in output.lines() {
            if line.contains(&format!("{}-", self.service_prefix)) {
                // Extract service name from the line
                if let Some(unit_name) = line.split_whitespace().next() {
                    if let Some(service_name) = unit_name.strip_suffix(".service") {
                        if let Some(name) =
                            service_name.strip_prefix(&format!("{}-", self.service_prefix))
                        {
                            services.push(name.to_string());
                        }
                    }
                }
            }
        }

        Ok(services)
    }

    /// Best-effort removal: stop and disable (warnings only on failure),
    /// delete the unit file, then `daemon-reload`.
    fn remove(&self, service_name: &str) -> Result<(), ServiceManagerError> {
        let service_unit = self.get_service_name(service_name);

        // Check if service exists
        if !self.exists(service_name)? {
            return Err(ServiceManagerError::ServiceNotFound(
                service_name.to_string(),
            ));
        }

        // Try to stop the service first, but don't fail if it's already stopped
        if let Err(e) = self.stop(service_name) {
            log::warn!(
                "Failed to stop service '{}' before removal: {}",
                service_name,
                e
            );
        }

        // Disable the service
        if let Err(e) = self.run_systemctl(&["disable", &service_unit]) {
            log::warn!("Failed to disable service '{}': {}", service_name, e);
        }

        // Remove the unit file
        let unit_path = self.get_unit_file_path(service_name);
        if unit_path.exists() {
            std::fs::remove_file(&unit_path).map_err(|e| {
                ServiceManagerError::Other(format!("Failed to remove unit file: {}", e))
            })?;
        }

        // Reload systemd to pick up the changes
        self.run_systemctl(&["daemon-reload"])?;

        Ok(())
    }
}
|
||||
@@ -1,379 +0,0 @@
|
||||
use crate::{ServiceConfig, ServiceManager, ServiceManagerError, ServiceStatus};
|
||||
use once_cell::sync::Lazy;
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::runtime::Runtime;
|
||||
use tokio::time::timeout;
|
||||
use zinit_client::{ServiceStatus as ZinitServiceStatus, ZinitClient, ZinitError};
|
||||
|
||||
// Shared runtime for async operations - production-safe initialization
|
||||
static ASYNC_RUNTIME: Lazy<Option<Runtime>> = Lazy::new(|| Runtime::new().ok());
|
||||
|
||||
/// Get the async runtime, creating a temporary one if the static runtime failed
|
||||
fn get_runtime() -> Result<Runtime, ServiceManagerError> {
|
||||
// Try to use the static runtime first
|
||||
if let Some(_runtime) = ASYNC_RUNTIME.as_ref() {
|
||||
// We can't return a reference to the static runtime because we need ownership
|
||||
// for block_on, so we create a new one. This is a reasonable trade-off for safety.
|
||||
Runtime::new().map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to create async runtime: {}", e))
|
||||
})
|
||||
} else {
|
||||
// Static runtime failed, try to create a new one
|
||||
Runtime::new().map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to create async runtime: {}", e))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Service manager backed by a zinit daemon reached through its client API.
pub struct ZinitServiceManager {
    // Shared async client; Arc-cloned into 'static futures for execute_async.
    client: Arc<ZinitClient>,
}
|
||||
|
||||
impl ZinitServiceManager {
|
||||
pub fn new(socket_path: &str) -> Result<Self, ServiceManagerError> {
|
||||
// Create the base zinit client directly
|
||||
let client = Arc::new(ZinitClient::new(socket_path));
|
||||
|
||||
Ok(ZinitServiceManager { client })
|
||||
}
|
||||
|
||||
/// Execute an async operation using the shared runtime or current context
|
||||
fn execute_async<F, T>(&self, operation: F) -> Result<T, ServiceManagerError>
|
||||
where
|
||||
F: std::future::Future<Output = Result<T, ZinitError>> + Send + 'static,
|
||||
T: Send + 'static,
|
||||
{
|
||||
// Check if we're already in a tokio runtime context
|
||||
if let Ok(_handle) = tokio::runtime::Handle::try_current() {
|
||||
// We're in an async context, use spawn_blocking to avoid nested runtime
|
||||
let result = std::thread::spawn(
|
||||
move || -> Result<Result<T, ZinitError>, ServiceManagerError> {
|
||||
let rt = Runtime::new().map_err(|e| {
|
||||
ServiceManagerError::Other(format!("Failed to create runtime: {}", e))
|
||||
})?;
|
||||
Ok(rt.block_on(operation))
|
||||
},
|
||||
)
|
||||
.join()
|
||||
.map_err(|_| ServiceManagerError::Other("Thread join failed".to_string()))?;
|
||||
result?.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
} else {
|
||||
// No current runtime, use production-safe runtime
|
||||
let runtime = get_runtime()?;
|
||||
runtime
|
||||
.block_on(operation)
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute an async operation with timeout using the shared runtime or current context
|
||||
fn execute_async_with_timeout<F, T>(
|
||||
&self,
|
||||
operation: F,
|
||||
timeout_secs: u64,
|
||||
) -> Result<T, ServiceManagerError>
|
||||
where
|
||||
F: std::future::Future<Output = Result<T, ZinitError>> + Send + 'static,
|
||||
T: Send + 'static,
|
||||
{
|
||||
let timeout_duration = Duration::from_secs(timeout_secs);
|
||||
let timeout_op = timeout(timeout_duration, operation);
|
||||
|
||||
// Check if we're already in a tokio runtime context
|
||||
if let Ok(_handle) = tokio::runtime::Handle::try_current() {
|
||||
// We're in an async context, use spawn_blocking to avoid nested runtime
|
||||
let result = std::thread::spawn(move || {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
rt.block_on(timeout_op)
|
||||
})
|
||||
.join()
|
||||
.map_err(|_| ServiceManagerError::Other("Thread join failed".to_string()))?;
|
||||
|
||||
result
|
||||
.map_err(|_| {
|
||||
ServiceManagerError::Other(format!(
|
||||
"Operation timed out after {} seconds",
|
||||
timeout_secs
|
||||
))
|
||||
})?
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
} else {
|
||||
// No current runtime, use production-safe runtime
|
||||
let runtime = get_runtime()?;
|
||||
runtime
|
||||
.block_on(timeout_op)
|
||||
.map_err(|_| {
|
||||
ServiceManagerError::Other(format!(
|
||||
"Operation timed out after {} seconds",
|
||||
timeout_secs
|
||||
))
|
||||
})?
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ServiceManager for ZinitServiceManager {
|
||||
fn exists(&self, service_name: &str) -> Result<bool, ServiceManagerError> {
|
||||
let status_res = self.status(service_name);
|
||||
match status_res {
|
||||
Ok(_) => Ok(true),
|
||||
Err(ServiceManagerError::ServiceNotFound(_)) => Ok(false),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
fn start(&self, config: &ServiceConfig) -> Result<(), ServiceManagerError> {
|
||||
// Build the exec command with args
|
||||
let mut exec_command = config.binary_path.clone();
|
||||
if !config.args.is_empty() {
|
||||
exec_command.push(' ');
|
||||
exec_command.push_str(&config.args.join(" "));
|
||||
}
|
||||
|
||||
// Create zinit-compatible service configuration
|
||||
let mut service_config = json!({
|
||||
"exec": exec_command,
|
||||
"oneshot": !config.auto_restart, // zinit uses oneshot, not restart
|
||||
"env": config.environment,
|
||||
});
|
||||
|
||||
// Add optional fields if present
|
||||
if let Some(ref working_dir) = config.working_directory {
|
||||
// Zinit doesn't support working_directory directly, so we need to modify the exec command
|
||||
let cd_command = format!("cd {} && {}", working_dir, exec_command);
|
||||
service_config["exec"] = json!(cd_command);
|
||||
}
|
||||
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name = config.name.clone();
|
||||
self.execute_async(
|
||||
async move { client.create_service(&service_name, service_config).await },
|
||||
)
|
||||
.map_err(|e| ServiceManagerError::StartFailed(config.name.clone(), e.to_string()))?;
|
||||
|
||||
self.start_existing(&config.name)
|
||||
}
|
||||
|
||||
fn start_existing(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let service_name_for_error = service_name.to_string();
|
||||
self.execute_async(async move { client.start(&service_name_owned).await })
|
||||
.map_err(|e| ServiceManagerError::StartFailed(service_name_for_error, e.to_string()))
|
||||
}
|
||||
|
||||
fn start_and_confirm(
|
||||
&self,
|
||||
config: &ServiceConfig,
|
||||
timeout_secs: u64,
|
||||
) -> Result<(), ServiceManagerError> {
|
||||
// Start the service first
|
||||
self.start(config)?;
|
||||
|
||||
// Wait for confirmation with timeout using the shared runtime
|
||||
self.execute_async_with_timeout(
|
||||
async move {
|
||||
let start_time = std::time::Instant::now();
|
||||
let timeout_duration = Duration::from_secs(timeout_secs);
|
||||
|
||||
while start_time.elapsed() < timeout_duration {
|
||||
// We need to call status in a blocking way from within the async context
|
||||
// For now, we'll use a simple polling approach
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
// Return a timeout error that will be handled by execute_async_with_timeout
|
||||
// Use a generic error since we don't know the exact ZinitError variants
|
||||
Err(ZinitError::from(std::io::Error::new(
|
||||
std::io::ErrorKind::TimedOut,
|
||||
"Timeout waiting for service confirmation",
|
||||
)))
|
||||
},
|
||||
timeout_secs,
|
||||
)?;
|
||||
|
||||
// Check final status
|
||||
match self.status(&config.name)? {
|
||||
ServiceStatus::Running => Ok(()),
|
||||
ServiceStatus::Failed => Err(ServiceManagerError::StartFailed(
|
||||
config.name.clone(),
|
||||
"Service failed to start".to_string(),
|
||||
)),
|
||||
_ => Err(ServiceManagerError::StartFailed(
|
||||
config.name.clone(),
|
||||
format!("Service did not start within {} seconds", timeout_secs),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn start_existing_and_confirm(
|
||||
&self,
|
||||
service_name: &str,
|
||||
timeout_secs: u64,
|
||||
) -> Result<(), ServiceManagerError> {
|
||||
// Start the existing service first
|
||||
self.start_existing(service_name)?;
|
||||
|
||||
// Wait for confirmation with timeout using the shared runtime
|
||||
self.execute_async_with_timeout(
|
||||
async move {
|
||||
let start_time = std::time::Instant::now();
|
||||
let timeout_duration = Duration::from_secs(timeout_secs);
|
||||
|
||||
while start_time.elapsed() < timeout_duration {
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
// Return a timeout error that will be handled by execute_async_with_timeout
|
||||
// Use a generic error since we don't know the exact ZinitError variants
|
||||
Err(ZinitError::from(std::io::Error::new(
|
||||
std::io::ErrorKind::TimedOut,
|
||||
"Timeout waiting for service confirmation",
|
||||
)))
|
||||
},
|
||||
timeout_secs,
|
||||
)?;
|
||||
|
||||
// Check final status
|
||||
match self.status(service_name)? {
|
||||
ServiceStatus::Running => Ok(()),
|
||||
ServiceStatus::Failed => Err(ServiceManagerError::StartFailed(
|
||||
service_name.to_string(),
|
||||
"Service failed to start".to_string(),
|
||||
)),
|
||||
_ => Err(ServiceManagerError::StartFailed(
|
||||
service_name.to_string(),
|
||||
format!("Service did not start within {} seconds", timeout_secs),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn stop(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let service_name_for_error = service_name.to_string();
|
||||
self.execute_async(async move { client.stop(&service_name_owned).await })
|
||||
.map_err(|e| ServiceManagerError::StopFailed(service_name_for_error, e.to_string()))
|
||||
}
|
||||
|
||||
fn restart(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let service_name_for_error = service_name.to_string();
|
||||
self.execute_async(async move { client.restart(&service_name_owned).await })
|
||||
.map_err(|e| ServiceManagerError::RestartFailed(service_name_for_error, e.to_string()))
|
||||
}
|
||||
|
||||
fn status(&self, service_name: &str) -> Result<ServiceStatus, ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let service_name_for_error = service_name.to_string();
|
||||
let status: ZinitServiceStatus = self
|
||||
.execute_async(async move { client.status(&service_name_owned).await })
|
||||
.map_err(|e| {
|
||||
// Check if this is a "service not found" error
|
||||
if e.to_string().contains("not found") || e.to_string().contains("does not exist") {
|
||||
ServiceManagerError::ServiceNotFound(service_name_for_error)
|
||||
} else {
|
||||
ServiceManagerError::Other(e.to_string())
|
||||
}
|
||||
})?;
|
||||
|
||||
// ServiceStatus is a struct with fields, not an enum
|
||||
// We need to check the state field to determine the status
|
||||
// Convert ServiceState to string and match on that
|
||||
let state_str = format!("{:?}", status.state).to_lowercase();
|
||||
let service_status = match state_str.as_str() {
|
||||
s if s.contains("running") => crate::ServiceStatus::Running,
|
||||
s if s.contains("stopped") => crate::ServiceStatus::Stopped,
|
||||
s if s.contains("failed") => crate::ServiceStatus::Failed,
|
||||
_ => crate::ServiceStatus::Unknown,
|
||||
};
|
||||
Ok(service_status)
|
||||
}
|
||||
|
||||
fn logs(
|
||||
&self,
|
||||
service_name: &str,
|
||||
_lines: Option<usize>,
|
||||
) -> Result<String, ServiceManagerError> {
|
||||
// The logs method takes (follow: bool, filter: Option<impl AsRef<str>>)
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name_owned = service_name.to_string();
|
||||
let logs = self
|
||||
.execute_async(async move {
|
||||
use futures::StreamExt;
|
||||
use tokio::time::{timeout, Duration};
|
||||
|
||||
let mut log_stream = client
|
||||
.logs(false, Some(service_name_owned.as_str()))
|
||||
.await?;
|
||||
let mut logs = Vec::new();
|
||||
|
||||
// Collect logs from the stream with a reasonable limit
|
||||
let mut count = 0;
|
||||
const MAX_LOGS: usize = 100;
|
||||
const LOG_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
// Use timeout to prevent hanging
|
||||
let result = timeout(LOG_TIMEOUT, async {
|
||||
while let Some(log_result) = log_stream.next().await {
|
||||
match log_result {
|
||||
Ok(log_entry) => {
|
||||
logs.push(format!("{:?}", log_entry));
|
||||
count += 1;
|
||||
if count >= MAX_LOGS {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(_) => break,
|
||||
}
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
// Handle timeout - this is not an error, just means no more logs available
|
||||
if result.is_err() {
|
||||
log::debug!(
|
||||
"Log reading timed out after {} seconds, returning {} logs",
|
||||
LOG_TIMEOUT.as_secs(),
|
||||
logs.len()
|
||||
);
|
||||
}
|
||||
|
||||
Ok::<Vec<String>, ZinitError>(logs)
|
||||
})
|
||||
.map_err(|e| {
|
||||
ServiceManagerError::LogsFailed(service_name.to_string(), e.to_string())
|
||||
})?;
|
||||
Ok(logs.join("\n"))
|
||||
}
|
||||
|
||||
fn list(&self) -> Result<Vec<String>, ServiceManagerError> {
|
||||
let client = Arc::clone(&self.client);
|
||||
let services = self
|
||||
.execute_async(async move { client.list().await })
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))?;
|
||||
Ok(services.keys().cloned().collect())
|
||||
}
|
||||
|
||||
fn remove(&self, service_name: &str) -> Result<(), ServiceManagerError> {
|
||||
// Try to stop the service first, but don't fail if it's already stopped or doesn't exist
|
||||
if let Err(e) = self.stop(service_name) {
|
||||
// Log the error but continue with removal
|
||||
log::warn!(
|
||||
"Failed to stop service '{}' before removal: {}",
|
||||
service_name,
|
||||
e
|
||||
);
|
||||
}
|
||||
|
||||
let client = Arc::clone(&self.client);
|
||||
let service_name = service_name.to_string();
|
||||
self.execute_async(async move { client.delete_service(&service_name).await })
|
||||
.map_err(|e| ServiceManagerError::Other(e.to_string()))
|
||||
}
|
||||
}
|
||||
@@ -1,243 +0,0 @@
|
||||
use sal_service_manager::{create_service_manager, ServiceConfig, ServiceManager};
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[test]
fn test_create_service_manager() {
    // The factory should hand back whichever manager fits the current platform.
    let manager = create_service_manager().expect("Failed to create service manager");

    // Calling into it must not panic, even when no service system is present;
    // an Err result is acceptable on such hosts.
    match manager.list() {
        Ok(services) => println!(
            "✓ Service manager created successfully, found {} services",
            services.len()
        ),
        Err(e) => println!("✓ Service manager created, but got expected error: {}", e),
    }
}
|
||||
|
||||
#[test]
fn test_service_config_creation() {
    // A minimal config: no env vars, no working dir, no auto-restart.
    let basic = ServiceConfig {
        name: "test-service".to_string(),
        binary_path: "/usr/bin/echo".to_string(),
        args: vec!["hello".to_string(), "world".to_string()],
        working_directory: None,
        environment: HashMap::new(),
        auto_restart: false,
    };

    assert_eq!(basic.name, "test-service");
    assert_eq!(basic.binary_path, "/usr/bin/echo");
    assert_eq!(basic.args.len(), 2);
    assert_eq!(basic.args[0], "hello");
    assert_eq!(basic.args[1], "world");
    assert!(basic.working_directory.is_none());
    assert!(basic.environment.is_empty());
    assert!(!basic.auto_restart);

    println!("✓ Basic service config created successfully");

    // A richer config exercising env vars, a working dir, and auto-restart.
    let mut vars = HashMap::new();
    vars.insert("PATH".to_string(), "/usr/bin:/bin".to_string());
    vars.insert("HOME".to_string(), "/tmp".to_string());

    let with_env = ServiceConfig {
        name: "env-service".to_string(),
        binary_path: "/usr/bin/env".to_string(),
        args: vec![],
        working_directory: Some("/tmp".to_string()),
        environment: vars.clone(),
        auto_restart: true,
    };

    assert_eq!(with_env.name, "env-service");
    assert_eq!(with_env.binary_path, "/usr/bin/env");
    assert!(with_env.args.is_empty());
    assert_eq!(with_env.working_directory, Some("/tmp".to_string()));
    assert_eq!(with_env.environment.len(), 2);
    assert_eq!(
        with_env.environment.get("PATH"),
        Some(&"/usr/bin:/bin".to_string())
    );
    assert_eq!(
        with_env.environment.get("HOME"),
        Some(&"/tmp".to_string())
    );
    assert!(with_env.auto_restart);

    println!("✓ Environment service config created successfully");
}
|
||||
|
||||
#[test]
fn test_service_config_clone() {
    // Cloning a ServiceConfig must yield a field-for-field identical copy.
    let mut env = HashMap::new();
    env.insert("TEST".to_string(), "value".to_string());

    let source = ServiceConfig {
        name: "original".to_string(),
        binary_path: "/bin/sh".to_string(),
        args: vec!["-c".to_string(), "echo test".to_string()],
        working_directory: Some("/home".to_string()),
        environment: env,
        auto_restart: true,
    };

    let copy = source.clone();

    assert_eq!(source.name, copy.name);
    assert_eq!(source.binary_path, copy.binary_path);
    assert_eq!(source.args, copy.args);
    assert_eq!(source.working_directory, copy.working_directory);
    assert_eq!(source.environment, copy.environment);
    assert_eq!(source.auto_restart, copy.auto_restart);

    println!("✓ Service config cloning works correctly");
}
|
||||
|
||||
#[cfg(target_os = "macos")]
#[test]
fn test_macos_service_manager() {
    use sal_service_manager::LaunchctlServiceManager;

    // Construct the macOS-specific (launchctl-backed) manager directly.
    let manager = LaunchctlServiceManager::new();

    // Listing may fail on restricted hosts; either outcome is fine, panicking is not.
    match manager.list() {
        Ok(services) => println!(
            "✓ macOS LaunchctlServiceManager created successfully, found {} services",
            services.len()
        ),
        Err(e) => println!(
            "✓ macOS LaunchctlServiceManager created, but got expected error: {}",
            e
        ),
    }
}
|
||||
|
||||
#[cfg(target_os = "linux")]
#[test]
fn test_linux_service_manager() {
    use sal_service_manager::SystemdServiceManager;

    // Construct the Linux-specific (systemd-backed) manager directly.
    let manager = SystemdServiceManager::new();

    // Listing may fail on hosts without systemd; either outcome is fine, panicking is not.
    match manager.list() {
        Ok(services) => println!(
            "✓ Linux SystemdServiceManager created successfully, found {} services",
            services.len()
        ),
        Err(e) => println!(
            "✓ Linux SystemdServiceManager created, but got expected error: {}",
            e
        ),
    }
}
|
||||
|
||||
#[test]
fn test_service_status_debug() {
    use sal_service_manager::ServiceStatus;

    // Every variant must support Debug formatting plus Clone + PartialEq.
    let all_variants = vec![
        ServiceStatus::Running,
        ServiceStatus::Stopped,
        ServiceStatus::Failed,
        ServiceStatus::Unknown,
    ];

    for variant in &all_variants {
        let duplicate = variant.clone();
        let rendered = format!("{:?}", variant);

        assert!(!rendered.is_empty());
        assert_eq!(variant, &duplicate);

        println!(
            "✓ ServiceStatus::{:?} debug and clone work correctly",
            variant
        );
    }
}
|
||||
|
||||
#[test]
fn test_service_manager_error_debug() {
    use sal_service_manager::ServiceManagerError;

    // Every error variant must render non-empty via both Debug and Display.
    let samples = vec![
        ServiceManagerError::ServiceNotFound("test".to_string()),
        ServiceManagerError::ServiceAlreadyExists("test".to_string()),
        ServiceManagerError::StartFailed("test".to_string(), "reason".to_string()),
        ServiceManagerError::StopFailed("test".to_string(), "reason".to_string()),
        ServiceManagerError::RestartFailed("test".to_string(), "reason".to_string()),
        ServiceManagerError::LogsFailed("test".to_string(), "reason".to_string()),
        ServiceManagerError::Other("generic error".to_string()),
    ];

    for sample in &samples {
        let debug_text = format!("{:?}", sample);
        let display_text = format!("{}", sample);

        assert!(!debug_text.is_empty());
        assert!(!display_text.is_empty());

        println!("✓ Error debug: {:?}", sample);
        println!("✓ Error display: {}", sample);
    }
}
|
||||
|
||||
#[test]
fn test_service_manager_trait_object() {
    // ServiceManager must be usable behind a Box<dyn …> (dynamic dispatch).
    let manager: Box<dyn ServiceManager> =
        create_service_manager().expect("Failed to create service manager");

    // list() via the trait object: Ok or Err are both acceptable, a panic is not.
    match manager.list() {
        Ok(services) => println!("✓ Trait object works, found {} services", services.len()),
        Err(e) => println!("✓ Trait object works, got expected error: {}", e),
    }

    // exists() via the trait object, queried for a name that should not be present.
    match manager.exists("non-existent-service") {
        Ok(false) => println!("✓ Trait object exists method works correctly"),
        Ok(true) => println!("⚠ Unexpectedly found non-existent service"),
        Err(_) => println!("✓ Trait object exists method works (with error)"),
    }
}
|
||||
@@ -1,177 +0,0 @@
|
||||
// Service lifecycle management test script
// This script tests REAL complete service lifecycle scenarios
//
// NOTE(review): create_service_manager/start/exists/status/list/stop/remove are
// presumably registered by the host's sal_service_manager Rhai module — verify.
// The script walks two throwaway /bin/echo services through start → exists →
// status → list → stop → remove → cleanup-verify, accumulating pass/total
// counters across all phases, and returns a summary map.

print("=== Service Lifecycle Management Test ===");

// Create service manager
let manager = create_service_manager();
print("✓ Service manager created");

// Test configuration - real services for testing
// Both use /bin/echo so they exit immediately and leave nothing running.
let test_services = [
    #{
        name: "lifecycle-test-1",
        binary_path: "/bin/echo",
        args: ["Lifecycle test 1"],
        working_directory: "/tmp",
        environment: #{},
        auto_restart: false
    },
    #{
        name: "lifecycle-test-2",
        binary_path: "/bin/echo",
        args: ["Lifecycle test 2"],
        working_directory: "/tmp",
        environment: #{ "TEST_VAR": "test_value" },
        auto_restart: false
    }
];

// Shared counters, incremented by every phase below.
let total_tests = 0;
let passed_tests = 0;

// Test 1: Service Creation and Start
print("\n1. Testing service creation and start...");
for service_config in test_services {
    print(`\nStarting service: ${service_config.name}`);
    try {
        start(manager, service_config);
        print(`  ✓ Service ${service_config.name} started successfully`);
        passed_tests += 1;
    } catch(e) {
        print(`  ✗ Service ${service_config.name} start failed: ${e}`);
    }
    total_tests += 1;
}

// Test 2: Service Existence Check
print("\n2. Testing service existence checks...");
for service_config in test_services {
    print(`\nChecking existence of: ${service_config.name}`);
    try {
        let service_exists = exists(manager, service_config.name);
        if service_exists {
            print(`  ✓ Service ${service_config.name} exists: ${service_exists}`);
            passed_tests += 1;
        } else {
            print(`  ✗ Service ${service_config.name} doesn't exist after start`);
        }
    } catch(e) {
        print(`  ✗ Existence check failed for ${service_config.name}: ${e}`);
    }
    total_tests += 1;
}

// Test 3: Status Check
print("\n3. Testing status checks...");
for service_config in test_services {
    print(`\nChecking status of: ${service_config.name}`);
    try {
        let service_status = status(manager, service_config.name);
        print(`  ✓ Service ${service_config.name} status: ${service_status}`);
        passed_tests += 1;
    } catch(e) {
        print(`  ✗ Status check failed for ${service_config.name}: ${e}`);
    }
    total_tests += 1;
}

// Test 4: Service List Check
// Counted as a single operation regardless of how many services are matched.
print("\n4. Testing service list...");
try {
    let services = list(manager);
    print(`  ✓ Service list retrieved (${services.len()} services)`);

    // Check if our test services are in the list
    for service_config in test_services {
        let found = false;
        for service in services {
            if service.contains(service_config.name) {
                found = true;
                print(`  ✓ Found ${service_config.name} in list`);
                break;
            }
        }
        if !found {
            print(`  ⚠ ${service_config.name} not found in service list`);
        }
    }
    passed_tests += 1;
} catch(e) {
    print(`  ✗ Service list failed: ${e}`);
}
total_tests += 1;

// Test 5: Service Stop
print("\n5. Testing service stop...");
for service_config in test_services {
    print(`\nStopping service: ${service_config.name}`);
    try {
        stop(manager, service_config.name);
        print(`  ✓ Service ${service_config.name} stopped successfully`);
        passed_tests += 1;
    } catch(e) {
        print(`  ✗ Service ${service_config.name} stop failed: ${e}`);
    }
    total_tests += 1;
}

// Test 6: Service Removal
print("\n6. Testing service removal...");
for service_config in test_services {
    print(`\nRemoving service: ${service_config.name}`);
    try {
        remove(manager, service_config.name);
        print(`  ✓ Service ${service_config.name} removed successfully`);
        passed_tests += 1;
    } catch(e) {
        print(`  ✗ Service ${service_config.name} removal failed: ${e}`);
    }
    total_tests += 1;
}

// Test 7: Cleanup Verification
// After removal each service must no longer report as existing.
print("\n7. Testing cleanup verification...");
for service_config in test_services {
    print(`\nVerifying removal of: ${service_config.name}`);
    try {
        let exists_after_remove = exists(manager, service_config.name);
        if !exists_after_remove {
            print(`  ✓ Service ${service_config.name} correctly doesn't exist after removal`);
            passed_tests += 1;
        } else {
            print(`  ✗ Service ${service_config.name} still exists after removal`);
        }
    } catch(e) {
        print(`  ✗ Cleanup verification failed for ${service_config.name}: ${e}`);
    }
    total_tests += 1;
}

// Test Summary
// total_tests is always > 0 here (every phase increments it), so the
// percentage division below cannot divide by zero.
print("\n=== Lifecycle Test Summary ===");
print(`Services tested: ${test_services.len()}`);
print(`Total operations: ${total_tests}`);
print(`Successful operations: ${passed_tests}`);
print(`Failed operations: ${total_tests - passed_tests}`);
print(`Success rate: ${(passed_tests * 100) / total_tests}%`);

if passed_tests == total_tests {
    print("\n🎉 All lifecycle tests passed!");
    print("Service manager is working correctly across all scenarios.");
} else {
    print(`\n⚠ ${total_tests - passed_tests} test(s) failed`);
    print("Some service manager operations need attention.");
}

print("\n=== Service Lifecycle Test Complete ===");

// Return test results
// (the final expression is the script's return value)
#{
    summary: #{
        total_tests: total_tests,
        passed_tests: passed_tests,
        success_rate: (passed_tests * 100) / total_tests,
        services_tested: test_services.len()
    }
}
|
||||
@@ -1,218 +0,0 @@
|
||||
// Basic service manager functionality test script
// This script tests the REAL service manager through Rhai integration
//
// NOTE(review): create_service_manager/exists/start/status/list/stop/remove are
// presumably registered by the host's sal_service_manager Rhai module — verify.
// Runs one throwaway /bin/echo service through the full lifecycle, recording a
// PASS/FAIL/WARN verdict per phase in `test_results`, which is returned at the end.

print("=== Service Manager Basic Functionality Test ===");

// Test configuration
let test_service_name = "rhai-test-service";
let test_binary = "/bin/echo";
let test_args = ["Hello from Rhai service manager test"];

print(`Testing service: ${test_service_name}`);
print(`Binary: ${test_binary}`);
print(`Args: ${test_args}`);

// Test results tracking
// Each key starts as "NOT_RUN" and is overwritten by its phase below.
let test_results = #{
    creation: "NOT_RUN",
    exists_before: "NOT_RUN",
    start: "NOT_RUN",
    exists_after: "NOT_RUN",
    status: "NOT_RUN",
    list: "NOT_RUN",
    stop: "NOT_RUN",
    remove: "NOT_RUN",
    cleanup: "NOT_RUN"
};

let passed_tests = 0;
let total_tests = 0;

// Test 1: Service Manager Creation
print("\n1. Testing service manager creation...");
try {
    let manager = create_service_manager();
    print("✓ Service manager created successfully");
    test_results["creation"] = "PASS";
    passed_tests += 1;
    total_tests += 1;
} catch(e) {
    print(`✗ Service manager creation failed: ${e}`);
    test_results["creation"] = "FAIL";
    total_tests += 1;
    // Return early if we can't create the manager
    return test_results;
}

// Create the service manager for all subsequent tests
// (the one made inside the try block above was scoped to that block)
let manager = create_service_manager();

// Test 2: Check if service exists before creation
print("\n2. Testing service existence check (before creation)...");
try {
    let exists_before = exists(manager, test_service_name);
    print(`✓ Service existence check: ${exists_before}`);

    if !exists_before {
        print("✓ Service correctly doesn't exist before creation");
        test_results["exists_before"] = "PASS";
        passed_tests += 1;
    } else {
        print("⚠ Service unexpectedly exists before creation");
        test_results["exists_before"] = "WARN";
    }
    total_tests += 1;
} catch(e) {
    print(`✗ Service existence check failed: ${e}`);
    test_results["exists_before"] = "FAIL";
    total_tests += 1;
}

// Test 3: Start the service
print("\n3. Testing service start...");
try {
    // Create a service configuration object
    let service_config = #{
        name: test_service_name,
        binary_path: test_binary,
        args: test_args,
        working_directory: "/tmp",
        environment: #{},
        auto_restart: false
    };

    start(manager, service_config);
    print("✓ Service started successfully");
    test_results["start"] = "PASS";
    passed_tests += 1;
    total_tests += 1;
} catch(e) {
    print(`✗ Service start failed: ${e}`);
    test_results["start"] = "FAIL";
    total_tests += 1;
}

// Test 4: Check if service exists after creation
print("\n4. Testing service existence check (after creation)...");
try {
    let exists_after = exists(manager, test_service_name);
    print(`✓ Service existence check: ${exists_after}`);

    if exists_after {
        print("✓ Service correctly exists after creation");
        test_results["exists_after"] = "PASS";
        passed_tests += 1;
    } else {
        print("✗ Service doesn't exist after creation");
        test_results["exists_after"] = "FAIL";
    }
    total_tests += 1;
} catch(e) {
    print(`✗ Service existence check failed: ${e}`);
    test_results["exists_after"] = "FAIL";
    total_tests += 1;
}

// Test 5: Check service status
print("\n5. Testing service status...");
try {
    let service_status = status(manager, test_service_name);
    print(`✓ Service status: ${service_status}`);
    test_results["status"] = "PASS";
    passed_tests += 1;
    total_tests += 1;
} catch(e) {
    print(`✗ Service status check failed: ${e}`);
    test_results["status"] = "FAIL";
    total_tests += 1;
}

// Test 6: List services
print("\n6. Testing service list...");
try {
    let services = list(manager);
    print("✓ Service list retrieved");

    // Skip service search due to Rhai type constraints with Vec iteration
    print("  ⚠️ Skipping service search due to Rhai type constraints");

    test_results["list"] = "PASS";
    passed_tests += 1;
    total_tests += 1;
} catch(e) {
    print(`✗ Service list failed: ${e}`);
    test_results["list"] = "FAIL";
    total_tests += 1;
}

// Test 7: Stop the service
print("\n7. Testing service stop...");
try {
    stop(manager, test_service_name);
    print(`✓ Service stopped: ${test_service_name}`);
    test_results["stop"] = "PASS";
    passed_tests += 1;
    total_tests += 1;
} catch(e) {
    print(`✗ Service stop failed: ${e}`);
    test_results["stop"] = "FAIL";
    total_tests += 1;
}

// Test 8: Remove the service
print("\n8. Testing service remove...");
try {
    remove(manager, test_service_name);
    print(`✓ Service removed: ${test_service_name}`);
    test_results["remove"] = "PASS";
    passed_tests += 1;
    total_tests += 1;
} catch(e) {
    print(`✗ Service remove failed: ${e}`);
    test_results["remove"] = "FAIL";
    total_tests += 1;
}

// Test 9: Verify cleanup
print("\n9. Testing cleanup verification...");
try {
    let exists_after_remove = exists(manager, test_service_name);
    if !exists_after_remove {
        print("✓ Service correctly doesn't exist after removal");
        test_results["cleanup"] = "PASS";
        passed_tests += 1;
    } else {
        print("✗ Service still exists after removal");
        test_results["cleanup"] = "FAIL";
    }
    total_tests += 1;
} catch(e) {
    print(`✗ Cleanup verification failed: ${e}`);
    test_results["cleanup"] = "FAIL";
    total_tests += 1;
}

// Test Summary
// total_tests is > 0 by this point, so the percentage cannot divide by zero.
print("\n=== Test Summary ===");
print(`Total tests: ${total_tests}`);
print(`Passed: ${passed_tests}`);
print(`Failed: ${total_tests - passed_tests}`);
print(`Success rate: ${(passed_tests * 100) / total_tests}%`);

print("\nDetailed Results:");
for test_name in test_results.keys() {
    let result = test_results[test_name];
    let status_icon = if result == "PASS" { "✓" } else if result == "FAIL" { "✗" } else { "⚠" };
    print(`  ${status_icon} ${test_name}: ${result}`);
}

if passed_tests == total_tests {
    print("\n🎉 All tests passed!");
} else {
    print(`\n⚠ ${total_tests - passed_tests} test(s) failed`);
}

print("\n=== Service Manager Basic Test Complete ===");

// Return test results for potential use by calling code
test_results
|
||||
@@ -1,252 +0,0 @@
|
||||
use rhai::{Engine, EvalAltResult};
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
/// Helper function to create a Rhai engine for service manager testing
///
/// With the `rhai` feature enabled, the engine gets the real service manager
/// module registered (so scripts can call `create_service_manager`, `start`,
/// etc.). Without the feature, a bare engine is returned so the remaining
/// engine-level tests can still run.
fn create_service_manager_engine() -> Result<Engine, Box<EvalAltResult>> {
    #[cfg(feature = "rhai")]
    {
        let mut engine = Engine::new();
        // Register the service manager module for real testing
        sal_service_manager::rhai::register_service_manager_module(&mut engine)?;
        Ok(engine)
    }
    #[cfg(not(feature = "rhai"))]
    {
        Ok(Engine::new())
    }
}
|
||||
|
||||
/// Helper function to run a Rhai script file
|
||||
fn run_rhai_script(script_path: &str) -> Result<rhai::Dynamic, Box<EvalAltResult>> {
|
||||
let engine = create_service_manager_engine()?;
|
||||
|
||||
// Read the script file
|
||||
let script_content = fs::read_to_string(script_path)
|
||||
.map_err(|e| format!("Failed to read script file {}: {}", script_path, e))?;
|
||||
|
||||
// Execute the script
|
||||
engine.eval::<rhai::Dynamic>(&script_content)
|
||||
}
|
||||
|
||||
#[test]
fn test_rhai_service_manager_basic() {
    let script_path = "tests/rhai/service_manager_basic.rhai";

    // The script lives in the crate's test tree; skip gracefully when absent.
    if !Path::new(script_path).exists() {
        println!("⚠ Skipping test: Rhai script not found at {}", script_path);
        return;
    }

    println!("Running Rhai service manager basic test...");

    match run_rhai_script(script_path) {
        Ok(result) => {
            println!("✓ Rhai basic test completed successfully");

            // The script may return a map of test-name -> "PASS"/"FAIL"/"WARN".
            if let Some(map) = result.try_cast::<rhai::Map>() {
                println!("Test results received from Rhai script:");
                for (key, value) in map.iter() {
                    println!("  {}: {:?}", key, value);
                }

                // Check if all tests passed
                let all_passed = map.values().all(|v| {
                    v.clone()
                        .try_cast::<String>()
                        .map_or(false, |s| s == "PASS")
                });

                if all_passed {
                    println!("✓ All Rhai tests reported as PASS");
                } else {
                    println!("⚠ Some Rhai tests did not pass");
                }
            }
        }
        Err(e) => {
            println!("✗ Rhai basic test failed: {}", e);
            // `assert!(false, …)` is flagged by clippy (assertions_on_constants);
            // panic! states the intent directly.
            panic!("Rhai script execution failed: {}", e);
        }
    }
}
|
||||
|
||||
#[test]
fn test_rhai_service_lifecycle() {
    let script_path = "tests/rhai/service_lifecycle.rhai";

    // The script lives in the crate's test tree; skip gracefully when absent.
    if !Path::new(script_path).exists() {
        println!("⚠ Skipping test: Rhai script not found at {}", script_path);
        return;
    }

    println!("Running Rhai service lifecycle test...");

    match run_rhai_script(script_path) {
        Ok(result) => {
            println!("✓ Rhai lifecycle test completed successfully");

            // The script may return a map carrying "summary" and "performance" sub-maps.
            if let Some(map) = result.try_cast::<rhai::Map>() {
                println!("Lifecycle test results received from Rhai script:");

                // Extract summary if available
                if let Some(summary) = map.get("summary") {
                    if let Some(summary_map) = summary.clone().try_cast::<rhai::Map>() {
                        println!("Summary:");
                        for (key, value) in summary_map.iter() {
                            println!("  {}: {:?}", key, value);
                        }
                    }
                }

                // Extract performance metrics if available
                if let Some(performance) = map.get("performance") {
                    if let Some(perf_map) = performance.clone().try_cast::<rhai::Map>() {
                        println!("Performance:");
                        for (key, value) in perf_map.iter() {
                            println!("  {}: {:?}", key, value);
                        }
                    }
                }
            }
        }
        Err(e) => {
            println!("✗ Rhai lifecycle test failed: {}", e);
            // `assert!(false, …)` is flagged by clippy (assertions_on_constants);
            // panic! states the intent directly.
            panic!("Rhai script execution failed: {}", e);
        }
    }
}
|
||||
|
||||
#[test]
fn test_rhai_engine_functionality() {
    println!("Testing basic Rhai engine functionality...");

    let engine = create_service_manager_engine().expect("Failed to create Rhai engine");

    // Sanity-check core Rhai features (math, strings, arrays, maps); the script
    // returns both per-feature results and an aggregate all_passed flag.
    let test_script = r#"
        let test_results = #{
            basic_math: 2 + 2 == 4,
            string_ops: "hello".len() == 5,
            array_ops: [1, 2, 3].len() == 3,
            map_ops: #{ a: 1, b: 2 }.len() == 2
        };

        let all_passed = true;
        for result in test_results.values() {
            if !result {
                all_passed = false;
                break;
            }
        }

        #{
            results: test_results,
            all_passed: all_passed
        }
    "#;

    match engine.eval::<rhai::Dynamic>(test_script) {
        Ok(result) => {
            if let Some(map) = result.try_cast::<rhai::Map>() {
                if let Some(all_passed) = map.get("all_passed") {
                    if let Some(passed) = all_passed.clone().try_cast::<bool>() {
                        if passed {
                            println!("✓ All basic Rhai functionality tests passed");
                        } else {
                            println!("✗ Some basic Rhai functionality tests failed");
                            // Replaces `assert!(false, …)` (clippy::assertions_on_constants).
                            panic!("Basic Rhai tests failed");
                        }
                    }
                }

                if let Some(results) = map.get("results") {
                    if let Some(results_map) = results.clone().try_cast::<rhai::Map>() {
                        println!("Detailed results:");
                        for (test_name, result) in results_map.iter() {
                            // "?" marks a value that could not be cast to bool.
                            let status = match result.clone().try_cast::<bool>() {
                                Some(true) => "✓",
                                Some(false) => "✗",
                                None => "?",
                            };
                            println!("  {} {}: {:?}", status, test_name, result);
                        }
                    }
                }
            }
        }
        Err(e) => {
            println!("✗ Basic Rhai functionality test failed: {}", e);
            // Replaces `assert!(false, …)` (clippy::assertions_on_constants).
            panic!("Basic Rhai test failed: {}", e);
        }
    }
}
|
||||
|
||||
#[test]
fn test_rhai_script_error_handling() {
    println!("Testing Rhai error handling...");

    let engine = create_service_manager_engine().expect("Failed to create Rhai engine");

    // Calling a method that doesn't exist must produce an evaluation error.
    let error_script = r#"
        let result = "test";
        result.non_existent_method(); // This should cause an error
    "#;

    match engine.eval::<rhai::Dynamic>(error_script) {
        Ok(_) => {
            println!("⚠ Expected error but script succeeded");
            // Replaces `assert!(false, …)` (clippy::assertions_on_constants).
            panic!("Error handling test failed - expected error but got success");
        }
        Err(e) => {
            println!("✓ Error correctly caught: {}", e);
            // Verify it's the expected type of error (unknown method/function).
            assert!(e.to_string().contains("method") || e.to_string().contains("function"));
        }
    }
}
|
||||
|
||||
#[test]
fn test_rhai_script_files_exist() {
    println!("Checking that Rhai test scripts exist...");

    // Both scripts are required; missing/empty/unreadable files fail the test.
    let script_files = [
        "tests/rhai/service_manager_basic.rhai",
        "tests/rhai/service_lifecycle.rhai",
    ];

    for script_file in &script_files {
        // Replaces the original `assert!(false, …)` sites with panic!
        // (clippy::assertions_on_constants) and fails early per file.
        if !Path::new(script_file).exists() {
            panic!("Required script file not found: {}", script_file);
        }
        println!("✓ Found script: {}", script_file);

        // Verify the file is readable and not empty
        match fs::read_to_string(script_file) {
            Ok(content) => {
                if content.trim().is_empty() {
                    panic!("Script file {} is empty", script_file);
                }
                println!("  Content length: {} characters", content.len());
            }
            Err(e) => {
                panic!("Failed to read script file {}: {}", script_file, e);
            }
        }
    }

    println!("✓ All required Rhai script files exist and are readable");
}
|
||||
@@ -1,317 +0,0 @@
|
||||
use sal_service_manager::{
|
||||
ServiceConfig, ServiceManager, ServiceManagerError, ServiceStatus, ZinitServiceManager,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
/// Helper function to find an available Zinit socket path
|
||||
async fn get_available_socket_path() -> Option<String> {
|
||||
let socket_paths = [
|
||||
"/var/run/zinit.sock",
|
||||
"/tmp/zinit.sock",
|
||||
"/run/zinit.sock",
|
||||
"./zinit.sock",
|
||||
];
|
||||
|
||||
for path in &socket_paths {
|
||||
// Try to create a ZinitServiceManager to test connectivity
|
||||
if let Ok(manager) = ZinitServiceManager::new(path) {
|
||||
// Test if we can list services (basic connectivity test)
|
||||
if manager.list().is_ok() {
|
||||
println!("✓ Found working Zinit socket at: {}", path);
|
||||
return Some(path.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Helper function to clean up test services.
///
/// Best-effort teardown: both the stop and the remove may fail (service
/// already stopped, or never created) and those errors are deliberately
/// ignored so cleanup never aborts a test.
async fn cleanup_test_service(manager: &dyn ServiceManager, service_name: &str) {
    let _ = manager.stop(service_name);
    let _ = manager.remove(service_name);
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_zinit_service_manager_creation() {
|
||||
if let Some(socket_path) = get_available_socket_path().await {
|
||||
let manager = ZinitServiceManager::new(&socket_path);
|
||||
assert!(
|
||||
manager.is_ok(),
|
||||
"Should be able to create ZinitServiceManager"
|
||||
);
|
||||
|
||||
let manager = manager.unwrap();
|
||||
|
||||
// Test basic connectivity by listing services
|
||||
let list_result = manager.list();
|
||||
assert!(list_result.is_ok(), "Should be able to list services");
|
||||
|
||||
println!("✓ ZinitServiceManager created successfully");
|
||||
} else {
|
||||
println!("⚠ Skipping test_zinit_service_manager_creation: No Zinit socket available");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_service_lifecycle() {
|
||||
if let Some(socket_path) = get_available_socket_path().await {
|
||||
let manager = ZinitServiceManager::new(&socket_path).expect("Failed to create manager");
|
||||
let service_name = "test-lifecycle-service";
|
||||
|
||||
// Clean up any existing service first
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
|
||||
let config = ServiceConfig {
|
||||
name: service_name.to_string(),
|
||||
binary_path: "echo".to_string(),
|
||||
args: vec!["Hello from lifecycle test".to_string()],
|
||||
working_directory: Some("/tmp".to_string()),
|
||||
environment: HashMap::new(),
|
||||
auto_restart: false,
|
||||
};
|
||||
|
||||
// Test service creation and start
|
||||
println!("Testing service creation and start...");
|
||||
let start_result = manager.start(&config);
|
||||
match start_result {
|
||||
Ok(_) => {
|
||||
println!("✓ Service started successfully");
|
||||
|
||||
// Wait a bit for the service to run
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
|
||||
// Test service exists
|
||||
let exists = manager.exists(service_name);
|
||||
assert!(exists.is_ok(), "Should be able to check if service exists");
|
||||
|
||||
if let Ok(true) = exists {
|
||||
println!("✓ Service exists check passed");
|
||||
|
||||
// Test service status
|
||||
let status_result = manager.status(service_name);
|
||||
match status_result {
|
||||
Ok(status) => {
|
||||
println!("✓ Service status: {:?}", status);
|
||||
assert!(
|
||||
matches!(status, ServiceStatus::Running | ServiceStatus::Stopped),
|
||||
"Service should be running or stopped (for oneshot)"
|
||||
);
|
||||
}
|
||||
Err(e) => println!("⚠ Status check failed: {}", e),
|
||||
}
|
||||
|
||||
// Test service logs
|
||||
let logs_result = manager.logs(service_name, None);
|
||||
match logs_result {
|
||||
Ok(logs) => {
|
||||
println!("✓ Retrieved logs: {}", logs.len());
|
||||
// For echo command, we should have some output
|
||||
assert!(
|
||||
!logs.is_empty() || logs.contains("Hello"),
|
||||
"Should have log output"
|
||||
);
|
||||
}
|
||||
Err(e) => println!("⚠ Logs retrieval failed: {}", e),
|
||||
}
|
||||
|
||||
// Test service list
|
||||
let list_result = manager.list();
|
||||
match list_result {
|
||||
Ok(services) => {
|
||||
println!("✓ Listed {} services", services.len());
|
||||
assert!(
|
||||
services.contains(&service_name.to_string()),
|
||||
"Service should appear in list"
|
||||
);
|
||||
}
|
||||
Err(e) => println!("⚠ List services failed: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
// Test service stop
|
||||
println!("Testing service stop...");
|
||||
let stop_result = manager.stop(service_name);
|
||||
match stop_result {
|
||||
Ok(_) => println!("✓ Service stopped successfully"),
|
||||
Err(e) => println!("⚠ Stop failed: {}", e),
|
||||
}
|
||||
|
||||
// Test service removal
|
||||
println!("Testing service removal...");
|
||||
let remove_result = manager.remove(service_name);
|
||||
match remove_result {
|
||||
Ok(_) => println!("✓ Service removed successfully"),
|
||||
Err(e) => println!("⚠ Remove failed: {}", e),
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("⚠ Service creation/start failed: {}", e);
|
||||
// This might be expected if zinit doesn't allow service creation
|
||||
}
|
||||
}
|
||||
|
||||
// Final cleanup
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
} else {
|
||||
println!("⚠ Skipping test_service_lifecycle: No Zinit socket available");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_service_start_and_confirm() {
|
||||
if let Some(socket_path) = get_available_socket_path().await {
|
||||
let manager = ZinitServiceManager::new(&socket_path).expect("Failed to create manager");
|
||||
let service_name = "test-start-confirm-service";
|
||||
|
||||
// Clean up any existing service first
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
|
||||
let config = ServiceConfig {
|
||||
name: service_name.to_string(),
|
||||
binary_path: "sleep".to_string(),
|
||||
args: vec!["5".to_string()], // Sleep for 5 seconds
|
||||
working_directory: Some("/tmp".to_string()),
|
||||
environment: HashMap::new(),
|
||||
auto_restart: false,
|
||||
};
|
||||
|
||||
// Test start_and_confirm with timeout
|
||||
println!("Testing start_and_confirm with timeout...");
|
||||
let start_result = manager.start_and_confirm(&config, 10);
|
||||
match start_result {
|
||||
Ok(_) => {
|
||||
println!("✓ Service started and confirmed successfully");
|
||||
|
||||
// Verify it's actually running
|
||||
let status = manager.status(service_name);
|
||||
match status {
|
||||
Ok(ServiceStatus::Running) => println!("✓ Service confirmed running"),
|
||||
Ok(other_status) => {
|
||||
println!("⚠ Service in unexpected state: {:?}", other_status)
|
||||
}
|
||||
Err(e) => println!("⚠ Status check failed: {}", e),
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("⚠ start_and_confirm failed: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Test start_existing_and_confirm
|
||||
println!("Testing start_existing_and_confirm...");
|
||||
let start_existing_result = manager.start_existing_and_confirm(service_name, 5);
|
||||
match start_existing_result {
|
||||
Ok(_) => println!("✓ start_existing_and_confirm succeeded"),
|
||||
Err(e) => println!("⚠ start_existing_and_confirm failed: {}", e),
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
} else {
|
||||
println!("⚠ Skipping test_service_start_and_confirm: No Zinit socket available");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_service_restart() {
|
||||
if let Some(socket_path) = get_available_socket_path().await {
|
||||
let manager = ZinitServiceManager::new(&socket_path).expect("Failed to create manager");
|
||||
let service_name = "test-restart-service";
|
||||
|
||||
// Clean up any existing service first
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
|
||||
let config = ServiceConfig {
|
||||
name: service_name.to_string(),
|
||||
binary_path: "echo".to_string(),
|
||||
args: vec!["Restart test".to_string()],
|
||||
working_directory: Some("/tmp".to_string()),
|
||||
environment: HashMap::new(),
|
||||
auto_restart: true, // Enable auto-restart for this test
|
||||
};
|
||||
|
||||
// Start the service first
|
||||
let start_result = manager.start(&config);
|
||||
if start_result.is_ok() {
|
||||
// Wait for service to be established
|
||||
sleep(Duration::from_millis(1000)).await;
|
||||
|
||||
// Test restart
|
||||
println!("Testing service restart...");
|
||||
let restart_result = manager.restart(service_name);
|
||||
match restart_result {
|
||||
Ok(_) => {
|
||||
println!("✓ Service restarted successfully");
|
||||
|
||||
// Wait and check status
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
|
||||
let status_result = manager.status(service_name);
|
||||
match status_result {
|
||||
Ok(status) => {
|
||||
println!("✓ Service state after restart: {:?}", status);
|
||||
}
|
||||
Err(e) => println!("⚠ Status check after restart failed: {}", e),
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("⚠ Restart failed: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
cleanup_test_service(&manager, service_name).await;
|
||||
} else {
|
||||
println!("⚠ Skipping test_service_restart: No Zinit socket available");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_error_handling() {
|
||||
if let Some(socket_path) = get_available_socket_path().await {
|
||||
let manager = ZinitServiceManager::new(&socket_path).expect("Failed to create manager");
|
||||
|
||||
// Test operations on non-existent service
|
||||
let non_existent_service = "non-existent-service-12345";
|
||||
|
||||
// Test status of non-existent service
|
||||
let status_result = manager.status(non_existent_service);
|
||||
match status_result {
|
||||
Err(ServiceManagerError::ServiceNotFound(_)) => {
|
||||
println!("✓ Correctly returned ServiceNotFound for non-existent service");
|
||||
}
|
||||
Err(other_error) => {
|
||||
println!(
|
||||
"⚠ Got different error for non-existent service: {}",
|
||||
other_error
|
||||
);
|
||||
}
|
||||
Ok(_) => {
|
||||
println!("⚠ Unexpectedly found non-existent service");
|
||||
}
|
||||
}
|
||||
|
||||
// Test exists for non-existent service
|
||||
let exists_result = manager.exists(non_existent_service);
|
||||
match exists_result {
|
||||
Ok(false) => println!("✓ Correctly reported non-existent service as not existing"),
|
||||
Ok(true) => println!("⚠ Incorrectly reported non-existent service as existing"),
|
||||
Err(e) => println!("⚠ Error checking existence: {}", e),
|
||||
}
|
||||
|
||||
// Test stop of non-existent service
|
||||
let stop_result = manager.stop(non_existent_service);
|
||||
match stop_result {
|
||||
Err(_) => println!("✓ Correctly failed to stop non-existent service"),
|
||||
Ok(_) => println!("⚠ Unexpectedly succeeded in stopping non-existent service"),
|
||||
}
|
||||
|
||||
println!("✓ Error handling tests completed");
|
||||
} else {
|
||||
println!("⚠ Skipping test_error_handling: No Zinit socket available");
|
||||
}
|
||||
}
|
||||
14
config/README.md
Normal file
14
config/README.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# Environment Configuration
|
||||
|
||||
To set up your environment variables:
|
||||
|
||||
1. Copy the template file to `env.sh`:
|
||||
|
||||
```bash
|
||||
cp config/myenv_templ.sh config/env.sh
|
||||
```
|
||||
|
||||
2. Edit `config/env.sh` and fill in your specific values for the variables.
|
||||
|
||||
3. This file (`config/env.sh`) is excluded from version control by the project's `.gitignore` configuration, ensuring your sensitive information remains local and is never committed to the repository.
|
||||
|
||||
6
config/myenv_templ.sh
Normal file
6
config/myenv_templ.sh
Normal file
@@ -0,0 +1,6 @@
|
||||
|
||||
|
||||
export OPENROUTER_API_KEY=""
|
||||
export GROQ_API_KEY=""
|
||||
export CEREBRAS_API_KEY=""
|
||||
export OPENAI_API_KEY="sk-xxxxxxx"
|
||||
@@ -1,6 +1,7 @@
|
||||
// Example of using the network modules in SAL through Rhai
|
||||
// Shows TCP port checking, HTTP URL validation, and SSH command execution
|
||||
|
||||
|
||||
// Function to print section header
|
||||
fn section(title) {
|
||||
print("\n");
|
||||
@@ -19,14 +20,14 @@ let host = "localhost";
|
||||
let port = 22;
|
||||
print(`Checking if port ${port} is open on ${host}...`);
|
||||
let is_open = tcp.check_port(host, port);
|
||||
print(`Port ${port} is ${is_open ? "open" : "closed"}`);
|
||||
print(`Port ${port} is ${if is_open { "open" } else { "closed" }}`);
|
||||
|
||||
// Check multiple ports
|
||||
let ports = [22, 80, 443];
|
||||
print(`Checking multiple ports on ${host}...`);
|
||||
let port_results = tcp.check_ports(host, ports);
|
||||
for result in port_results {
|
||||
print(`Port ${result.port} is ${result.is_open ? "open" : "closed"}`);
|
||||
print(`Port ${result.port} is ${if result.is_open { "open" } else { "closed" }}`);
|
||||
}
|
||||
|
||||
// HTTP connectivity checks
|
||||
@@ -39,7 +40,7 @@ let http = net::new_http_connector();
|
||||
let url = "https://www.example.com";
|
||||
print(`Checking if ${url} is reachable...`);
|
||||
let is_reachable = http.check_url(url);
|
||||
print(`${url} is ${is_reachable ? "reachable" : "unreachable"}`);
|
||||
print(`${url} is ${if is_reachable { "reachable" } else { "unreachable" }}`);
|
||||
|
||||
// Check the status code of a URL
|
||||
print(`Checking status code of ${url}...`);
|
||||
@@ -68,7 +69,7 @@ if is_open {
|
||||
let ssh = net::new_ssh_builder()
|
||||
.host("localhost")
|
||||
.port(22)
|
||||
.user(os::get_env("USER") || "root")
|
||||
.user(if os::get_env("USER") != () { os::get_env("USER") } else { "root" })
|
||||
.timeout(10)
|
||||
.build();
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
print("Running a basic command using run().do()...");
|
||||
print("Running a basic command using run().execute()...");
|
||||
|
||||
// Execute a simple command
|
||||
let result = run("echo Hello from run_basic!").do();
|
||||
let result = run("echo Hello from run_basic!").execute();
|
||||
|
||||
// Print the command result
|
||||
print(`Command: echo Hello from run_basic!`);
|
||||
@@ -13,6 +13,6 @@ print(`Stderr:\n${result.stderr}`);
|
||||
// Example of a command that might fail (if 'nonexistent_command' doesn't exist)
|
||||
// This will halt execution by default because ignore_error() is not used.
|
||||
// print("Running a command that will fail (and should halt)...");
|
||||
// let fail_result = run("nonexistent_command").do(); // This line will cause the script to halt if the command doesn't exist
|
||||
// let fail_result = run("nonexistent_command").execute(); // This line will cause the script to halt if the command doesn't exist
|
||||
|
||||
print("Basic run() example finished.");
|
||||
@@ -2,7 +2,7 @@ print("Running a command that will fail, but ignoring the error...");
|
||||
|
||||
// Run a command that exits with a non-zero code (will fail)
|
||||
// Using .ignore_error() prevents the script from halting
|
||||
let result = run("exit 1").ignore_error().do();
|
||||
let result = run("exit 1").ignore_error().execute();
|
||||
|
||||
print(`Command finished.`);
|
||||
print(`Success: ${result.success}`); // This should be false
|
||||
@@ -22,7 +22,7 @@ print("\nScript continued execution after the potentially failing command.");
|
||||
// Example of a command that might fail due to OS error (e.g., command not found)
|
||||
// This *might* still halt depending on how the underlying Rust function handles it,
|
||||
// as ignore_error() primarily prevents halting on *command* non-zero exit codes.
|
||||
// let os_error_result = run("nonexistent_command_123").ignore_error().do();
|
||||
// let os_error_result = run("nonexistent_command_123").ignore_error().execute();
|
||||
// print(`OS Error Command Success: ${os_error_result.success}`);
|
||||
// print(`OS Error Command Exit Code: ${os_error_result.code}`);
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
print("Running a command using run().log().do()...");
|
||||
print("Running a command using run().log().execute()...");
|
||||
|
||||
// The .log() method will print the command string to the console before execution.
|
||||
// This is useful for debugging or tracing which commands are being run.
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
print("Running a command using run().silent().do()...\n");
|
||||
print("Running a command using run().silent().execute()...\n");
|
||||
|
||||
// This command will print to standard output and standard error
|
||||
// However, because .silent() is used, the output will not appear in the console directly
|
||||
let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().do();
|
||||
let result = run("echo 'This should be silent stdout.'; echo 'This should be silent stderr.' >&2; exit 0").silent().execute();
|
||||
|
||||
// The output is still captured in the CommandResult
|
||||
print(`Command finished.`);
|
||||
@@ -12,7 +12,7 @@ print(`Captured Stdout:\\n${result.stdout}`);
|
||||
print(`Captured Stderr:\\n${result.stderr}`);
|
||||
|
||||
// Example of a silent command that fails (but won't halt because we only suppress output)
|
||||
// let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().do();
|
||||
// let fail_result = run("echo 'This is silent failure stderr.' >&2; exit 1").silent().execute();
|
||||
// print(`Failed command finished (silent):`);
|
||||
// print(`Success: ${fail_result.success}`);
|
||||
// print(`Exit Code: ${fail_result.code}`);
|
||||
|
||||
43
examples/rfsclient/README.md
Normal file
43
examples/rfsclient/README.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# RFS Client Rhai Examples
|
||||
|
||||
This folder contains Rhai examples that use the SAL RFS client wrappers registered by `sal::rhai::register(&mut engine)` and executed by the `herodo` binary.
|
||||
|
||||
## Quick start
|
||||
|
||||
Run the auth + upload + download example (uses hardcoded credentials and `/etc/hosts` as input):
|
||||
|
||||
```bash
|
||||
cargo run -p herodo -- examples/rfsclient/auth_and_upload.rhai
|
||||
```
|
||||
|
||||
By default, the script:
|
||||
|
||||
- Uses base URL `http://127.0.0.1:8080`
|
||||
- Uses credentials `user` / `password`
|
||||
- Uploads the file `/etc/hosts`
|
||||
- Downloads to `/tmp/rfs_example_out.txt`
|
||||
|
||||
To customize, edit `examples/rfsclient/auth_and_upload.rhai` near the top and change `BASE_URL`, `USER`, `PASS`, and file paths.
|
||||
|
||||
## What the example does
|
||||
|
||||
- Creates the RFS client: `rfs_create_client(BASE_URL, USER, PASS, TIMEOUT)`
|
||||
- Health check: `rfs_health_check()`
|
||||
- Authenticates: `rfs_authenticate()`
|
||||
- Uploads a file: `rfs_upload_file(local_path, chunk_size, verify)` → returns file hash
|
||||
- Downloads it back: `rfs_download_file(file_id_or_hash, dest_path, verify)` → returns unit (throws on error)
|
||||
|
||||
See `examples/rfsclient/auth_and_upload.rhai` for details.
|
||||
|
||||
## Using the Rust client directly (optional)
|
||||
|
||||
If you want to use the Rust API (without Rhai), depend on `sal-rfs-client` and see:
|
||||
|
||||
- `packages/clients/rfsclient/src/client.rs` (`RfsClient`)
|
||||
- `packages/clients/rfsclient/src/types.rs` (config and option types)
|
||||
- `packages/clients/rfsclient/examples/` (example usage)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- Auth failures: verify credentials and that the server requires/authenticates them.
|
||||
- Connection errors: verify the base URL is reachable from your machine.
|
||||
41
examples/rfsclient/auth_and_upload.rhai
Normal file
41
examples/rfsclient/auth_and_upload.rhai
Normal file
@@ -0,0 +1,41 @@
|
||||
// RFS Client: Auth + Upload + Download example
|
||||
// Prereqs:
|
||||
// - RFS server reachable at RFS_BASE_URL
|
||||
// - Valid credentials in env: RFS_USER, RFS_PASS
|
||||
// - Run with herodo so the SAL Rhai modules are registered
|
||||
|
||||
// NOTE: env_get not available in this runtime; hardcode or replace with your env loader
|
||||
let BASE_URL = "http://127.0.0.1:8080";
|
||||
let USER = "user";
|
||||
let PASS = "password";
|
||||
let TIMEOUT = 30; // seconds
|
||||
|
||||
if BASE_URL == "" { throw "Set BASE_URL in the script"; }
|
||||
|
||||
// Create client
|
||||
let ok = rfs_create_client(BASE_URL, USER, PASS, TIMEOUT);
|
||||
if !ok { throw "Failed to create RFS client"; }
|
||||
|
||||
// Optional health check
|
||||
let health = rfs_health_check();
|
||||
print(`RFS health: ${health}`);
|
||||
|
||||
// Authenticate (required for some operations)
|
||||
let auth_ok = rfs_authenticate();
|
||||
if !auth_ok { throw "Authentication failed"; }
|
||||
|
||||
// Upload a local file
|
||||
// Use an existing readable file to avoid needing os_write_file module
|
||||
let local_file = "/etc/hosts";
|
||||
// rfs_upload_file(file_path, chunk_size, verify)
|
||||
let hash = rfs_upload_file(local_file, 0, false);
|
||||
print(`Uploaded file hash: ${hash}`);
|
||||
|
||||
// Download it back
|
||||
let out_path = "/tmp/rfs_example_out.txt";
|
||||
// rfs_download_file(file_id, output_path, verify) returns unit and throws on error
|
||||
rfs_download_file(hash, out_path, false);
|
||||
|
||||
print(`Downloaded to: ${out_path}`);
|
||||
|
||||
true
|
||||
15
examples_rust/ai/Cargo.toml
Normal file
15
examples_rust/ai/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "openrouter_example"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[workspace]
|
||||
|
||||
[[bin]]
|
||||
name = "openrouter_example"
|
||||
path = "openrouter_example.rs"
|
||||
|
||||
[dependencies]
|
||||
codemonkey = { path = "../../packages/ai/codemonkey" }
|
||||
openai-api-rs = "6.0.8"
|
||||
tokio = { version = "1.0", features = ["full"] }
|
||||
47
examples_rust/ai/openrouter_example.rs
Normal file
47
examples_rust/ai/openrouter_example.rs
Normal file
@@ -0,0 +1,47 @@
|
||||
use codemonkey::{create_ai_provider, AIProviderType, CompletionRequestBuilder, Message, MessageRole, Content};
|
||||
use std::error::Error;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn Error>> {
|
||||
|
||||
let (mut provider, provider_type) = create_ai_provider(AIProviderType::OpenRouter)?;
|
||||
|
||||
let messages = vec![Message {
|
||||
role: MessageRole::user,
|
||||
content: Content::Text("Explain the concept of a factory design pattern in Rust.".to_string()),
|
||||
name: None,
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
}];
|
||||
|
||||
println!("Sending request to OpenRouter...");
|
||||
let response = CompletionRequestBuilder::new(
|
||||
&mut *provider,
|
||||
"openai/gpt-oss-120b".to_string(), // Model name as specified by the user
|
||||
messages,
|
||||
provider_type, // Pass the provider_type
|
||||
)
|
||||
.temperature(1.0)
|
||||
.max_tokens(8192)
|
||||
.top_p(1.0)
|
||||
.reasoning_effort("medium")
|
||||
.stream(false)
|
||||
.openrouter_options(|builder| {
|
||||
builder.provider(
|
||||
codemonkey::OpenRouterProviderOptionsBuilder::new()
|
||||
.order(vec!["cerebras"])
|
||||
.build(),
|
||||
)
|
||||
})
|
||||
.completion()
|
||||
.await?;
|
||||
|
||||
for choice in response.choices {
|
||||
if let Some(content) = choice.message.content {
|
||||
print!("{}", content);
|
||||
}
|
||||
}
|
||||
println!();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
13
examples_rust/ai/run.sh
Executable file
13
examples_rust/ai/run.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Change to directory where this script is located
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
source ../../config/myenv.sh
|
||||
|
||||
# Build the example
|
||||
cargo build
|
||||
|
||||
# Run the example
|
||||
cargo run --bin openrouter_example
|
||||
@@ -3,7 +3,7 @@
|
||||
//! This library loads the Rhai engine, registers all SAL modules,
|
||||
//! and executes Rhai scripts from a specified directory in sorted order.
|
||||
|
||||
use rhai::Engine;
|
||||
use rhai::{Engine, Scope};
|
||||
use std::error::Error;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
@@ -29,6 +29,19 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> {
|
||||
|
||||
// Create a new Rhai engine
|
||||
let mut engine = Engine::new();
|
||||
|
||||
// TODO: if we create a scope here we could clean up all the different functionsand types regsitered wit the engine
|
||||
// We should generalize the way we add things to the scope for each module sepeartely
|
||||
let mut scope = Scope::new();
|
||||
// Conditionally add Hetzner client only when env config is present
|
||||
if let Ok(cfg) = sal::hetzner::config::Config::from_env() {
|
||||
let hetzner_client = sal::hetzner::api::Client::new(cfg);
|
||||
scope.push("hetzner", hetzner_client);
|
||||
}
|
||||
// This makes it easy to call e.g. `hetzner.get_server()` or `mycelium.get_connected_peers()`
|
||||
// --> without the need of manually created a client for each one first
|
||||
// --> could be conditionally compiled to only use those who we need (we only push the things to the scope that we actually need to run the script)
|
||||
|
||||
|
||||
// Register println function for output
|
||||
engine.register_fn("println", |s: &str| println!("{}", s));
|
||||
@@ -78,19 +91,20 @@ pub fn run(script_path: &str) -> Result<(), Box<dyn Error>> {
|
||||
let script = fs::read_to_string(&script_file)?;
|
||||
|
||||
// Execute the script
|
||||
match engine.eval::<rhai::Dynamic>(&script) {
|
||||
Ok(result) => {
|
||||
println!("Script executed successfully");
|
||||
if !result.is_unit() {
|
||||
println!("Result: {}", result);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("Error executing script: {}", err);
|
||||
// Exit with error code when a script fails
|
||||
process::exit(1);
|
||||
}
|
||||
}
|
||||
// match engine.eval::<rhai::Dynamic>(&script) {
|
||||
// Ok(result) => {
|
||||
// println!("Script executed successfully");
|
||||
// if !result.is_unit() {
|
||||
// println!("Result: {}", result);
|
||||
// }
|
||||
// }
|
||||
// Err(err) => {
|
||||
// eprintln!("Error executing script: {}", err);
|
||||
// // Exit with error code when a script fails
|
||||
// process::exit(1);
|
||||
// }
|
||||
// }
|
||||
engine.run_with_scope(&mut scope, &script)?;
|
||||
}
|
||||
|
||||
println!("\nAll scripts executed successfully!");
|
||||
|
||||
10
packages/ai/codemonkey/Cargo.toml
Normal file
10
packages/ai/codemonkey/Cargo.toml
Normal file
@@ -0,0 +1,10 @@
|
||||
[package]
|
||||
name = "codemonkey"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
async-trait = "0.1.80"
|
||||
openrouter-rs = "0.4.5"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
216
packages/ai/codemonkey/src/lib.rs
Normal file
216
packages/ai/codemonkey/src/lib.rs
Normal file
@@ -0,0 +1,216 @@
|
||||
use async_trait::async_trait;
|
||||
use openrouter_rs::{OpenRouterClient, api::chat::{ChatCompletionRequest, Message}, types::completion::CompletionsResponse};
|
||||
use std::env;
|
||||
use std::error::Error;
|
||||
|
||||
// Re-export MessageRole for easier use in client code
|
||||
pub use openrouter_rs::types::Role as MessageRole;
|
||||
|
||||
#[async_trait]
|
||||
pub trait AIProvider {
|
||||
async fn completion(
|
||||
&mut self,
|
||||
request: CompletionRequest,
|
||||
) -> Result<CompletionsResponse, Box<dyn Error>>;
|
||||
}
|
||||
|
||||
pub struct CompletionRequest {
|
||||
pub model: String,
|
||||
pub messages: Vec<Message>,
|
||||
pub temperature: Option<f64>,
|
||||
pub max_tokens: Option<i64>,
|
||||
pub top_p: Option<f64>,
|
||||
pub stream: Option<bool>,
|
||||
pub stop: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
pub struct CompletionRequestBuilder<'a> {
|
||||
provider: &'a mut dyn AIProvider,
|
||||
model: String,
|
||||
messages: Vec<Message>,
|
||||
temperature: Option<f64>,
|
||||
max_tokens: Option<i64>,
|
||||
top_p: Option<f64>,
|
||||
stream: Option<bool>,
|
||||
stop: Option<Vec<String>>,
|
||||
provider_type: AIProviderType,
|
||||
}
|
||||
|
||||
impl<'a> CompletionRequestBuilder<'a> {
|
||||
pub fn new(provider: &'a mut dyn AIProvider, model: String, messages: Vec<Message>, provider_type: AIProviderType) -> Self {
|
||||
Self {
|
||||
provider,
|
||||
model,
|
||||
messages,
|
||||
temperature: None,
|
||||
max_tokens: None,
|
||||
top_p: None,
|
||||
stream: None,
|
||||
stop: None,
|
||||
provider_type,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn temperature(mut self, temperature: f64) -> Self {
|
||||
self.temperature = Some(temperature);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn max_tokens(mut self, max_tokens: i64) -> Self {
|
||||
self.max_tokens = Some(max_tokens);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn top_p(mut self, top_p: f64) -> Self {
|
||||
self.top_p = Some(top_p);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn stream(mut self, stream: bool) -> Self {
|
||||
self.stream = Some(stream);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn stop(mut self, stop: Vec<String>) -> Self {
|
||||
self.stop = Some(stop);
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn completion(self) -> Result<CompletionsResponse, Box<dyn Error>> {
|
||||
let request = CompletionRequest {
|
||||
model: self.model,
|
||||
messages: self.messages,
|
||||
temperature: self.temperature,
|
||||
max_tokens: self.max_tokens,
|
||||
top_p: self.top_p,
|
||||
stream: self.stream,
|
||||
stop: self.stop,
|
||||
};
|
||||
self.provider.completion(request).await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GroqAIProvider {
|
||||
client: OpenRouterClient,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl AIProvider for GroqAIProvider {
|
||||
async fn completion(
|
||||
&mut self,
|
||||
request: CompletionRequest,
|
||||
) -> Result<CompletionsResponse, Box<dyn Error>> {
|
||||
let chat_request = ChatCompletionRequest::builder()
|
||||
.model(request.model)
|
||||
.messages(request.messages)
|
||||
.temperature(request.temperature.unwrap_or(1.0))
|
||||
.max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048))
|
||||
.top_p(request.top_p.unwrap_or(1.0))
|
||||
.build()?;
|
||||
|
||||
let result = self.client.send_chat_completion(&chat_request).await?;
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct OpenAIProvider {
|
||||
client: OpenRouterClient,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl AIProvider for OpenAIProvider {
|
||||
async fn completion(
|
||||
&mut self,
|
||||
request: CompletionRequest,
|
||||
) -> Result<CompletionsResponse, Box<dyn Error>> {
|
||||
let chat_request = ChatCompletionRequest::builder()
|
||||
.model(request.model)
|
||||
.messages(request.messages)
|
||||
.temperature(request.temperature.unwrap_or(1.0))
|
||||
.max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048))
|
||||
.top_p(request.top_p.unwrap_or(1.0))
|
||||
.build()?;
|
||||
|
||||
let result = self.client.send_chat_completion(&chat_request).await?;
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct OpenRouterAIProvider {
|
||||
client: OpenRouterClient,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl AIProvider for OpenRouterAIProvider {
|
||||
async fn completion(
|
||||
&mut self,
|
||||
request: CompletionRequest,
|
||||
) -> Result<CompletionsResponse, Box<dyn Error>> {
|
||||
let chat_request = ChatCompletionRequest::builder()
|
||||
.model(request.model)
|
||||
.messages(request.messages)
|
||||
.temperature(request.temperature.unwrap_or(1.0))
|
||||
.max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048))
|
||||
.top_p(request.top_p.unwrap_or(1.0))
|
||||
.build()?;
|
||||
|
||||
let result = self.client.send_chat_completion(&chat_request).await?;
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CerebrasAIProvider {
|
||||
client: OpenRouterClient,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl AIProvider for CerebrasAIProvider {
|
||||
async fn completion(
|
||||
&mut self,
|
||||
request: CompletionRequest,
|
||||
) -> Result<CompletionsResponse, Box<dyn Error>> {
|
||||
let chat_request = ChatCompletionRequest::builder()
|
||||
.model(request.model)
|
||||
.messages(request.messages)
|
||||
.temperature(request.temperature.unwrap_or(1.0))
|
||||
.max_tokens(request.max_tokens.map(|x| x as u32).unwrap_or(2048))
|
||||
.top_p(request.top_p.unwrap_or(1.0))
|
||||
.build()?;
|
||||
|
||||
let result = self.client.send_chat_completion(&chat_request).await?;
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq)]
|
||||
pub enum AIProviderType {
|
||||
Groq,
|
||||
OpenAI,
|
||||
OpenRouter,
|
||||
Cerebras,
|
||||
}
|
||||
|
||||
pub fn create_ai_provider(provider_type: AIProviderType) -> Result<(Box<dyn AIProvider>, AIProviderType), Box<dyn Error>> {
|
||||
match provider_type {
|
||||
AIProviderType::Groq => {
|
||||
let api_key = env::var("GROQ_API_KEY")?;
|
||||
let client = OpenRouterClient::builder().api_key(api_key).build()?;
|
||||
Ok((Box::new(GroqAIProvider { client }), AIProviderType::Groq))
|
||||
}
|
||||
AIProviderType::OpenAI => {
|
||||
let api_key = env::var("OPENAI_API_KEY")?;
|
||||
let client = OpenRouterClient::builder().api_key(api_key).build()?;
|
||||
Ok((Box::new(OpenAIProvider { client }), AIProviderType::OpenAI))
|
||||
}
|
||||
AIProviderType::OpenRouter => {
|
||||
let api_key = env::var("OPENROUTER_API_KEY")?;
|
||||
let client = OpenRouterClient::builder().api_key(api_key).build()?;
|
||||
Ok((Box::new(OpenRouterAIProvider { client }), AIProviderType::OpenRouter))
|
||||
}
|
||||
AIProviderType::Cerebras => {
|
||||
let api_key = env::var("CEREBRAS_API_KEY")?;
|
||||
let client = OpenRouterClient::builder().api_key(api_key).build()?;
|
||||
Ok((Box::new(CerebrasAIProvider { client }), AIProviderType::Cerebras))
|
||||
}
|
||||
}
|
||||
}
|
||||
12
packages/clients/hetznerclient/Cargo.toml
Normal file
12
packages/clients/hetznerclient/Cargo.toml
Normal file
@@ -0,0 +1,12 @@
|
||||
# Manifest for the Hetzner Robot API client crate.
[package]
name = "sal-hetzner"
version = "0.1.0"
# NOTE(review): edition 2024 requires a recent toolchain, while the
# workspace declares rust-version 1.70.0 — confirm these are compatible.
edition = "2024"

[dependencies]
# Table rendering used by the rhai pretty_print helpers.
prettytable = "0.10.0"
# Blocking HTTP client (workspace enables the "blocking" feature).
reqwest.workspace = true
rhai = { workspace = true, features = ["serde"] }
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
thiserror.workspace = true
|
||||
54
packages/clients/hetznerclient/src/api/error.rs
Normal file
54
packages/clients/hetznerclient/src/api/error.rs
Normal file
@@ -0,0 +1,54 @@
|
||||
use std::fmt;
|
||||
|
||||
use serde::Deserialize;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Top-level error type for the Hetzner Robot client.
#[derive(Debug, Error)]
pub enum AppError {
    /// Transport-level failure (connect, timeout, TLS, body read, ...).
    #[error("Request failed: {0}")]
    RequestError(#[from] reqwest::Error),
    /// The API answered with a non-success HTTP status; carries the raw
    /// status and body.
    #[error("API error: {0}")]
    ApiError(ApiError),
    /// A response body could not be decoded into the expected model.
    #[error("Deserialization Error: {0:?}")]
    SerdeJsonError(#[from] serde_json::Error),
}
|
||||
|
||||
/// A non-success answer from the Hetzner Robot API.
///
/// `message` holds the raw response body; it is usually the API's JSON
/// error document, which `Display` tries to decode for nicer output.
#[derive(Debug, Deserialize)]
pub struct ApiError {
    // Numeric HTTP status code (e.g. 404, 409).
    pub status: u16,
    // Raw response body as received.
    pub message: String,
}
|
||||
|
||||
impl From<reqwest::blocking::Response> for ApiError {
|
||||
fn from(value: reqwest::blocking::Response) -> Self {
|
||||
ApiError {
|
||||
status: value.status().into(),
|
||||
message: value.text().unwrap_or("The API call returned an error.".to_string()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ApiError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
#[derive(Deserialize)]
|
||||
struct HetznerApiError {
|
||||
code: String,
|
||||
message: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct HetznerApiErrorWrapper {
|
||||
error: HetznerApiError,
|
||||
}
|
||||
|
||||
if let Ok(wrapper) = serde_json::from_str::<HetznerApiErrorWrapper>(&self.message) {
|
||||
write!(
|
||||
f,
|
||||
"Status: {}, Code: {}, Message: {}",
|
||||
self.status, wrapper.error.code, wrapper.error.message
|
||||
)
|
||||
} else {
|
||||
write!(f, "Status: {}: {}", self.status, self.message)
|
||||
}
|
||||
}
|
||||
}
|
||||
513
packages/clients/hetznerclient/src/api/mod.rs
Normal file
513
packages/clients/hetznerclient/src/api/mod.rs
Normal file
@@ -0,0 +1,513 @@
|
||||
pub mod error;
|
||||
pub mod models;
|
||||
|
||||
use self::models::{
|
||||
Boot, Rescue, Server, SshKey, ServerAddonProduct, ServerAddonProductWrapper,
|
||||
AuctionServerProduct, AuctionServerProductWrapper, AuctionTransaction,
|
||||
AuctionTransactionWrapper, BootWrapper, Cancellation, CancellationWrapper,
|
||||
OrderServerBuilder, OrderServerProduct, OrderServerProductWrapper, RescueWrapped,
|
||||
ServerWrapper, SshKeyWrapper, Transaction, TransactionWrapper,
|
||||
ServerAddonTransaction, ServerAddonTransactionWrapper,
|
||||
OrderServerAddonBuilder,
|
||||
};
|
||||
use crate::api::error::ApiError;
|
||||
use crate::config::Config;
|
||||
use error::AppError;
|
||||
use reqwest::blocking::Client as HttpClient;
|
||||
use serde_json::json;
|
||||
|
||||
/// Blocking client for the Hetzner Robot webservice.
#[derive(Clone)]
pub struct Client {
    // Shared HTTP client used for all requests.
    http_client: HttpClient,
    // Credentials (HTTP basic auth) and base URL for the Robot API.
    config: Config,
}
|
||||
|
||||
impl Client {
|
||||
    /// Create a client that talks to the Robot API described by `config`.
    pub fn new(config: Config) -> Self {
        Self {
            http_client: HttpClient::new(),
            config,
        }
    }
|
||||
|
||||
    /// Decode an API response into `T`.
    ///
    /// On a 2xx status the body is parsed as JSON into `T`; any other
    /// status becomes `AppError::ApiError` carrying the status code and
    /// the raw body so callers can inspect the server's error document.
    fn handle_response<T>(&self, response: reqwest::blocking::Response) -> Result<T, AppError>
    where
        T: serde::de::DeserializeOwned,
    {
        // Capture the status before `text()` consumes the response.
        let status = response.status();
        let body = response.text()?;

        if status.is_success() {
            serde_json::from_str::<T>(&body).map_err(Into::into)
        } else {
            Err(AppError::ApiError(ApiError {
                status: status.as_u16(),
                message: body,
            }))
        }
    }
|
||||
|
||||
pub fn get_server(&self, server_number: i32) -> Result<Server, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/server/{}", self.config.api_url, server_number))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: ServerWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.server)
|
||||
}
|
||||
|
||||
pub fn get_servers(&self) -> Result<Vec<Server>, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/server", self.config.api_url))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: Vec<ServerWrapper> = self.handle_response(response)?;
|
||||
let servers = wrapped.into_iter().map(|sw| sw.server).collect();
|
||||
Ok(servers)
|
||||
}
|
||||
|
||||
pub fn update_server_name(&self, server_number: i32, name: &str) -> Result<Server, AppError> {
|
||||
let params = [("server_name", name)];
|
||||
let response = self
|
||||
.http_client
|
||||
.post(format!("{}/server/{}", self.config.api_url, server_number))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.form(¶ms)
|
||||
.send()?;
|
||||
|
||||
let wrapped: ServerWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.server)
|
||||
}
|
||||
|
||||
pub fn get_cancellation_data(&self, server_number: i32) -> Result<Cancellation, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!(
|
||||
"{}/server/{}/cancellation",
|
||||
self.config.api_url, server_number
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: CancellationWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.cancellation)
|
||||
}
|
||||
|
||||
pub fn cancel_server(
|
||||
&self,
|
||||
server_number: i32,
|
||||
cancellation_date: &str,
|
||||
) -> Result<Cancellation, AppError> {
|
||||
let params = [("cancellation_date", cancellation_date)];
|
||||
let response = self
|
||||
.http_client
|
||||
.post(format!(
|
||||
"{}/server/{}/cancellation",
|
||||
self.config.api_url, server_number
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.form(¶ms)
|
||||
.send()?;
|
||||
|
||||
let wrapped: CancellationWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.cancellation)
|
||||
}
|
||||
|
||||
pub fn withdraw_cancellation(&self, server_number: i32) -> Result<(), AppError> {
|
||||
self.http_client
|
||||
.delete(format!(
|
||||
"{}/server/{}/cancellation",
|
||||
self.config.api_url, server_number
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_ssh_keys(&self) -> Result<Vec<SshKey>, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/key", self.config.api_url))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: Vec<SshKeyWrapper> = self.handle_response(response)?;
|
||||
let keys = wrapped.into_iter().map(|sk| sk.key).collect();
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
pub fn get_ssh_key(&self, fingerprint: &str) -> Result<SshKey, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/key/{}", self.config.api_url, fingerprint))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: SshKeyWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.key)
|
||||
}
|
||||
|
||||
pub fn add_ssh_key(&self, name: &str, data: &str) -> Result<SshKey, AppError> {
|
||||
let params = [("name", name), ("data", data)];
|
||||
let response = self
|
||||
.http_client
|
||||
.post(format!("{}/key", self.config.api_url))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.form(¶ms)
|
||||
.send()?;
|
||||
|
||||
let wrapped: SshKeyWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.key)
|
||||
}
|
||||
|
||||
pub fn update_ssh_key_name(&self, fingerprint: &str, name: &str) -> Result<SshKey, AppError> {
|
||||
let params = [("name", name)];
|
||||
let response = self
|
||||
.http_client
|
||||
.post(format!("{}/key/{}", self.config.api_url, fingerprint))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.form(¶ms)
|
||||
.send()?;
|
||||
|
||||
let wrapped: SshKeyWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.key)
|
||||
}
|
||||
|
||||
pub fn delete_ssh_key(&self, fingerprint: &str) -> Result<(), AppError> {
|
||||
self.http_client
|
||||
.delete(format!("{}/key/{}", self.config.api_url, fingerprint))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
pub fn get_boot_configuration(&self, server_number: i32) -> Result<Boot, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/boot/{}", self.config.api_url, server_number))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: BootWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.boot)
|
||||
}
|
||||
|
||||
pub fn get_rescue_boot_configuration(&self, server_number: i32) -> Result<Rescue, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!(
|
||||
"{}/boot/{}/rescue",
|
||||
self.config.api_url, server_number
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: RescueWrapped = self.handle_response(response)?;
|
||||
Ok(wrapped.rescue)
|
||||
}
|
||||
|
||||
pub fn enable_rescue_mode(
|
||||
&self,
|
||||
server_number: i32,
|
||||
os: &str,
|
||||
authorized_keys: Option<&[String]>,
|
||||
) -> Result<Rescue, AppError> {
|
||||
let mut params = vec![("os", os)];
|
||||
if let Some(keys) = authorized_keys {
|
||||
for key in keys {
|
||||
params.push(("authorized_key[]", key));
|
||||
}
|
||||
}
|
||||
let response = self
|
||||
.http_client
|
||||
.post(format!(
|
||||
"{}/boot/{}/rescue",
|
||||
self.config.api_url, server_number
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.form(¶ms)
|
||||
.send()?;
|
||||
|
||||
let wrapped: RescueWrapped = self.handle_response(response)?;
|
||||
Ok(wrapped.rescue)
|
||||
}
|
||||
|
||||
pub fn disable_rescue_mode(&self, server_number: i32) -> Result<Rescue, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.delete(format!(
|
||||
"{}/boot/{}/rescue",
|
||||
self.config.api_url, server_number
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: RescueWrapped = self.handle_response(response)?;
|
||||
Ok(wrapped.rescue)
|
||||
}
|
||||
|
||||
pub fn get_server_products(
|
||||
&self,
|
||||
) -> Result<Vec<OrderServerProduct>, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/order/server/product", &self.config.api_url))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: Vec<OrderServerProductWrapper> = self.handle_response(response)?;
|
||||
let products = wrapped.into_iter().map(|sop| sop.product).collect();
|
||||
Ok(products)
|
||||
}
|
||||
|
||||
pub fn get_server_product_by_id(
|
||||
&self,
|
||||
product_id: &str,
|
||||
) -> Result<OrderServerProduct, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!(
|
||||
"{}/order/server/product/{}",
|
||||
&self.config.api_url, product_id
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: OrderServerProductWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.product)
|
||||
}
|
||||
    /// Order a new dedicated server.
    ///
    /// Builds the order payload from `order`; optional fields are only
    /// included when set, and `test` is only transmitted when `true`.
    // NOTE(review): this endpoint POSTs JSON while order_auction_server and
    // order_server_addon use form encoding — confirm which encoding the
    // Robot API expects here.
    pub fn order_server(&self, order: OrderServerBuilder) -> Result<Transaction, AppError> {
        let mut params = json!({
            "product_id": order.product_id,
            "dist": order.dist,
            "location": order.location,
            "authorized_key": order.authorized_keys.unwrap_or_default(),
        });

        if let Some(addons) = order.addons {
            params["addon"] = json!(addons);
        }

        // `test` is omitted entirely unless explicitly true.
        if let Some(test) = order.test {
            if test {
                params["test"] = json!(test);
            }
        }

        let response = self
            .http_client
            .post(format!("{}/order/server/transaction", &self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .json(&params)
            .send()?;

        let wrapped: TransactionWrapper = self.handle_response(response)?;
        Ok(wrapped.transaction)
    }
|
||||
|
||||
pub fn get_transaction_by_id(&self, transaction_id: &str) -> Result<Transaction, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!(
|
||||
"{}/order/server/transaction/{}",
|
||||
&self.config.api_url, transaction_id
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: TransactionWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.transaction)
|
||||
}
|
||||
pub fn get_transactions(&self) -> Result<Vec<Transaction>, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/order/server/transaction", &self.config.api_url))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: Vec<TransactionWrapper> = self.handle_response(response)?;
|
||||
let transactions = wrapped.into_iter().map(|t| t.transaction).collect();
|
||||
Ok(transactions)
|
||||
}
|
||||
pub fn get_auction_server_products(&self) -> Result<Vec<AuctionServerProduct>, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!(
|
||||
"{}/order/server_market/product",
|
||||
&self.config.api_url
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: Vec<AuctionServerProductWrapper> = self.handle_response(response)?;
|
||||
let products = wrapped.into_iter().map(|asp| asp.product).collect();
|
||||
Ok(products)
|
||||
}
|
||||
pub fn get_auction_server_product_by_id(&self, product_id: &str) -> Result<AuctionServerProduct, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/order/server_market/product/{}", &self.config.api_url, product_id))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: AuctionServerProductWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.product)
|
||||
}
|
||||
pub fn get_auction_transactions(&self) -> Result<Vec<AuctionTransaction>, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/order/server_market/transaction", &self.config.api_url))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: Vec<AuctionTransactionWrapper> = self.handle_response(response)?;
|
||||
let transactions = wrapped.into_iter().map(|t| t.transaction).collect();
|
||||
Ok(transactions)
|
||||
}
|
||||
|
||||
pub fn get_auction_transaction_by_id(&self, transaction_id: &str) -> Result<AuctionTransaction, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/order/server_market/transaction/{}", &self.config.api_url, transaction_id))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: AuctionTransactionWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.transaction)
|
||||
}
|
||||
|
||||
pub fn get_server_addon_products(
|
||||
&self,
|
||||
server_number: i64,
|
||||
) -> Result<Vec<ServerAddonProduct>, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!(
|
||||
"{}/order/server_addon/{}/product",
|
||||
&self.config.api_url, server_number
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: Vec<ServerAddonProductWrapper> = self.handle_response(response)?;
|
||||
let products = wrapped.into_iter().map(|sap| sap.product).collect();
|
||||
Ok(products)
|
||||
}
|
||||
|
||||
pub fn order_auction_server(
|
||||
&self,
|
||||
product_id: i64,
|
||||
authorized_keys: Vec<String>,
|
||||
dist: Option<String>,
|
||||
arch: Option<String>,
|
||||
lang: Option<String>,
|
||||
comment: Option<String>,
|
||||
addons: Option<Vec<String>>,
|
||||
test: Option<bool>,
|
||||
) -> Result<AuctionTransaction, AppError> {
|
||||
let mut params: Vec<(&str, String)> = Vec::new();
|
||||
|
||||
params.push(("product_id", product_id.to_string()));
|
||||
|
||||
for key in &authorized_keys {
|
||||
params.push(("authorized_key[]", key.clone()));
|
||||
}
|
||||
|
||||
if let Some(dist) = dist {
|
||||
params.push(("dist", dist));
|
||||
}
|
||||
if let Some(arch) = arch {
|
||||
params.push(("@deprecated arch", arch));
|
||||
}
|
||||
if let Some(lang) = lang {
|
||||
params.push(("lang", lang));
|
||||
}
|
||||
if let Some(comment) = comment {
|
||||
params.push(("comment", comment));
|
||||
}
|
||||
if let Some(addons) = addons {
|
||||
for addon in addons {
|
||||
params.push(("addon[]", addon));
|
||||
}
|
||||
}
|
||||
if let Some(test) = test {
|
||||
params.push(("test", test.to_string()));
|
||||
}
|
||||
|
||||
let response = self
|
||||
.http_client
|
||||
.post(format!("{}/order/server_market/transaction", &self.config.api_url))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.form(¶ms)
|
||||
.send()?;
|
||||
|
||||
let wrapped: AuctionTransactionWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.transaction)
|
||||
}
|
||||
|
||||
pub fn get_server_addon_transactions(&self) -> Result<Vec<ServerAddonTransaction>, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!("{}/order/server_addon/transaction", &self.config.api_url))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: Vec<ServerAddonTransactionWrapper> = self.handle_response(response)?;
|
||||
let transactions = wrapped.into_iter().map(|satw| satw.transaction).collect();
|
||||
Ok(transactions)
|
||||
}
|
||||
|
||||
pub fn get_server_addon_transaction_by_id(
|
||||
&self,
|
||||
transaction_id: &str,
|
||||
) -> Result<ServerAddonTransaction, AppError> {
|
||||
let response = self
|
||||
.http_client
|
||||
.get(format!(
|
||||
"{}/order/server_addon/transaction/{}",
|
||||
&self.config.api_url, transaction_id
|
||||
))
|
||||
.basic_auth(&self.config.username, Some(&self.config.password))
|
||||
.send()?;
|
||||
|
||||
let wrapped: ServerAddonTransactionWrapper = self.handle_response(response)?;
|
||||
Ok(wrapped.transaction)
|
||||
}
|
||||
|
||||
    /// Order an addon for an existing server.
    // NOTE(review): the payload is built as a JSON value but sent with
    // `.form(...)` (urlencoded) — this only works while every value stays
    // a flat primitive. Compare `order_server`, which posts JSON; confirm
    // which encoding the Robot API expects.
    pub fn order_server_addon(
        &self,
        order: OrderServerAddonBuilder,
    ) -> Result<ServerAddonTransaction, AppError> {
        let mut params = json!({
            "server_number": order.server_number,
            "product_id": order.product_id,
        });

        if let Some(reason) = order.reason {
            params["reason"] = json!(reason);
        }
        if let Some(gateway) = order.gateway {
            params["gateway"] = json!(gateway);
        }
        // `test` is only transmitted when explicitly true.
        if let Some(test) = order.test {
            if test {
                params["test"] = json!(test);
            }
        }

        let response = self
            .http_client
            .post(format!("{}/order/server_addon/transaction", &self.config.api_url))
            .basic_auth(&self.config.username, Some(&self.config.password))
            .form(&params)
            .send()?;

        let wrapped: ServerAddonTransactionWrapper = self.handle_response(response)?;
        Ok(wrapped.transaction)
    }
|
||||
}
|
||||
1894
packages/clients/hetznerclient/src/api/models.rs
Normal file
1894
packages/clients/hetznerclient/src/api/models.rs
Normal file
File diff suppressed because it is too large
Load Diff
25
packages/clients/hetznerclient/src/config.rs
Normal file
25
packages/clients/hetznerclient/src/config.rs
Normal file
@@ -0,0 +1,25 @@
|
||||
use std::env;
|
||||
|
||||
/// Connection settings for the Hetzner Robot API.
#[derive(Clone)]
pub struct Config {
    // HTTP basic-auth user (Robot webservice account).
    pub username: String,
    // HTTP basic-auth password.
    pub password: String,
    // Base URL of the API, without trailing slash.
    pub api_url: String,
}
|
||||
|
||||
impl Config {
|
||||
pub fn from_env() -> Result<Self, String> {
|
||||
let username = env::var("HETZNER_USERNAME")
|
||||
.map_err(|_| "HETZNER_USERNAME environment variable not set".to_string())?;
|
||||
let password = env::var("HETZNER_PASSWORD")
|
||||
.map_err(|_| "HETZNER_PASSWORD environment variable not set".to_string())?;
|
||||
let api_url = env::var("HETZNER_API_URL")
|
||||
.unwrap_or_else(|_| "https://robot-ws.your-server.de".to_string());
|
||||
|
||||
Ok(Config {
|
||||
username,
|
||||
password,
|
||||
api_url,
|
||||
})
|
||||
}
|
||||
}
|
||||
3
packages/clients/hetznerclient/src/lib.rs
Normal file
3
packages/clients/hetznerclient/src/lib.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod api;
|
||||
pub mod config;
|
||||
pub mod rhai;
|
||||
63
packages/clients/hetznerclient/src/rhai/boot.rs
Normal file
63
packages/clients/hetznerclient/src/rhai/boot.rs
Normal file
@@ -0,0 +1,63 @@
|
||||
use crate::api::{
|
||||
models::{Boot, Rescue},
|
||||
Client,
|
||||
};
|
||||
use rhai::{plugin::*, Engine};
|
||||
|
||||
/// Register the boot-related API functions on the given Rhai engine.
pub fn register(engine: &mut Engine) {
    let boot_module = exported_module!(boot_api);
    // Global registration makes the functions callable without a namespace.
    engine.register_global_module(boot_module.into());
}
|
||||
|
||||
#[export_module]
|
||||
pub mod boot_api {
|
||||
use super::*;
|
||||
use rhai::EvalAltResult;
|
||||
|
||||
#[rhai_fn(name = "get_boot_configuration", return_raw)]
|
||||
pub fn get_boot_configuration(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
) -> Result<Boot, Box<EvalAltResult>> {
|
||||
client
|
||||
.get_boot_configuration(server_number as i32)
|
||||
.map_err(|e| e.to_string().into())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_rescue_boot_configuration", return_raw)]
|
||||
pub fn get_rescue_boot_configuration(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
) -> Result<Rescue, Box<EvalAltResult>> {
|
||||
client
|
||||
.get_rescue_boot_configuration(server_number as i32)
|
||||
.map_err(|e| e.to_string().into())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "enable_rescue_mode", return_raw)]
|
||||
pub fn enable_rescue_mode(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
os: &str,
|
||||
authorized_keys: rhai::Array,
|
||||
) -> Result<Rescue, Box<EvalAltResult>> {
|
||||
let keys: Vec<String> = authorized_keys
|
||||
.into_iter()
|
||||
.map(|k| k.into_string().unwrap())
|
||||
.collect();
|
||||
|
||||
client
|
||||
.enable_rescue_mode(server_number as i32, os, Some(&keys))
|
||||
.map_err(|e| e.to_string().into())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "disable_rescue_mode", return_raw)]
|
||||
pub fn disable_rescue_mode(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
) -> Result<Rescue, Box<EvalAltResult>> {
|
||||
client
|
||||
.disable_rescue_mode(server_number as i32)
|
||||
.map_err(|e| e.to_string().into())
|
||||
}
|
||||
}
|
||||
54
packages/clients/hetznerclient/src/rhai/mod.rs
Normal file
54
packages/clients/hetznerclient/src/rhai/mod.rs
Normal file
@@ -0,0 +1,54 @@
|
||||
use rhai::{Engine, EvalAltResult};
|
||||
|
||||
use crate::api::models::{
|
||||
AuctionServerProduct, AuctionTransaction, AuctionTransactionProduct, AuthorizedKey, Boot,
|
||||
Cancellation, Cpanel, HostKey, Linux, OrderAuctionServerBuilder, OrderServerAddonBuilder,
|
||||
OrderServerBuilder, OrderServerProduct, Plesk, Rescue, Server, ServerAddonProduct,
|
||||
ServerAddonResource, ServerAddonTransaction, SshKey, Transaction, TransactionProduct, Vnc,
|
||||
Windows,
|
||||
};
|
||||
|
||||
pub mod boot;
|
||||
pub mod printing;
|
||||
pub mod server;
|
||||
pub mod server_ordering;
|
||||
pub mod ssh_keys;
|
||||
|
||||
// here just register the hetzner module
|
||||
// here just register the hetzner module
/// Register every Hetzner model type and API function on the given Rhai
/// engine. `build_type` exposes each model's fields to scripts; the
/// per-area `register` calls add the callable API surface.
pub fn register_hetzner_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
    // TODO:register types
    engine.build_type::<Server>();
    engine.build_type::<SshKey>();
    engine.build_type::<Boot>();
    engine.build_type::<Rescue>();
    engine.build_type::<Linux>();
    engine.build_type::<Vnc>();
    engine.build_type::<Windows>();
    engine.build_type::<Plesk>();
    engine.build_type::<Cpanel>();
    engine.build_type::<Cancellation>();
    engine.build_type::<OrderServerProduct>();
    engine.build_type::<Transaction>();
    engine.build_type::<AuthorizedKey>();
    engine.build_type::<TransactionProduct>();
    engine.build_type::<HostKey>();
    engine.build_type::<AuctionServerProduct>();
    engine.build_type::<AuctionTransaction>();
    engine.build_type::<AuctionTransactionProduct>();
    engine.build_type::<OrderAuctionServerBuilder>();
    engine.build_type::<OrderServerBuilder>();
    engine.build_type::<ServerAddonProduct>();
    engine.build_type::<ServerAddonTransaction>();
    engine.build_type::<ServerAddonResource>();
    engine.build_type::<OrderServerAddonBuilder>();

    // Register the callable API functions per area.
    server::register(engine);
    ssh_keys::register(engine);
    boot::register(engine);
    server_ordering::register(engine);

    // TODO: push hetzner to scope as value client:
    // scope.push("hetzner", client);

    Ok(())
}
|
||||
43
packages/clients/hetznerclient/src/rhai/printing/mod.rs
Normal file
43
packages/clients/hetznerclient/src/rhai/printing/mod.rs
Normal file
@@ -0,0 +1,43 @@
|
||||
use rhai::{Array, Engine};
|
||||
use crate::{api::models::{OrderServerProduct, AuctionServerProduct, AuctionTransaction, ServerAddonProduct, ServerAddonTransaction, Server, SshKey}};
|
||||
|
||||
mod servers_table;
|
||||
mod ssh_keys_table;
|
||||
mod server_ordering_table;
|
||||
|
||||
// This will be called when we print(...) or pretty_print() an Array (with Dynamic values)
|
||||
pub fn pretty_print_dispatch(array: Array) {
|
||||
if array.is_empty() {
|
||||
println!("<empty table>");
|
||||
return;
|
||||
}
|
||||
|
||||
let first = &array[0];
|
||||
|
||||
if first.is::<Server>() {
|
||||
println!("Yeah first is server!");
|
||||
servers_table::pretty_print_servers(array);
|
||||
} else if first.is::<SshKey>() {
|
||||
ssh_keys_table::pretty_print_ssh_keys(array);
|
||||
}
|
||||
else if first.is::<OrderServerProduct>() {
|
||||
server_ordering_table::pretty_print_server_products(array);
|
||||
} else if first.is::<AuctionServerProduct>() {
|
||||
server_ordering_table::pretty_print_auction_server_products(array);
|
||||
} else if first.is::<AuctionTransaction>() {
|
||||
server_ordering_table::pretty_print_auction_transactions(array);
|
||||
} else if first.is::<ServerAddonProduct>() {
|
||||
server_ordering_table::pretty_print_server_addon_products(array);
|
||||
} else if first.is::<ServerAddonTransaction>() {
|
||||
server_ordering_table::pretty_print_server_addon_transactions(array);
|
||||
} else {
|
||||
// Generic fallback for other types
|
||||
for item in array {
|
||||
println!("{}", item.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Expose `pretty_print` to Rhai scripts for array values.
pub fn register(engine: &mut Engine) {
    engine.register_fn("pretty_print", pretty_print_dispatch);
}
|
||||
@@ -0,0 +1,293 @@
|
||||
use prettytable::{row, Table};
|
||||
use crate::api::models::{OrderServerProduct, ServerAddonProduct, ServerAddonTransaction, ServerAddonResource};
|
||||
|
||||
/// Render `OrderServerProduct` values as a table on stdout.
pub fn pretty_print_server_products(products: rhai::Array) {
    let mut table = Table::new();
    // Bold header row.
    table.add_row(row![b =>
        "ID",
        "Name",
        "Description",
        "Traffic",
        "Location",
        "Price (Net)",
        "Price (Gross)",
    ]);

    for product_dyn in products {
        // Elements that are not OrderServerProduct are silently skipped.
        if let Some(product) = product_dyn.try_cast::<OrderServerProduct>() {
            let mut price_net = "N/A".to_string();
            let mut price_gross = "N/A".to_string();

            // Only the first price entry is displayed.
            if let Some(first_price) = product.prices.first() {
                price_net = first_price.price.net.clone();
                price_gross = first_price.price.gross.clone();
            }

            table.add_row(row![
                product.id,
                product.name,
                product.description.join(", "),
                product.traffic,
                product.location.join(", "),
                price_net,
                price_gross,
            ]);
        }
    }
    table.printstd();
}
|
||||
|
||||
/// Render `AuctionServerProduct` values as a (very wide) table on stdout,
/// with a nested table for each product's orderable addons and their
/// per-location prices.
pub fn pretty_print_auction_server_products(products: rhai::Array) {
    let mut table = Table::new();
    // Bold header row.
    table.add_row(row![b =>
        "ID",
        "Name",
        "Description",
        "Traffic",
        "Distributions",
        "Architectures",
        "Languages",
        "CPU",
        "CPU Benchmark",
        "Memory Size (GB)",
        "HDD Size (GB)",
        "HDD Text",
        "HDD Count",
        "Datacenter",
        "Network Speed",
        "Price (Net)",
        "Price (Hourly Net)",
        "Price (Setup Net)",
        "Price (VAT)",
        "Price (Hourly VAT)",
        "Price (Setup VAT)",
        "Fixed Price",
        "Next Reduce (seconds)",
        "Next Reduce Date",
        "Orderable Addons",
    ]);

    for product_dyn in products {
        // Non-matching elements are silently skipped.
        if let Some(product) = product_dyn.try_cast::<crate::api::models::AuctionServerProduct>() {
            // Nested table: one row per orderable addon, each with its own
            // nested per-location price table.
            let mut addons_table = Table::new();
            addons_table.add_row(row![b => "ID", "Name", "Min", "Max", "Prices"]);
            for addon in &product.orderable_addons {
                let mut addon_prices_table = Table::new();
                addon_prices_table.add_row(row![b => "Location", "Net", "Gross", "Hourly Net", "Hourly Gross", "Setup Net", "Setup Gross"]);
                for price in &addon.prices {
                    addon_prices_table.add_row(row![
                        price.location,
                        price.price.net,
                        price.price.gross,
                        price.price.hourly_net,
                        price.price.hourly_gross,
                        price.price_setup.net,
                        price.price_setup.gross
                    ]);
                }
                addons_table.add_row(row![
                    addon.id,
                    addon.name,
                    addon.min,
                    addon.max,
                    addon_prices_table
                ]);
            }

            table.add_row(row![
                product.id,
                product.name,
                product.description.join(", "),
                product.traffic,
                product.dist.join(", "),
                // NOTE(review): the "Architectures" column reuses
                // `product.dist` — looks like a copy-paste bug; it should
                // probably print the product's arch field. Confirm against
                // models::AuctionServerProduct.
                product.dist.join(", "),
                product.lang.join(", "),
                product.cpu,
                product.cpu_benchmark,
                product.memory_size,
                product.hdd_size,
                product.hdd_text,
                product.hdd_count,
                product.datacenter,
                product.network_speed,
                product.price,
                product.price_hourly.as_deref().unwrap_or("N/A"),
                product.price_setup,
                product.price_with_vat,
                product.price_hourly_with_vat.as_deref().unwrap_or("N/A"),
                product.price_setup_with_vat,
                product.fixed_price,
                product.next_reduce,
                product.next_reduce_date,
                addons_table,
            ]);
        }
    }
    table.printstd();
}
|
||||
|
||||
/// Render `ServerAddonProduct` values as a table on stdout.
pub fn pretty_print_server_addon_products(products: rhai::Array) {
    let mut table = Table::new();
    // Bold header row.
    table.add_row(row![b =>
        "ID",
        "Name",
        "Type",
        "Location",
        "Price (Net)",
        "Price (Gross)",
        "Hourly Net",
        "Hourly Gross",
        "Setup Net",
        "Setup Gross",
    ]);

    for product_dyn in products {
        // Non-matching elements are silently skipped.
        if let Some(product) = product_dyn.try_cast::<ServerAddonProduct>() {
            table.add_row(row![
                product.id,
                product.name,
                product.product_type,
                product.price.location,
                product.price.price.net,
                product.price.price.gross,
                product.price.price.hourly_net,
                product.price.price.hourly_gross,
                product.price.price_setup.net,
                product.price.price_setup.gross,
            ]);
        }
    }
    table.printstd();
}
|
||||
|
||||
pub fn pretty_print_auction_transactions(transactions: rhai::Array) {
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![b =>
|
||||
"ID",
|
||||
"Date",
|
||||
"Status",
|
||||
"Server Number",
|
||||
"Server IP",
|
||||
"Comment",
|
||||
"Product ID",
|
||||
"Product Name",
|
||||
"Product Traffic",
|
||||
"Product Distributions",
|
||||
"Product Architectures",
|
||||
"Product Languages",
|
||||
"Product CPU",
|
||||
"Product CPU Benchmark",
|
||||
"Product Memory Size (GB)",
|
||||
"Product HDD Size (GB)",
|
||||
"Product HDD Text",
|
||||
"Product HDD Count",
|
||||
"Product Datacenter",
|
||||
"Product Network Speed",
|
||||
"Product Fixed Price",
|
||||
"Product Next Reduce (seconds)",
|
||||
"Product Next Reduce Date",
|
||||
"Addons",
|
||||
]);
|
||||
|
||||
for transaction_dyn in transactions {
|
||||
if let Some(transaction) = transaction_dyn.try_cast::<crate::api::models::AuctionTransaction>() {
|
||||
let _authorized_keys_table = {
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![b => "Name", "Fingerprint", "Type", "Size"]);
|
||||
for key in &transaction.authorized_key {
|
||||
table.add_row(row![
|
||||
key.key.name.as_deref().unwrap_or("N/A"),
|
||||
key.key.fingerprint.as_deref().unwrap_or("N/A"),
|
||||
key.key.key_type.as_deref().unwrap_or("N/A"),
|
||||
key.key.size.map_or("N/A".to_string(), |s| s.to_string())
|
||||
]);
|
||||
}
|
||||
table
|
||||
};
|
||||
|
||||
let _host_keys_table = {
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![b => "Fingerprint", "Type", "Size"]);
|
||||
for key in &transaction.host_key {
|
||||
table.add_row(row![
|
||||
key.key.fingerprint.as_deref().unwrap_or("N/A"),
|
||||
key.key.key_type.as_deref().unwrap_or("N/A"),
|
||||
key.key.size.map_or("N/A".to_string(), |s| s.to_string())
|
||||
]);
|
||||
}
|
||||
table
|
||||
};
|
||||
|
||||
table.add_row(row![
|
||||
transaction.id,
|
||||
transaction.date,
|
||||
transaction.status,
|
||||
transaction.server_number.map_or("N/A".to_string(), |id| id.to_string()),
|
||||
transaction.server_ip.as_deref().unwrap_or("N/A"),
|
||||
transaction.comment.as_deref().unwrap_or("N/A"),
|
||||
transaction.product.id,
|
||||
transaction.product.name,
|
||||
transaction.product.traffic,
|
||||
transaction.product.dist,
|
||||
transaction.product.arch.as_deref().unwrap_or("N/A"),
|
||||
transaction.product.lang,
|
||||
transaction.product.cpu,
|
||||
transaction.product.cpu_benchmark,
|
||||
transaction.product.memory_size,
|
||||
transaction.product.hdd_size,
|
||||
transaction.product.hdd_text,
|
||||
transaction.product.hdd_count,
|
||||
transaction.product.datacenter,
|
||||
transaction.product.network_speed,
|
||||
transaction.product.fixed_price.unwrap_or_default().to_string(),
|
||||
transaction
|
||||
.product
|
||||
.next_reduce
|
||||
.map_or("N/A".to_string(), |r| r.to_string()),
|
||||
transaction
|
||||
.product
|
||||
.next_reduce_date
|
||||
.as_deref()
|
||||
.unwrap_or("N/A"),
|
||||
transaction.addons.join(", "),
|
||||
]);
|
||||
}
|
||||
}
|
||||
table.printstd();
|
||||
}
|
||||
|
||||
pub fn pretty_print_server_addon_transactions(transactions: rhai::Array) {
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![b =>
|
||||
"ID",
|
||||
"Date",
|
||||
"Status",
|
||||
"Server Number",
|
||||
"Product ID",
|
||||
"Product Name",
|
||||
"Product Price",
|
||||
"Resources",
|
||||
]);
|
||||
|
||||
for transaction_dyn in transactions {
|
||||
if let Some(transaction) = transaction_dyn.try_cast::<ServerAddonTransaction>() {
|
||||
let mut resources_table = Table::new();
|
||||
resources_table.add_row(row![b => "Type", "ID"]);
|
||||
for resource in &transaction.resources {
|
||||
resources_table.add_row(row![resource.resource_type, resource.id]);
|
||||
}
|
||||
|
||||
table.add_row(row![
|
||||
transaction.id,
|
||||
transaction.date,
|
||||
transaction.status,
|
||||
transaction.server_number,
|
||||
transaction.product.id,
|
||||
transaction.product.name,
|
||||
transaction.product.price.to_string(),
|
||||
resources_table,
|
||||
]);
|
||||
}
|
||||
}
|
||||
table.printstd();
|
||||
}
|
||||
@@ -0,0 +1,30 @@
|
||||
use prettytable::{row, Table};
|
||||
use rhai::Array;
|
||||
|
||||
use super::Server;
|
||||
|
||||
pub fn pretty_print_servers(servers: Array) {
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![b =>
|
||||
"Number",
|
||||
"Name",
|
||||
"IP",
|
||||
"Product",
|
||||
"DC",
|
||||
"Status"
|
||||
]);
|
||||
|
||||
for server_dyn in servers {
|
||||
if let Some(server) = server_dyn.try_cast::<Server>() {
|
||||
table.add_row(row![
|
||||
server.server_number.to_string(),
|
||||
server.server_name,
|
||||
server.server_ip.unwrap_or("N/A".to_string()),
|
||||
server.product,
|
||||
server.dc,
|
||||
server.status
|
||||
]);
|
||||
}
|
||||
}
|
||||
table.printstd();
|
||||
}
|
||||
@@ -0,0 +1,26 @@
|
||||
use prettytable::{row, Table};
|
||||
use super::SshKey;
|
||||
|
||||
pub fn pretty_print_ssh_keys(keys: rhai::Array) {
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![b =>
|
||||
"Name",
|
||||
"Fingerprint",
|
||||
"Type",
|
||||
"Size",
|
||||
"Created At"
|
||||
]);
|
||||
|
||||
for key_dyn in keys {
|
||||
if let Some(key) = key_dyn.try_cast::<SshKey>() {
|
||||
table.add_row(row![
|
||||
key.name,
|
||||
key.fingerprint,
|
||||
key.key_type,
|
||||
key.size.to_string(),
|
||||
key.created_at
|
||||
]);
|
||||
}
|
||||
}
|
||||
table.printstd();
|
||||
}
|
||||
76
packages/clients/hetznerclient/src/rhai/server.rs
Normal file
76
packages/clients/hetznerclient/src/rhai/server.rs
Normal file
@@ -0,0 +1,76 @@
|
||||
use crate::api::{Client, models::Server};
|
||||
use rhai::{Array, Dynamic, plugin::*};
|
||||
|
||||
pub fn register(engine: &mut Engine) {
|
||||
let server_module = exported_module!(server_api);
|
||||
engine.register_global_module(server_module.into());
|
||||
}
|
||||
|
||||
#[export_module]
|
||||
pub mod server_api {
|
||||
use crate::api::models::Cancellation;
|
||||
|
||||
use super::*;
|
||||
use rhai::EvalAltResult;
|
||||
|
||||
#[rhai_fn(name = "get_server", return_raw)]
|
||||
pub fn get_server(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
) -> Result<Server, Box<EvalAltResult>> {
|
||||
client
|
||||
.get_server(server_number as i32)
|
||||
.map_err(|e| e.to_string().into())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_servers", return_raw)]
|
||||
pub fn get_servers(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
|
||||
let servers = client
|
||||
.get_servers()
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
println!("number of SERVERS we got: {:#?}", servers.len());
|
||||
Ok(servers.into_iter().map(Dynamic::from).collect())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "update_server_name", return_raw)]
|
||||
pub fn update_server_name(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
name: &str,
|
||||
) -> Result<Server, Box<EvalAltResult>> {
|
||||
client
|
||||
.update_server_name(server_number as i32, name)
|
||||
.map_err(|e| e.to_string().into())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_cancellation_data", return_raw)]
|
||||
pub fn get_cancellation_data(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
) -> Result<Cancellation, Box<EvalAltResult>> {
|
||||
client
|
||||
.get_cancellation_data(server_number as i32)
|
||||
.map_err(|e| e.to_string().into())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "cancel_server", return_raw)]
|
||||
pub fn cancel_server(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
cancellation_date: &str,
|
||||
) -> Result<Cancellation, Box<EvalAltResult>> {
|
||||
client
|
||||
.cancel_server(server_number as i32, cancellation_date)
|
||||
.map_err(|e| e.to_string().into())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "withdraw_cancellation", return_raw)]
|
||||
pub fn withdraw_cancellation(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
) -> Result<(), Box<EvalAltResult>> {
|
||||
client
|
||||
.withdraw_cancellation(server_number as i32)
|
||||
.map_err(|e| e.to_string().into())
|
||||
}
|
||||
}
|
||||
170
packages/clients/hetznerclient/src/rhai/server_ordering.rs
Normal file
170
packages/clients/hetznerclient/src/rhai/server_ordering.rs
Normal file
@@ -0,0 +1,170 @@
|
||||
use crate::api::{
|
||||
Client,
|
||||
models::{
|
||||
AuctionServerProduct, AuctionTransaction, OrderAuctionServerBuilder, OrderServerBuilder,
|
||||
OrderServerProduct, ServerAddonProduct, ServerAddonTransaction, Transaction,
|
||||
},
|
||||
};
|
||||
use rhai::{Array, Dynamic, plugin::*};
|
||||
|
||||
pub fn register(engine: &mut Engine) {
|
||||
let server_order_module = exported_module!(server_order_api);
|
||||
engine.register_global_module(server_order_module.into());
|
||||
}
|
||||
|
||||
#[export_module]
|
||||
pub mod server_order_api {
|
||||
use crate::api::models::OrderServerAddonBuilder;
|
||||
|
||||
#[rhai_fn(name = "get_server_products", return_raw)]
|
||||
pub fn get_server_ordering_product_overview(
|
||||
client: &mut Client,
|
||||
) -> Result<Array, Box<EvalAltResult>> {
|
||||
let overview_servers = client
|
||||
.get_server_products()
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(overview_servers.into_iter().map(Dynamic::from).collect())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_server_product_by_id", return_raw)]
|
||||
pub fn get_server_ordering_product_by_id(
|
||||
client: &mut Client,
|
||||
product_id: &str,
|
||||
) -> Result<OrderServerProduct, Box<EvalAltResult>> {
|
||||
let product = client
|
||||
.get_server_product_by_id(product_id)
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(product)
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "order_server", return_raw)]
|
||||
pub fn order_server(
|
||||
client: &mut Client,
|
||||
order: OrderServerBuilder,
|
||||
) -> Result<Transaction, Box<EvalAltResult>> {
|
||||
let transaction = client
|
||||
.order_server(order)
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(transaction)
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_transaction_by_id", return_raw)]
|
||||
pub fn get_transaction_by_id(
|
||||
client: &mut Client,
|
||||
transaction_id: &str,
|
||||
) -> Result<Transaction, Box<EvalAltResult>> {
|
||||
let transaction = client
|
||||
.get_transaction_by_id(transaction_id)
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(transaction)
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_transactions", return_raw)]
|
||||
pub fn get_transactions(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
|
||||
let transactions = client
|
||||
.get_transactions()
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(transactions.into_iter().map(Dynamic::from).collect())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_auction_server_products", return_raw)]
|
||||
pub fn get_auction_server_products(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
|
||||
let products = client
|
||||
.get_auction_server_products()
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(products.into_iter().map(Dynamic::from).collect())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_auction_server_product_by_id", return_raw)]
|
||||
pub fn get_auction_server_product_by_id(
|
||||
client: &mut Client,
|
||||
product_id: &str,
|
||||
) -> Result<AuctionServerProduct, Box<EvalAltResult>> {
|
||||
let product = client
|
||||
.get_auction_server_product_by_id(product_id)
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(product)
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_auction_transactions", return_raw)]
|
||||
pub fn get_auction_transactions(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
|
||||
let transactions = client
|
||||
.get_auction_transactions()
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(transactions.into_iter().map(Dynamic::from).collect())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_auction_transaction_by_id", return_raw)]
|
||||
pub fn get_auction_transaction_by_id(
|
||||
client: &mut Client,
|
||||
transaction_id: &str,
|
||||
) -> Result<AuctionTransaction, Box<EvalAltResult>> {
|
||||
let transaction = client
|
||||
.get_auction_transaction_by_id(transaction_id)
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(transaction)
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_server_addon_products", return_raw)]
|
||||
pub fn get_server_addon_products(
|
||||
client: &mut Client,
|
||||
server_number: i64,
|
||||
) -> Result<Array, Box<EvalAltResult>> {
|
||||
let products = client
|
||||
.get_server_addon_products(server_number)
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(products.into_iter().map(Dynamic::from).collect())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_server_addon_transactions", return_raw)]
|
||||
pub fn get_server_addon_transactions(
|
||||
client: &mut Client,
|
||||
) -> Result<Array, Box<EvalAltResult>> {
|
||||
let transactions = client
|
||||
.get_server_addon_transactions()
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(transactions.into_iter().map(Dynamic::from).collect())
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "get_server_addon_transaction_by_id", return_raw)]
|
||||
pub fn get_server_addon_transaction_by_id(
|
||||
client: &mut Client,
|
||||
transaction_id: &str,
|
||||
) -> Result<ServerAddonTransaction, Box<EvalAltResult>> {
|
||||
let transaction = client
|
||||
.get_server_addon_transaction_by_id(transaction_id)
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(transaction)
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "order_auction_server", return_raw)]
|
||||
pub fn order_auction_server(
|
||||
client: &mut Client,
|
||||
order: OrderAuctionServerBuilder,
|
||||
) -> Result<AuctionTransaction, Box<EvalAltResult>> {
|
||||
println!("Builder struct being used to order server: {:#?}", order);
|
||||
let transaction = client.order_auction_server(
|
||||
order.product_id,
|
||||
order.authorized_keys.unwrap_or(vec![]),
|
||||
order.dist,
|
||||
None,
|
||||
order.lang,
|
||||
order.comment,
|
||||
order.addon,
|
||||
order.test,
|
||||
).map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(transaction)
|
||||
}
|
||||
|
||||
#[rhai_fn(name = "order_server_addon", return_raw)]
|
||||
pub fn order_server_addon(
|
||||
client: &mut Client,
|
||||
order: OrderServerAddonBuilder,
|
||||
) -> Result<ServerAddonTransaction, Box<EvalAltResult>> {
|
||||
println!("Builder struct being used to order server addon: {:#?}", order);
|
||||
let transaction = client
|
||||
.order_server_addon(order)
|
||||
.map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
|
||||
Ok(transaction)
|
||||
}
|
||||
}
|
||||
89
packages/clients/hetznerclient/src/rhai/ssh_keys.rs
Normal file
89
packages/clients/hetznerclient/src/rhai/ssh_keys.rs
Normal file
@@ -0,0 +1,89 @@
|
||||
use crate::api::{Client, models::SshKey};
|
||||
use prettytable::{Table, row};
|
||||
use rhai::{Array, Dynamic, Engine, plugin::*};
|
||||
|
||||
pub fn register(engine: &mut Engine) {
|
||||
let ssh_keys_module = exported_module!(ssh_keys_api);
|
||||
engine.register_global_module(ssh_keys_module.into());
|
||||
}
|
||||
|
||||
/// Rhai bindings for the Hetzner SSH-key endpoints, plus a table
/// pretty-printer for script output. API failures surface as Rhai runtime
/// errors via `return_raw`.
#[export_module]
pub mod ssh_keys_api {
    use super::*;
    use rhai::EvalAltResult;

    /// Returns every SSH key on the account as a Rhai array of `SshKey`.
    #[rhai_fn(name = "get_ssh_keys", return_raw)]
    pub fn get_ssh_keys(client: &mut Client) -> Result<Array, Box<EvalAltResult>> {
        let ssh_keys = client
            .get_ssh_keys()
            .map_err(|e| Into::<Box<EvalAltResult>>::into(e.to_string()))?;
        Ok(ssh_keys.into_iter().map(Dynamic::from).collect())
    }

    /// Fetches a single SSH key by its fingerprint.
    #[rhai_fn(name = "get_ssh_key", return_raw)]
    pub fn get_ssh_key(
        client: &mut Client,
        fingerprint: &str,
    ) -> Result<SshKey, Box<EvalAltResult>> {
        client
            .get_ssh_key(fingerprint)
            .map_err(|e| e.to_string().into())
    }

    /// Uploads a new SSH key; `data` is the public-key material.
    #[rhai_fn(name = "add_ssh_key", return_raw)]
    pub fn add_ssh_key(
        client: &mut Client,
        name: &str,
        data: &str,
    ) -> Result<SshKey, Box<EvalAltResult>> {
        client
            .add_ssh_key(name, data)
            .map_err(|e| e.to_string().into())
    }

    /// Renames the SSH key identified by `fingerprint`.
    #[rhai_fn(name = "update_ssh_key_name", return_raw)]
    pub fn update_ssh_key_name(
        client: &mut Client,
        fingerprint: &str,
        name: &str,
    ) -> Result<SshKey, Box<EvalAltResult>> {
        client
            .update_ssh_key_name(fingerprint, name)
            .map_err(|e| e.to_string().into())
    }

    /// Deletes the SSH key identified by `fingerprint`.
    #[rhai_fn(name = "delete_ssh_key", return_raw)]
    pub fn delete_ssh_key(
        client: &mut Client,
        fingerprint: &str,
    ) -> Result<(), Box<EvalAltResult>> {
        client
            .delete_ssh_key(fingerprint)
            .map_err(|e| e.to_string().into())
    }

    /// Renders SSH keys as a table on stdout; non-`SshKey` elements are
    /// silently skipped.
    #[rhai_fn(name = "pretty_print")]
    pub fn pretty_print_ssh_keys(keys: Array) {
        let mut table = Table::new();
        table.add_row(row![b =>
            "Name",
            "Fingerprint",
            "Type",
            "Size",
            "Created At"
        ]);

        for key_dyn in keys {
            if let Some(key) = key_dyn.try_cast::<SshKey>() {
                table.add_row(row![
                    key.name,
                    key.fingerprint,
                    key.key_type,
                    key.size.to_string(),
                    key.created_at
                ]);
            }
        }
        table.printstd();
    }
}
|
||||
26
packages/clients/rfsclient/Cargo.toml
Normal file
26
packages/clients/rfsclient/Cargo.toml
Normal file
@@ -0,0 +1,26 @@
|
||||
[package]
|
||||
name = "sal-rfs-client"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "SAL RFS Client - Client library for Remote File System server"
|
||||
repository = "https://git.threefold.info/herocode/sal"
|
||||
license = "Apache-2.0"
|
||||
keywords = ["rfs", "client", "filesystem", "remote"]
|
||||
categories = ["filesystem", "api-bindings"]
|
||||
|
||||
[dependencies]
|
||||
openapi = { path = "./openapi" }
|
||||
thiserror.workspace = true
|
||||
url.workspace = true
|
||||
reqwest = { workspace = true, features = ["json", "multipart"] }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json.workspace = true
|
||||
log.workspace = true
|
||||
bytes.workspace = true
|
||||
futures.workspace = true
|
||||
rhai.workspace = true
|
||||
lazy_static.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.0"
|
||||
195
packages/clients/rfsclient/README.md
Normal file
195
packages/clients/rfsclient/README.md
Normal file
@@ -0,0 +1,195 @@
|
||||
# RFS Client
|
||||
|
||||
A Rust client library for interacting with the Remote File System (RFS) server.
|
||||
|
||||
## Overview
|
||||
|
||||
This client library provides a user-friendly wrapper around the OpenAPI-generated client code. It offers high-level abstractions for common operations such as:
|
||||
|
||||
- Authentication and session management
|
||||
- File uploads and downloads with progress tracking
|
||||
- Block-level operations and verification
|
||||
- FList creation, monitoring, and management
|
||||
- Timeout configuration and error handling
|
||||
|
||||
## Structure
|
||||
|
||||
The library is organized as follows:
|
||||
|
||||
- `client.rs`: Main client implementation with methods for interacting with the RFS server
|
||||
- `error.rs`: Error types and handling
|
||||
- `types.rs`: Type definitions and utilities
|
||||
|
||||
## Quick Start
|
||||
|
||||
```rust
|
||||
use rfs_client::RfsClient;
|
||||
use rfs_client::types::{ClientConfig, Credentials};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Create a client with custom configuration
|
||||
let config = ClientConfig {
|
||||
base_url: "http://localhost:8080".to_string(),
|
||||
credentials: Some(Credentials {
|
||||
username: "user".to_string(),
|
||||
password: "password".to_string(),
|
||||
}),
|
||||
timeout_seconds: 60,
|
||||
};
|
||||
|
||||
let mut client = RfsClient::new(config);
|
||||
|
||||
// Authenticate
|
||||
client.authenticate().await?;
|
||||
println!("Authentication successful");
|
||||
|
||||
// Upload a file
|
||||
let file_path = "/path/to/file.txt";
|
||||
let file_hash = client.upload_file(file_path, None).await?;
|
||||
println!("File uploaded with hash: {}", file_hash);
|
||||
|
||||
// Download the file
|
||||
let output_path = "/path/to/output.txt";
|
||||
client.download_file(&file_hash, output_path, None).await?;
|
||||
println!("File downloaded to {}", output_path);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Feature Examples
|
||||
|
||||
### Authentication
|
||||
|
||||
```rust
|
||||
// Create a client with authentication
|
||||
let config = ClientConfig {
|
||||
base_url: "http://localhost:8080".to_string(),
|
||||
credentials: Some(Credentials {
|
||||
username: "user".to_string(),
|
||||
password: "password".to_string(),
|
||||
}),
|
||||
timeout_seconds: 30,
|
||||
};
|
||||
|
||||
let mut client = RfsClient::new(config);
|
||||
|
||||
// Authenticate with the server
|
||||
client.authenticate().await?;
|
||||
if client.is_authenticated() {
|
||||
println!("Authentication successful");
|
||||
}
|
||||
```
|
||||
|
||||
### File Management
|
||||
|
||||
```rust
|
||||
// Upload a file with options
|
||||
let upload_options = UploadOptions {
|
||||
chunk_size: Some(1024 * 1024), // 1MB chunks
|
||||
verify: true,
|
||||
};
|
||||
|
||||
let file_hash = client.upload_file("/path/to/file.txt", Some(upload_options)).await?;
|
||||
|
||||
// Download the file
|
||||
let download_options = DownloadOptions {
|
||||
verify: true,
|
||||
};
|
||||
|
||||
client.download_file(&file_hash, "/path/to/output.txt", Some(download_options)).await?;
|
||||
```
|
||||
|
||||
### FList Operations
|
||||
|
||||
```rust
|
||||
// Create an FList from a Docker image
|
||||
let options = FlistOptions {
|
||||
auth: None,
|
||||
username: None,
|
||||
password: None,
|
||||
email: None,
|
||||
server_address: Some("docker.io".to_string()),
|
||||
identity_token: None,
|
||||
registry_token: None,
|
||||
};
|
||||
|
||||
let job_id = client.create_flist("alpine:latest", Some(options)).await?;
|
||||
|
||||
// Wait for FList creation with progress tracking
|
||||
let wait_options = WaitOptions {
|
||||
timeout_seconds: 60,
|
||||
poll_interval_ms: 1000,
|
||||
progress_callback: Some(Box::new(|state| {
|
||||
println!("Progress: FList state is now {:?}", state);
|
||||
})),
|
||||
};
|
||||
|
||||
let final_state = client.wait_for_flist_creation(&job_id, Some(wait_options)).await?;
|
||||
|
||||
// List available FLists
|
||||
let flists = client.list_flists().await?;
|
||||
|
||||
// Preview an FList
|
||||
let preview = client.preview_flist("flists/user/alpine-latest.fl").await?;
|
||||
|
||||
// Download an FList
|
||||
client.download_flist("flists/user/alpine-latest.fl", "/tmp/downloaded_flist.fl").await?;
|
||||
```
|
||||
|
||||
### Block Management
|
||||
|
||||
```rust
|
||||
// List blocks
|
||||
let blocks_list = client.list_blocks(None).await?;
|
||||
|
||||
// Check if a block exists
|
||||
let exists = client.check_block("block_hash").await?;
|
||||
|
||||
// Get block content
|
||||
let block_content = client.get_block("block_hash").await?;
|
||||
|
||||
// Upload a block
|
||||
let block_hash = client.upload_block("file_hash", 0, data).await?;
|
||||
|
||||
// Verify blocks
|
||||
let request = VerifyBlocksRequest { blocks: verify_blocks };
|
||||
let verify_result = client.verify_blocks(request).await?;
|
||||
```
|
||||
|
||||
## Complete Examples
|
||||
|
||||
For more detailed examples, check the `examples` directory:
|
||||
|
||||
- `authentication.rs`: Authentication and health check examples
|
||||
- `file_management.rs`: File upload and download with verification
|
||||
- `flist_operations.rs`: Complete FList creation, monitoring, listing, preview, and download
|
||||
- `block_management.rs`: Block-level operations including listing, verification, and upload
|
||||
- `wait_for_flist.rs`: Advanced FList creation with progress monitoring
|
||||
|
||||
Run an example with:
|
||||
|
||||
```bash
|
||||
cargo run --example flist_operations
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
This library wraps the OpenAPI-generated client located in the `openapi` directory. The OpenAPI client was generated using the OpenAPI Generator CLI.
|
||||
|
||||
To build the library:
|
||||
|
||||
```bash
|
||||
cargo build
|
||||
```
|
||||
|
||||
To run tests:
|
||||
|
||||
```bash
|
||||
cargo test -- --test-threads=1
|
||||
```
|
||||
|
||||
## License

Apache-2.0
|
||||
42
packages/clients/rfsclient/examples/authentication.rs
Normal file
42
packages/clients/rfsclient/examples/authentication.rs
Normal file
@@ -0,0 +1,42 @@
|
||||
use sal_rfs_client::types::{ClientConfig, Credentials};
|
||||
use sal_rfs_client::RfsClient;
|
||||
|
||||
/// Example: authenticating against an RFS server, plus an unauthenticated
/// health check. Assumes a server is reachable at localhost:8080 — TODO
/// confirm before running.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a client with authentication credentials
    let config = ClientConfig {
        base_url: "http://localhost:8080".to_string(),
        credentials: Some(Credentials {
            username: "user".to_string(),
            password: "password".to_string(),
        }),
        timeout_seconds: 30,
    };

    let mut client = RfsClient::new(config);
    println!("Client created with authentication credentials");

    // Authenticate with the server
    client.authenticate().await?;
    if client.is_authenticated() {
        println!("Authentication successful");
    } else {
        println!("Authentication failed");
    }

    // Create a client without authentication
    let config_no_auth = ClientConfig {
        base_url: "http://localhost:8080".to_string(),
        credentials: None,
        timeout_seconds: 30,
    };

    let client_no_auth = RfsClient::new(config_no_auth);
    println!("Client created without authentication credentials");

    // Check health endpoint (doesn't require authentication)
    let health = client_no_auth.health_check().await?;
    println!("Server health: {:?}", health);

    Ok(())
}
|
||||
153
packages/clients/rfsclient/examples/block_management.rs
Normal file
153
packages/clients/rfsclient/examples/block_management.rs
Normal file
@@ -0,0 +1,153 @@
|
||||
use openapi::models::{VerifyBlock, VerifyBlocksRequest};
|
||||
use sal_rfs_client::types::{ClientConfig, Credentials};
|
||||
use sal_rfs_client::RfsClient;
|
||||
|
||||
/// Example: end-to-end block management against an RFS server — upload a
/// file, enumerate and verify its blocks, then exercise the block-level
/// endpoints (list, check, download stats, content, user blocks, direct
/// upload). Assumes a server at localhost:8080 and write access to /tmp.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a client with authentication
    let config = ClientConfig {
        base_url: "http://localhost:8080".to_string(),
        credentials: Some(Credentials {
            username: "user".to_string(),
            password: "password".to_string(),
        }),
        timeout_seconds: 60,
    };

    let mut client = RfsClient::new(config);

    // Authenticate with the server
    client.authenticate().await?;
    println!("Authentication successful");

    // Create a test file to upload for block testing
    let test_file_path = "/tmp/block_test.txt";
    let test_content = "This is a test file for RFS client block management";
    std::fs::write(test_file_path, test_content)?;
    println!("Created test file at {}", test_file_path);

    // Upload the file to get blocks
    println!("Uploading file to get blocks...");
    let file_hash = client.upload_file(test_file_path, None).await?;
    println!("File uploaded with hash: {}", file_hash);

    // Get blocks by file hash
    println!("Getting blocks for file hash: {}", file_hash);
    let blocks = client.get_blocks_by_hash(&file_hash).await?;
    println!("Found {} blocks for the file", blocks.blocks.len());

    // Print block information
    for (i, block_data) in blocks.blocks.iter().enumerate() {
        println!(
            "Block {}: Hash={}, Index={}",
            i, block_data.hash, block_data.index
        );
    }

    // Verify blocks with complete information
    println!("Verifying blocks...");

    // Create a list of VerifyBlock objects with complete information
    let verify_blocks = blocks
        .blocks
        .iter()
        .map(|block| {
            VerifyBlock {
                block_hash: block.hash.clone(),
                block_index: block.index,
                file_hash: file_hash.clone(), // Using the actual file hash
            }
        })
        .collect::<Vec<_>>();

    // Create the request with the complete block information
    for block in verify_blocks.iter() {
        println!("Block: {}", block.block_hash);
        println!("Block index: {}", block.block_index);
        println!("File hash: {}", block.file_hash);
    }
    let request = VerifyBlocksRequest {
        blocks: verify_blocks,
    };

    // Send the verification request; `missing` lists blocks the server
    // does not have.
    let verify_result = client.verify_blocks(request).await?;
    println!(
        "Verification result: {} missing blocks",
        verify_result.missing.len()
    );
    for block in verify_result.missing.iter() {
        println!("Missing block: {}", block);
    }

    // List blocks (list_blocks_handler)
    println!("\n1. Listing all blocks with pagination...");
    let blocks_list = client.list_blocks(None).await?;
    println!("Server has {} blocks in total", blocks_list.len());
    if !blocks_list.is_empty() {
        let first_few = blocks_list
            .iter()
            .take(3)
            .map(|s| s.as_str())
            .collect::<Vec<_>>()
            .join(", ");
        println!("First few blocks: {}", first_few);
    }

    // Check if a block exists (check_block_handler)
    if !blocks.blocks.is_empty() {
        let block_to_check = &blocks.blocks[0].hash;
        println!("\n2. Checking if block exists: {}", block_to_check);
        let exists = client.check_block(block_to_check).await?;
        println!("Block exists: {}", exists);
    }

    // Get block downloads statistics (get_block_downloads_handler)
    if !blocks.blocks.is_empty() {
        let block_to_check = &blocks.blocks[0].hash;
        println!(
            "\n3. Getting download statistics for block: {}",
            block_to_check
        );
        let downloads = client.get_block_downloads(block_to_check).await?;
        println!(
            "Block has been downloaded {} times",
            downloads.downloads_count
        );
    }

    // Get a specific block content (get_block_handler)
    if !blocks.blocks.is_empty() {
        let block_to_get = &blocks.blocks[0].hash;
        println!("\n4. Getting content for block: {}", block_to_get);
        let block_content = client.get_block(block_to_get).await?;
        println!("Retrieved block with {} bytes", block_content.len());
    }

    // Get user blocks (get_user_blocks_handler)
    // NOTE(review): the printed step numbers jump from 4 to 6 — presumably a
    // step was removed; renumber the println! messages if unintentional.
    println!("\n6. Listing user blocks...");
    let user_blocks = client.get_user_blocks(Some(1), Some(10)).await?;
    println!(
        "User has {} blocks (showing page 1 with 10 per page)",
        user_blocks.total
    );
    for block in user_blocks.blocks.iter().take(3) {
        println!(" - Block: {}, Size: {}", block.hash, block.size);
    }

    // Upload a block (upload_block_handler)
    println!("\n7. Uploading a new test block...");
    let test_block_data = b"This is test block data for direct block upload";
    let new_file_hash = "test_file_hash_for_block_upload";
    let block_index = 0;
    let block_hash = client
        .upload_block(new_file_hash, block_index, test_block_data.to_vec())
        .await?;
    println!("Uploaded block with hash: {}", block_hash);

    // Clean up
    std::fs::remove_file(test_file_path)?;
    println!("Test file cleaned up");

    Ok(())
}
|
||||
66
packages/clients/rfsclient/examples/file_management.rs
Normal file
66
packages/clients/rfsclient/examples/file_management.rs
Normal file
@@ -0,0 +1,66 @@
|
||||
use sal_rfs_client::types::{ClientConfig, Credentials, DownloadOptions, UploadOptions};
|
||||
use sal_rfs_client::RfsClient;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Create a client with authentication
|
||||
let config = ClientConfig {
|
||||
base_url: "http://localhost:8080".to_string(),
|
||||
credentials: Some(Credentials {
|
||||
username: "user".to_string(),
|
||||
password: "password".to_string(),
|
||||
}),
|
||||
timeout_seconds: 60,
|
||||
};
|
||||
|
||||
let mut client = RfsClient::new(config);
|
||||
|
||||
// Authenticate with the server
|
||||
client.authenticate().await?;
|
||||
println!("Authentication successful");
|
||||
|
||||
// Create a test file to upload
|
||||
let test_file_path = "/tmp/test_upload.txt";
|
||||
std::fs::write(test_file_path, "This is a test file for RFS client upload")?;
|
||||
println!("Created test file at {}", test_file_path);
|
||||
|
||||
// Upload the file with options
|
||||
println!("Uploading file...");
|
||||
let upload_options = UploadOptions {
|
||||
chunk_size: Some(1024 * 1024), // 1MB chunks
|
||||
verify: true,
|
||||
};
|
||||
|
||||
let file_hash = client
|
||||
.upload_file(test_file_path, Some(upload_options))
|
||||
.await?;
|
||||
println!("File uploaded with hash: {}", file_hash);
|
||||
|
||||
// Download the file
|
||||
let download_path = "/tmp/test_download.txt";
|
||||
println!("Downloading file to {}...", download_path);
|
||||
|
||||
let download_options = DownloadOptions { verify: true };
|
||||
|
||||
client
|
||||
.download_file(&file_hash, download_path, Some(download_options))
|
||||
.await?;
|
||||
println!("File downloaded to {}", download_path);
|
||||
|
||||
// Verify the downloaded file matches the original
|
||||
let original_content = std::fs::read_to_string(test_file_path)?;
|
||||
let downloaded_content = std::fs::read_to_string(download_path)?;
|
||||
|
||||
if original_content == downloaded_content {
|
||||
println!("File contents match! Download successful.");
|
||||
} else {
|
||||
println!("ERROR: File contents do not match!");
|
||||
}
|
||||
|
||||
// Clean up test files
|
||||
std::fs::remove_file(test_file_path)?;
|
||||
std::fs::remove_file(download_path)?;
|
||||
println!("Test files cleaned up");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
176
packages/clients/rfsclient/examples/flist_operations.rs
Normal file
176
packages/clients/rfsclient/examples/flist_operations.rs
Normal file
@@ -0,0 +1,176 @@
|
||||
use sal_rfs_client::types::{ClientConfig, Credentials, FlistOptions, WaitOptions};
|
||||
use sal_rfs_client::RfsClient;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let parent_dir = "flists";
|
||||
// Create a client with authentication
|
||||
let config = ClientConfig {
|
||||
base_url: "http://localhost:8080".to_string(),
|
||||
credentials: Some(Credentials {
|
||||
username: "user".to_string(),
|
||||
password: "password".to_string(),
|
||||
}),
|
||||
timeout_seconds: 60,
|
||||
};
|
||||
|
||||
let mut client = RfsClient::new(config);
|
||||
|
||||
// Authenticate with the server
|
||||
client.authenticate().await?;
|
||||
println!("Authentication successful");
|
||||
|
||||
println!("\n1. CREATE FLIST - Creating an FList from a Docker image");
|
||||
let image_name = "alpine:latest";
|
||||
println!("Creating FList for image: {}", image_name);
|
||||
|
||||
// Use FlistOptions to specify additional parameters
|
||||
let options = FlistOptions {
|
||||
auth: None,
|
||||
username: None,
|
||||
password: None,
|
||||
email: None,
|
||||
server_address: Some("docker.io".to_string()),
|
||||
identity_token: None,
|
||||
registry_token: None,
|
||||
};
|
||||
|
||||
// Create the FList and handle potential conflict error
|
||||
let job_id = match client.create_flist(&image_name, Some(options)).await {
|
||||
Ok(id) => {
|
||||
println!("FList creation started with job ID: {}", id);
|
||||
Some(id)
|
||||
}
|
||||
Err(e) => {
|
||||
if e.to_string().contains("Conflict") {
|
||||
println!("FList already exists");
|
||||
None
|
||||
} else {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// 2. Check FList state if we have a job ID
|
||||
if let Some(job_id) = &job_id {
|
||||
println!("\n2. GET FLIST STATE - Checking FList creation state");
|
||||
let state = client.get_flist_state(job_id).await?;
|
||||
println!("Current FList state: {:?}", state.flist_state);
|
||||
|
||||
// 3. Wait for FList creation with progress reporting
|
||||
println!("\n3. WAIT FOR FLIST CREATION - Waiting for FList to be created with progress reporting");
|
||||
let wait_options = WaitOptions {
|
||||
timeout_seconds: 60, // Shorter timeout for the example
|
||||
poll_interval_ms: 1000,
|
||||
progress_callback: Some(Box::new(|state| {
|
||||
println!("Progress: FList state is now {:?}", state);
|
||||
// No return value needed (returns unit type)
|
||||
})),
|
||||
};
|
||||
|
||||
// Wait for the FList to be created (with a timeout)
|
||||
match client
|
||||
.wait_for_flist_creation(job_id, Some(wait_options))
|
||||
.await
|
||||
{
|
||||
Ok(final_state) => {
|
||||
println!("FList creation completed with state: {:?}", final_state);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Error waiting for FList creation: {}", e);
|
||||
// Continue with the example even if waiting fails
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// 4. List all available FLists
|
||||
println!("\n4. LIST FLISTS - Listing all available FLists");
|
||||
|
||||
// Variable to store the FList path for preview and download
|
||||
let mut flist_path_for_preview: Option<String> = None;
|
||||
|
||||
match client.list_flists().await {
|
||||
Ok(flists) => {
|
||||
println!("Found {} FList categories", flists.len());
|
||||
|
||||
for (category, files) in &flists {
|
||||
println!("Category: {}", category);
|
||||
for file in files.iter().take(2) {
|
||||
// Show only first 2 files per category
|
||||
println!(" - {} (size: {} bytes)", file.name, file.size);
|
||||
|
||||
// Save the first FList path for preview
|
||||
if flist_path_for_preview.is_none() {
|
||||
let path = format!("{}/{}/{}", parent_dir, category, file.name);
|
||||
flist_path_for_preview = Some(path);
|
||||
}
|
||||
}
|
||||
if files.len() > 2 {
|
||||
println!(" - ... and {} more files", files.len() - 2);
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Preview an FList if we found one
|
||||
if let Some(ref flist_path) = flist_path_for_preview {
|
||||
println!("\n5. PREVIEW FLIST - Previewing FList: {}", flist_path);
|
||||
match client.preview_flist(flist_path).await {
|
||||
Ok(preview) => {
|
||||
println!("FList preview for {}:", flist_path);
|
||||
println!(" - Checksum: {}", preview.checksum);
|
||||
println!(" - Metadata: {}", preview.metadata);
|
||||
|
||||
// Display content (list of strings)
|
||||
if !preview.content.is_empty() {
|
||||
println!(" - Content entries:");
|
||||
for (i, entry) in preview.content.iter().enumerate().take(5) {
|
||||
println!(" {}. {}", i + 1, entry);
|
||||
}
|
||||
if preview.content.len() > 5 {
|
||||
println!(" ... and {} more entries", preview.content.len() - 5);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => println!("Error previewing FList: {}", e),
|
||||
}
|
||||
} else {
|
||||
println!("No FLists available for preview");
|
||||
}
|
||||
}
|
||||
Err(e) => println!("Error listing FLists: {}", e),
|
||||
}
|
||||
|
||||
// 6. DOWNLOAD FLIST - Downloading an FList to a local file
|
||||
if let Some(ref flist_path) = flist_path_for_preview {
|
||||
println!("\n6. DOWNLOAD FLIST - Downloading FList: {}", flist_path);
|
||||
|
||||
// Create a temporary output path for the downloaded FList
|
||||
let output_path = "/tmp/downloaded_flist.fl";
|
||||
|
||||
match client.download_flist(flist_path, output_path).await {
|
||||
Ok(_) => {
|
||||
println!("FList successfully downloaded to {}", output_path);
|
||||
|
||||
// Get file size
|
||||
match std::fs::metadata(output_path) {
|
||||
Ok(metadata) => println!("Downloaded file size: {} bytes", metadata.len()),
|
||||
Err(e) => println!("Error getting file metadata: {}", e),
|
||||
}
|
||||
}
|
||||
Err(e) => println!("Error downloading FList: {}", e),
|
||||
}
|
||||
} else {
|
||||
println!("\n6. DOWNLOAD FLIST - No FList available for download");
|
||||
}
|
||||
|
||||
println!("\nAll FList operations demonstrated:");
|
||||
println!("1. create_flist - Create a new FList from a Docker image");
|
||||
println!("2. get_flist_state - Check the state of an FList creation job");
|
||||
println!(
|
||||
"3. wait_for_flist_creation - Wait for an FList to be created with progress reporting"
|
||||
);
|
||||
println!("4. list_flists - List all available FLists");
|
||||
println!("5. preview_flist - Preview the content of an FList");
|
||||
println!("6. download_flist - Download an FList to a local file");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
64
packages/clients/rfsclient/examples/wait_for_flist.rs
Normal file
64
packages/clients/rfsclient/examples/wait_for_flist.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
use openapi::models::FlistState;
|
||||
use sal_rfs_client::types::{ClientConfig, Credentials, WaitOptions};
|
||||
use sal_rfs_client::RfsClient;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Create a client with authentication
|
||||
let config = ClientConfig {
|
||||
base_url: "http://localhost:8080".to_string(),
|
||||
credentials: Some(Credentials {
|
||||
username: "user".to_string(),
|
||||
password: "password".to_string(),
|
||||
}),
|
||||
timeout_seconds: 60,
|
||||
};
|
||||
|
||||
let mut client = RfsClient::new(config);
|
||||
|
||||
// Authenticate with the server
|
||||
client.authenticate().await?;
|
||||
println!("Authentication successful");
|
||||
|
||||
// Create an FList from a Docker image
|
||||
let image_name = "redis:latest";
|
||||
println!("Creating FList for image: {}", image_name);
|
||||
|
||||
let job_id = client.create_flist(&image_name, None).await?;
|
||||
println!("FList creation started with job ID: {}", job_id);
|
||||
|
||||
// Set up options for waiting with progress reporting
|
||||
let options = WaitOptions {
|
||||
timeout_seconds: 600, // 10 minutes timeout
|
||||
poll_interval_ms: 2000, // Check every 2 seconds
|
||||
progress_callback: Some(Box::new(|state| match state {
|
||||
FlistState::FlistStateInProgress(info) => {
|
||||
println!(
|
||||
"Progress: {:.1}% - {}",
|
||||
info.in_progress.progress, info.in_progress.msg
|
||||
);
|
||||
}
|
||||
FlistState::FlistStateStarted(_) => {
|
||||
println!("FList creation started...");
|
||||
}
|
||||
FlistState::FlistStateAccepted(_) => {
|
||||
println!("FList creation request accepted...");
|
||||
}
|
||||
_ => println!("State: {:?}", state),
|
||||
})),
|
||||
};
|
||||
|
||||
// Wait for the FList to be created
|
||||
println!("Waiting for FList creation to complete...");
|
||||
|
||||
// Use ? operator to propagate errors properly
|
||||
let state = client
|
||||
.wait_for_flist_creation(&job_id, Some(options))
|
||||
.await
|
||||
.map_err(|e| -> Box<dyn std::error::Error> { Box::new(e) })?;
|
||||
|
||||
println!("FList created successfully!");
|
||||
println!("Final state: {:?}", state);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
1
packages/clients/rfsclient/openapi.json
Normal file
1
packages/clients/rfsclient/openapi.json
Normal file
File diff suppressed because one or more lines are too long
3
packages/clients/rfsclient/openapi/.gitignore
vendored
Normal file
3
packages/clients/rfsclient/openapi/.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
/target/
|
||||
**/*.rs.bk
|
||||
Cargo.lock
|
||||
23
packages/clients/rfsclient/openapi/.openapi-generator-ignore
Normal file
23
packages/clients/rfsclient/openapi/.openapi-generator-ignore
Normal file
@@ -0,0 +1,23 @@
|
||||
# OpenAPI Generator Ignore
|
||||
# Generated by openapi-generator https://github.com/openapitools/openapi-generator
|
||||
|
||||
# Use this file to prevent files from being overwritten by the generator.
|
||||
# The patterns follow closely to .gitignore or .dockerignore.
|
||||
|
||||
# As an example, the C# client generator defines ApiClient.cs.
|
||||
# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line:
|
||||
#ApiClient.cs
|
||||
|
||||
# You can match any string of characters against a directory, file or extension with a single asterisk (*):
|
||||
#foo/*/qux
|
||||
# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux
|
||||
|
||||
# You can recursively match patterns against a directory, file or extension with a double asterisk (**):
|
||||
#foo/**/qux
|
||||
# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux
|
||||
|
||||
# You can also negate patterns with an exclamation (!).
|
||||
# For example, you can ignore all files in a docs folder with the file extension .md:
|
||||
#docs/*.md
|
||||
# Then explicitly reverse the ignore rule for a single file:
|
||||
#!docs/README.md
|
||||
125
packages/clients/rfsclient/openapi/.openapi-generator/FILES
Normal file
125
packages/clients/rfsclient/openapi/.openapi-generator/FILES
Normal file
@@ -0,0 +1,125 @@
|
||||
.gitignore
|
||||
.travis.yml
|
||||
Cargo.toml
|
||||
README.md
|
||||
docs/AuthenticationApi.md
|
||||
docs/BlockDownloadsResponse.md
|
||||
docs/BlockInfo.md
|
||||
docs/BlockManagementApi.md
|
||||
docs/BlockUploadedResponse.md
|
||||
docs/BlocksResponse.md
|
||||
docs/DirListTemplate.md
|
||||
docs/DirLister.md
|
||||
docs/ErrorTemplate.md
|
||||
docs/FileDownloadRequest.md
|
||||
docs/FileInfo.md
|
||||
docs/FileManagementApi.md
|
||||
docs/FileUploadResponse.md
|
||||
docs/FlistBody.md
|
||||
docs/FlistManagementApi.md
|
||||
docs/FlistState.md
|
||||
docs/FlistStateAccepted.md
|
||||
docs/FlistStateCreated.md
|
||||
docs/FlistStateInProgress.md
|
||||
docs/FlistStateInfo.md
|
||||
docs/FlistStateResponse.md
|
||||
docs/FlistStateStarted.md
|
||||
docs/HealthResponse.md
|
||||
docs/Job.md
|
||||
docs/ListBlocksParams.md
|
||||
docs/ListBlocksResponse.md
|
||||
docs/PreviewResponse.md
|
||||
docs/ResponseError.md
|
||||
docs/ResponseErrorBadRequest.md
|
||||
docs/ResponseErrorConflict.md
|
||||
docs/ResponseErrorForbidden.md
|
||||
docs/ResponseErrorNotFound.md
|
||||
docs/ResponseErrorTemplateError.md
|
||||
docs/ResponseErrorUnauthorized.md
|
||||
docs/ResponseResult.md
|
||||
docs/ResponseResultBlockUploaded.md
|
||||
docs/ResponseResultDirTemplate.md
|
||||
docs/ResponseResultFileUploaded.md
|
||||
docs/ResponseResultFlistCreated.md
|
||||
docs/ResponseResultFlistState.md
|
||||
docs/ResponseResultFlists.md
|
||||
docs/ResponseResultPreviewFlist.md
|
||||
docs/ResponseResultRes.md
|
||||
docs/ResponseResultSignedIn.md
|
||||
docs/SignInBody.md
|
||||
docs/SignInResponse.md
|
||||
docs/SystemApi.md
|
||||
docs/TemplateErr.md
|
||||
docs/TemplateErrBadRequest.md
|
||||
docs/TemplateErrInternalServerError.md
|
||||
docs/TemplateErrNotFound.md
|
||||
docs/UploadBlockParams.md
|
||||
docs/UserBlockInfo.md
|
||||
docs/UserBlocksResponse.md
|
||||
docs/VerifyBlock.md
|
||||
docs/VerifyBlocksRequest.md
|
||||
docs/VerifyBlocksResponse.md
|
||||
docs/WebsiteServingApi.md
|
||||
git_push.sh
|
||||
src/apis/authentication_api.rs
|
||||
src/apis/block_management_api.rs
|
||||
src/apis/configuration.rs
|
||||
src/apis/file_management_api.rs
|
||||
src/apis/flist_management_api.rs
|
||||
src/apis/mod.rs
|
||||
src/apis/system_api.rs
|
||||
src/apis/website_serving_api.rs
|
||||
src/lib.rs
|
||||
src/models/block_downloads_response.rs
|
||||
src/models/block_info.rs
|
||||
src/models/block_uploaded_response.rs
|
||||
src/models/blocks_response.rs
|
||||
src/models/dir_list_template.rs
|
||||
src/models/dir_lister.rs
|
||||
src/models/error_template.rs
|
||||
src/models/file_download_request.rs
|
||||
src/models/file_info.rs
|
||||
src/models/file_upload_response.rs
|
||||
src/models/flist_body.rs
|
||||
src/models/flist_state.rs
|
||||
src/models/flist_state_accepted.rs
|
||||
src/models/flist_state_created.rs
|
||||
src/models/flist_state_in_progress.rs
|
||||
src/models/flist_state_info.rs
|
||||
src/models/flist_state_response.rs
|
||||
src/models/flist_state_started.rs
|
||||
src/models/health_response.rs
|
||||
src/models/job.rs
|
||||
src/models/list_blocks_params.rs
|
||||
src/models/list_blocks_response.rs
|
||||
src/models/mod.rs
|
||||
src/models/preview_response.rs
|
||||
src/models/response_error.rs
|
||||
src/models/response_error_bad_request.rs
|
||||
src/models/response_error_conflict.rs
|
||||
src/models/response_error_forbidden.rs
|
||||
src/models/response_error_not_found.rs
|
||||
src/models/response_error_template_error.rs
|
||||
src/models/response_error_unauthorized.rs
|
||||
src/models/response_result.rs
|
||||
src/models/response_result_block_uploaded.rs
|
||||
src/models/response_result_dir_template.rs
|
||||
src/models/response_result_file_uploaded.rs
|
||||
src/models/response_result_flist_created.rs
|
||||
src/models/response_result_flist_state.rs
|
||||
src/models/response_result_flists.rs
|
||||
src/models/response_result_preview_flist.rs
|
||||
src/models/response_result_res.rs
|
||||
src/models/response_result_signed_in.rs
|
||||
src/models/sign_in_body.rs
|
||||
src/models/sign_in_response.rs
|
||||
src/models/template_err.rs
|
||||
src/models/template_err_bad_request.rs
|
||||
src/models/template_err_internal_server_error.rs
|
||||
src/models/template_err_not_found.rs
|
||||
src/models/upload_block_params.rs
|
||||
src/models/user_block_info.rs
|
||||
src/models/user_blocks_response.rs
|
||||
src/models/verify_block.rs
|
||||
src/models/verify_blocks_request.rs
|
||||
src/models/verify_blocks_response.rs
|
||||
@@ -0,0 +1 @@
|
||||
7.13.0
|
||||
1
packages/clients/rfsclient/openapi/.travis.yml
Normal file
1
packages/clients/rfsclient/openapi/.travis.yml
Normal file
@@ -0,0 +1 @@
|
||||
language: rust
|
||||
15
packages/clients/rfsclient/openapi/Cargo.toml
Normal file
15
packages/clients/rfsclient/openapi/Cargo.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "openapi"
|
||||
version = "0.2.0"
|
||||
authors = ["OpenAPI Generator team and contributors"]
|
||||
description = "No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)"
|
||||
license = ""
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
serde = { version = "^1.0", features = ["derive"] }
|
||||
serde_with = { version = "^3.8", default-features = false, features = ["base64", "std", "macros"] }
|
||||
serde_json = "^1.0"
|
||||
serde_repr = "^0.1"
|
||||
url = "^2.5"
|
||||
reqwest = { version = "^0.12", default-features = false, features = ["json", "multipart"] }
|
||||
114
packages/clients/rfsclient/openapi/README.md
Normal file
114
packages/clients/rfsclient/openapi/README.md
Normal file
@@ -0,0 +1,114 @@
|
||||
# Rust API client for openapi
|
||||
|
||||
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
|
||||
|
||||
|
||||
## Overview
|
||||
|
||||
This API client was generated by the [OpenAPI Generator](https://openapi-generator.tech) project. By using the [openapi-spec](https://openapis.org) from a remote server, you can easily generate an API client.
|
||||
|
||||
- API version: 0.2.0
|
||||
- Package version: 0.2.0
|
||||
- Generator version: 7.13.0
|
||||
- Build package: `org.openapitools.codegen.languages.RustClientCodegen`
|
||||
|
||||
## Installation
|
||||
|
||||
Put the package under your project folder in a directory named `openapi` and add the following to `Cargo.toml` under `[dependencies]`:
|
||||
|
||||
```
|
||||
openapi = { path = "./openapi" }
|
||||
```
|
||||
|
||||
## Documentation for API Endpoints
|
||||
|
||||
All URIs are relative to *http://localhost*
|
||||
|
||||
Class | Method | HTTP request | Description
|
||||
------------ | ------------- | ------------- | -------------
|
||||
*AuthenticationApi* | [**sign_in_handler**](docs/AuthenticationApi.md#sign_in_handler) | **POST** /api/v1/signin |
|
||||
*BlockManagementApi* | [**check_block_handler**](docs/BlockManagementApi.md#check_block_handler) | **HEAD** /api/v1/block/{hash} | Checks a block by its hash.
|
||||
*BlockManagementApi* | [**get_block_downloads_handler**](docs/BlockManagementApi.md#get_block_downloads_handler) | **GET** /api/v1/block/{hash}/downloads | Retrieve the number of times a block has been downloaded.
|
||||
*BlockManagementApi* | [**get_block_handler**](docs/BlockManagementApi.md#get_block_handler) | **GET** /api/v1/block/{hash} | Retrieve a block by its hash.
|
||||
*BlockManagementApi* | [**get_blocks_by_hash_handler**](docs/BlockManagementApi.md#get_blocks_by_hash_handler) | **GET** /api/v1/blocks/{hash} | Retrieve blocks by hash (file hash or block hash).
|
||||
*BlockManagementApi* | [**get_user_blocks_handler**](docs/BlockManagementApi.md#get_user_blocks_handler) | **GET** /api/v1/user/blocks | Retrieve all blocks uploaded by a specific user.
|
||||
*BlockManagementApi* | [**list_blocks_handler**](docs/BlockManagementApi.md#list_blocks_handler) | **GET** /api/v1/blocks | List all block hashes in the server with pagination
|
||||
*BlockManagementApi* | [**upload_block_handler**](docs/BlockManagementApi.md#upload_block_handler) | **POST** /api/v1/block | Upload a block to the server.
|
||||
*BlockManagementApi* | [**verify_blocks_handler**](docs/BlockManagementApi.md#verify_blocks_handler) | **POST** /api/v1/block/verify | Verify if multiple blocks exist on the server.
|
||||
*FileManagementApi* | [**get_file_handler**](docs/FileManagementApi.md#get_file_handler) | **GET** /api/v1/file/{hash} | Retrieve a file by its hash from path, with optional custom filename in request body.
|
||||
*FileManagementApi* | [**upload_file_handler**](docs/FileManagementApi.md#upload_file_handler) | **POST** /api/v1/file | Upload a file to the server.
|
||||
*FlistManagementApi* | [**create_flist_handler**](docs/FlistManagementApi.md#create_flist_handler) | **POST** /api/v1/fl |
|
||||
*FlistManagementApi* | [**get_flist_state_handler**](docs/FlistManagementApi.md#get_flist_state_handler) | **GET** /api/v1/fl/{job_id} |
|
||||
*FlistManagementApi* | [**list_flists_handler**](docs/FlistManagementApi.md#list_flists_handler) | **GET** /api/v1/fl |
|
||||
*FlistManagementApi* | [**preview_flist_handler**](docs/FlistManagementApi.md#preview_flist_handler) | **GET** /api/v1/fl/preview/{flist_path} |
|
||||
*FlistManagementApi* | [**serve_flists**](docs/FlistManagementApi.md#serve_flists) | **GET** /{path} | Serve flist files from the server's filesystem
|
||||
*SystemApi* | [**health_check_handler**](docs/SystemApi.md#health_check_handler) | **GET** /api/v1 |
|
||||
*WebsiteServingApi* | [**serve_website_handler**](docs/WebsiteServingApi.md#serve_website_handler) | **GET** /api/v1/website/{website_hash}/{path} |
|
||||
|
||||
|
||||
## Documentation For Models
|
||||
|
||||
- [BlockDownloadsResponse](docs/BlockDownloadsResponse.md)
|
||||
- [BlockInfo](docs/BlockInfo.md)
|
||||
- [BlockUploadedResponse](docs/BlockUploadedResponse.md)
|
||||
- [BlocksResponse](docs/BlocksResponse.md)
|
||||
- [DirListTemplate](docs/DirListTemplate.md)
|
||||
- [DirLister](docs/DirLister.md)
|
||||
- [ErrorTemplate](docs/ErrorTemplate.md)
|
||||
- [FileDownloadRequest](docs/FileDownloadRequest.md)
|
||||
- [FileInfo](docs/FileInfo.md)
|
||||
- [FileUploadResponse](docs/FileUploadResponse.md)
|
||||
- [FlistBody](docs/FlistBody.md)
|
||||
- [FlistState](docs/FlistState.md)
|
||||
- [FlistStateAccepted](docs/FlistStateAccepted.md)
|
||||
- [FlistStateCreated](docs/FlistStateCreated.md)
|
||||
- [FlistStateInProgress](docs/FlistStateInProgress.md)
|
||||
- [FlistStateInfo](docs/FlistStateInfo.md)
|
||||
- [FlistStateResponse](docs/FlistStateResponse.md)
|
||||
- [FlistStateStarted](docs/FlistStateStarted.md)
|
||||
- [HealthResponse](docs/HealthResponse.md)
|
||||
- [Job](docs/Job.md)
|
||||
- [ListBlocksParams](docs/ListBlocksParams.md)
|
||||
- [ListBlocksResponse](docs/ListBlocksResponse.md)
|
||||
- [PreviewResponse](docs/PreviewResponse.md)
|
||||
- [ResponseError](docs/ResponseError.md)
|
||||
- [ResponseErrorBadRequest](docs/ResponseErrorBadRequest.md)
|
||||
- [ResponseErrorConflict](docs/ResponseErrorConflict.md)
|
||||
- [ResponseErrorForbidden](docs/ResponseErrorForbidden.md)
|
||||
- [ResponseErrorNotFound](docs/ResponseErrorNotFound.md)
|
||||
- [ResponseErrorTemplateError](docs/ResponseErrorTemplateError.md)
|
||||
- [ResponseErrorUnauthorized](docs/ResponseErrorUnauthorized.md)
|
||||
- [ResponseResult](docs/ResponseResult.md)
|
||||
- [ResponseResultBlockUploaded](docs/ResponseResultBlockUploaded.md)
|
||||
- [ResponseResultDirTemplate](docs/ResponseResultDirTemplate.md)
|
||||
- [ResponseResultFileUploaded](docs/ResponseResultFileUploaded.md)
|
||||
- [ResponseResultFlistCreated](docs/ResponseResultFlistCreated.md)
|
||||
- [ResponseResultFlistState](docs/ResponseResultFlistState.md)
|
||||
- [ResponseResultFlists](docs/ResponseResultFlists.md)
|
||||
- [ResponseResultPreviewFlist](docs/ResponseResultPreviewFlist.md)
|
||||
- [ResponseResultRes](docs/ResponseResultRes.md)
|
||||
- [ResponseResultSignedIn](docs/ResponseResultSignedIn.md)
|
||||
- [SignInBody](docs/SignInBody.md)
|
||||
- [SignInResponse](docs/SignInResponse.md)
|
||||
- [TemplateErr](docs/TemplateErr.md)
|
||||
- [TemplateErrBadRequest](docs/TemplateErrBadRequest.md)
|
||||
- [TemplateErrInternalServerError](docs/TemplateErrInternalServerError.md)
|
||||
- [TemplateErrNotFound](docs/TemplateErrNotFound.md)
|
||||
- [UploadBlockParams](docs/UploadBlockParams.md)
|
||||
- [UserBlockInfo](docs/UserBlockInfo.md)
|
||||
- [UserBlocksResponse](docs/UserBlocksResponse.md)
|
||||
- [VerifyBlock](docs/VerifyBlock.md)
|
||||
- [VerifyBlocksRequest](docs/VerifyBlocksRequest.md)
|
||||
- [VerifyBlocksResponse](docs/VerifyBlocksResponse.md)
|
||||
|
||||
|
||||
To get access to the crate's generated documentation, use:
|
||||
|
||||
```
|
||||
cargo doc --open
|
||||
```
|
||||
|
||||
## Author
|
||||
|
||||
|
||||
|
||||
37
packages/clients/rfsclient/openapi/docs/AuthenticationApi.md
Normal file
37
packages/clients/rfsclient/openapi/docs/AuthenticationApi.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# \AuthenticationApi
|
||||
|
||||
All URIs are relative to *http://localhost*
|
||||
|
||||
Method | HTTP request | Description
|
||||
------------- | ------------- | -------------
|
||||
[**sign_in_handler**](AuthenticationApi.md#sign_in_handler) | **POST** /api/v1/signin |
|
||||
|
||||
|
||||
|
||||
## sign_in_handler
|
||||
|
||||
> models::SignInResponse sign_in_handler(sign_in_body)
|
||||
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**sign_in_body** | [**SignInBody**](SignInBody.md) | | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::SignInResponse**](SignInResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: application/json
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
14
packages/clients/rfsclient/openapi/docs/Block.md
Normal file
14
packages/clients/rfsclient/openapi/docs/Block.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# Block
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**data** | [**std::path::PathBuf**](std::path::PathBuf.md) | |
|
||||
**hash** | **String** | |
|
||||
**index** | **i64** | |
|
||||
**size** | **i32** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
@@ -0,0 +1,13 @@
|
||||
# BlockDownloadsResponse
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**block_hash** | **String** | Block hash |
|
||||
**block_size** | **i64** | Size of the block in bytes |
|
||||
**downloads_count** | **i64** | Number of times the block has been downloaded |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
12
packages/clients/rfsclient/openapi/docs/BlockInfo.md
Normal file
12
packages/clients/rfsclient/openapi/docs/BlockInfo.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# BlockInfo
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**hash** | **String** | Block hash |
|
||||
**index** | **i64** | Block index within the file |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
250
packages/clients/rfsclient/openapi/docs/BlockManagementApi.md
Normal file
250
packages/clients/rfsclient/openapi/docs/BlockManagementApi.md
Normal file
@@ -0,0 +1,250 @@
|
||||
# \BlockManagementApi
|
||||
|
||||
All URIs are relative to *http://localhost*
|
||||
|
||||
Method | HTTP request | Description
|
||||
------------- | ------------- | -------------
|
||||
[**check_block_handler**](BlockManagementApi.md#check_block_handler) | **HEAD** /api/v1/block/{hash} | Checks a block by its hash.
|
||||
[**get_block_downloads_handler**](BlockManagementApi.md#get_block_downloads_handler) | **GET** /api/v1/block/{hash}/downloads | Retrieve the number of times a block has been downloaded.
|
||||
[**get_block_handler**](BlockManagementApi.md#get_block_handler) | **GET** /api/v1/block/{hash} | Retrieve a block by its hash.
|
||||
[**get_blocks_by_hash_handler**](BlockManagementApi.md#get_blocks_by_hash_handler) | **GET** /api/v1/blocks/{hash} | Retrieve blocks by hash (file hash or block hash).
|
||||
[**get_user_blocks_handler**](BlockManagementApi.md#get_user_blocks_handler) | **GET** /api/v1/user/blocks | Retrieve all blocks uploaded by a specific user.
|
||||
[**list_blocks_handler**](BlockManagementApi.md#list_blocks_handler) | **GET** /api/v1/blocks | List all block hashes on the server with pagination
|
||||
[**upload_block_handler**](BlockManagementApi.md#upload_block_handler) | **POST** /api/v1/block | Upload a block to the server.
|
||||
[**verify_blocks_handler**](BlockManagementApi.md#verify_blocks_handler) | **POST** /api/v1/block/verify | Verify if multiple blocks exist on the server.
|
||||
|
||||
|
||||
|
||||
## check_block_handler
|
||||
|
||||
> check_block_handler(hash)
|
||||
Checks a block by its hash.
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**hash** | **String** | Block hash | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
(empty response body)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## get_block_downloads_handler
|
||||
|
||||
> models::BlockDownloadsResponse get_block_downloads_handler(hash)
|
||||
Retrieve the number of times a block has been downloaded.
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**hash** | **String** | Block hash | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::BlockDownloadsResponse**](BlockDownloadsResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## get_block_handler
|
||||
|
||||
> std::path::PathBuf get_block_handler(hash)
|
||||
Retrieve a block by its hash.
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**hash** | **String** | Block hash | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**std::path::PathBuf**](std::path::PathBuf.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/octet-stream, application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## get_blocks_by_hash_handler
|
||||
|
||||
> models::BlocksResponse get_blocks_by_hash_handler(hash)
|
||||
Retrieve blocks by hash (file hash or block hash).
|
||||
|
||||
If the hash is a file hash, returns all blocks with their block index related to that file. If the hash is a block hash, returns the block itself.
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**hash** | **String** | File hash or block hash | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::BlocksResponse**](BlocksResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## get_user_blocks_handler
|
||||
|
||||
> models::UserBlocksResponse get_user_blocks_handler(page, per_page)
|
||||
Retrieve all blocks uploaded by a specific user.
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**page** | Option<**i32**> | Page number (1-indexed) | |
|
||||
**per_page** | Option<**i32**> | Number of items per page | |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::UserBlocksResponse**](UserBlocksResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
[bearerAuth](../README.md#bearerAuth)
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## list_blocks_handler
|
||||
|
||||
> models::ListBlocksResponse list_blocks_handler(page, per_page)
|
||||
List all block hashes on the server with pagination
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**page** | Option<**i32**> | Page number (1-indexed) | |
|
||||
**per_page** | Option<**i32**> | Number of items per page | |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::ListBlocksResponse**](ListBlocksResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## upload_block_handler
|
||||
|
||||
> models::BlockUploadedResponse upload_block_handler(file_hash, idx, body)
|
||||
Upload a block to the server.
|
||||
|
||||
If the block already exists, the server will return a 200 OK response. If the block is new, the server will return a 201 Created response.
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**file_hash** | **String** | File hash associated with the block | [required] |
|
||||
**idx** | **i64** | Block index within the file | [required] |
|
||||
**body** | **std::path::PathBuf** | Block data to upload | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::BlockUploadedResponse**](BlockUploadedResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
[bearerAuth](../README.md#bearerAuth)
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: application/octet-stream
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## verify_blocks_handler
|
||||
|
||||
> models::VerifyBlocksResponse verify_blocks_handler(verify_blocks_request)
|
||||
Verify if multiple blocks exist on the server.
|
||||
|
||||
Returns a list of missing blocks.
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**verify_blocks_request** | [**VerifyBlocksRequest**](VerifyBlocksRequest.md) | List of block hashes to verify | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::VerifyBlocksResponse**](VerifyBlocksResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: application/json
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
@@ -0,0 +1,12 @@
|
||||
# BlockUploadedResponse
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**hash** | **String** | |
|
||||
**message** | **String** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
11
packages/clients/rfsclient/openapi/docs/BlocksResponse.md
Normal file
11
packages/clients/rfsclient/openapi/docs/BlocksResponse.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# BlocksResponse
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**blocks** | [**Vec<models::BlockInfo>**](BlockInfo.md) | List of blocks with their indices |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
12
packages/clients/rfsclient/openapi/docs/DirListTemplate.md
Normal file
12
packages/clients/rfsclient/openapi/docs/DirListTemplate.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# DirListTemplate
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**cur_path** | **String** | |
|
||||
**lister** | [**models::DirLister**](DirLister.md) | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
11
packages/clients/rfsclient/openapi/docs/DirLister.md
Normal file
11
packages/clients/rfsclient/openapi/docs/DirLister.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# DirLister
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**files** | [**Vec<models::FileInfo>**](FileInfo.md) | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
13
packages/clients/rfsclient/openapi/docs/ErrorTemplate.md
Normal file
13
packages/clients/rfsclient/openapi/docs/ErrorTemplate.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# ErrorTemplate
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**cur_path** | **String** | |
|
||||
**err** | [**models::TemplateErr**](TemplateErr.md) | |
|
||||
**message** | **String** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
12
packages/clients/rfsclient/openapi/docs/File.md
Normal file
12
packages/clients/rfsclient/openapi/docs/File.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# File
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**file_content** | [**std::path::PathBuf**](std::path::PathBuf.md) | |
|
||||
**file_hash** | **String** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
# FileDownloadRequest
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**file_name** | **String** | The custom filename to use for download |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
16
packages/clients/rfsclient/openapi/docs/FileInfo.md
Normal file
16
packages/clients/rfsclient/openapi/docs/FileInfo.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# FileInfo
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**is_file** | **bool** | |
|
||||
**last_modified** | **i64** | |
|
||||
**name** | **String** | |
|
||||
**path_uri** | **String** | |
|
||||
**progress** | **f32** | |
|
||||
**size** | **i64** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
71
packages/clients/rfsclient/openapi/docs/FileManagementApi.md
Normal file
71
packages/clients/rfsclient/openapi/docs/FileManagementApi.md
Normal file
@@ -0,0 +1,71 @@
|
||||
# \FileManagementApi
|
||||
|
||||
All URIs are relative to *http://localhost*
|
||||
|
||||
Method | HTTP request | Description
|
||||
------------- | ------------- | -------------
|
||||
[**get_file_handler**](FileManagementApi.md#get_file_handler) | **GET** /api/v1/file/{hash} | Retrieve a file by its hash from the path, with an optional custom filename in the request body.
|
||||
[**upload_file_handler**](FileManagementApi.md#upload_file_handler) | **POST** /api/v1/file | Upload a file to the server.
|
||||
|
||||
|
||||
|
||||
## get_file_handler
|
||||
|
||||
> std::path::PathBuf get_file_handler(hash, file_download_request)
|
||||
Retrieve a file by its hash from the path, with an optional custom filename in the request body.
|
||||
|
||||
The file will be reconstructed from its blocks.
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**hash** | **String** | File hash | [required] |
|
||||
**file_download_request** | [**FileDownloadRequest**](FileDownloadRequest.md) | Request body carrying the custom filename for download (the filename itself is optional) | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**std::path::PathBuf**](std::path::PathBuf.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: application/json
|
||||
- **Accept**: application/octet-stream, application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## upload_file_handler
|
||||
|
||||
> models::FileUploadResponse upload_file_handler(body)
|
||||
Upload a file to the server.
|
||||
|
||||
The file will be split into blocks and stored in the database.
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**body** | **std::path::PathBuf** | File data to upload | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::FileUploadResponse**](FileUploadResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
[bearerAuth](../README.md#bearerAuth)
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: application/octet-stream
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
@@ -0,0 +1,12 @@
|
||||
# FileUploadResponse
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**file_hash** | **String** | The file hash |
|
||||
**message** | **String** | Message indicating success |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
18
packages/clients/rfsclient/openapi/docs/FlistBody.md
Normal file
18
packages/clients/rfsclient/openapi/docs/FlistBody.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# FlistBody
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**auth** | Option<**String**> | | [optional]
|
||||
**email** | Option<**String**> | | [optional]
|
||||
**identity_token** | Option<**String**> | | [optional]
|
||||
**image_name** | **String** | |
|
||||
**password** | Option<**String**> | | [optional]
|
||||
**registry_token** | Option<**String**> | | [optional]
|
||||
**server_address** | Option<**String**> | | [optional]
|
||||
**username** | Option<**String**> | | [optional]
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
150
packages/clients/rfsclient/openapi/docs/FlistManagementApi.md
Normal file
150
packages/clients/rfsclient/openapi/docs/FlistManagementApi.md
Normal file
@@ -0,0 +1,150 @@
|
||||
# \FlistManagementApi
|
||||
|
||||
All URIs are relative to *http://localhost*
|
||||
|
||||
Method | HTTP request | Description
|
||||
------------- | ------------- | -------------
|
||||
[**create_flist_handler**](FlistManagementApi.md#create_flist_handler) | **POST** /api/v1/fl |
|
||||
[**get_flist_state_handler**](FlistManagementApi.md#get_flist_state_handler) | **GET** /api/v1/fl/{job_id} |
|
||||
[**list_flists_handler**](FlistManagementApi.md#list_flists_handler) | **GET** /api/v1/fl |
|
||||
[**preview_flist_handler**](FlistManagementApi.md#preview_flist_handler) | **GET** /api/v1/fl/preview/{flist_path} |
|
||||
[**serve_flists**](FlistManagementApi.md#serve_flists) | **GET** /{path} | Serve flist files from the server's filesystem
|
||||
|
||||
|
||||
|
||||
## create_flist_handler
|
||||
|
||||
> models::Job create_flist_handler(flist_body)
|
||||
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**flist_body** | [**FlistBody**](FlistBody.md) | | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::Job**](Job.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
[bearerAuth](../README.md#bearerAuth)
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: application/json
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## get_flist_state_handler
|
||||
|
||||
> models::FlistStateResponse get_flist_state_handler(job_id)
|
||||
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**job_id** | **String** | flist job id | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::FlistStateResponse**](FlistStateResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
[bearerAuth](../README.md#bearerAuth)
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## list_flists_handler
|
||||
|
||||
> std::collections::HashMap<String, Vec<models::FileInfo>> list_flists_handler()
|
||||
|
||||
|
||||
### Parameters
|
||||
|
||||
This endpoint does not need any parameter.
|
||||
|
||||
### Return type
|
||||
|
||||
[**std::collections::HashMap<String, Vec<models::FileInfo>>**](Vec.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## preview_flist_handler
|
||||
|
||||
> models::PreviewResponse preview_flist_handler(flist_path)
|
||||
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**flist_path** | **String** | flist file path | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::PreviewResponse**](PreviewResponse.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
## serve_flists
|
||||
|
||||
> std::path::PathBuf serve_flists(path)
|
||||
Serve flist files from the server's filesystem
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**path** | **String** | Path to the flist file or directory to serve | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**std::path::PathBuf**](std::path::PathBuf.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/octet-stream, application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
37
packages/clients/rfsclient/openapi/docs/FlistServingApi.md
Normal file
37
packages/clients/rfsclient/openapi/docs/FlistServingApi.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# \FlistServingApi
|
||||
|
||||
All URIs are relative to *http://localhost*
|
||||
|
||||
Method | HTTP request | Description
|
||||
------------- | ------------- | -------------
|
||||
[**serve_flists**](FlistServingApi.md#serve_flists) | **GET** /{path} | Serve flist files from the server's filesystem
|
||||
|
||||
|
||||
|
||||
## serve_flists
|
||||
|
||||
> models::ResponseResult serve_flists(path)
|
||||
Serve flist files from the server's filesystem
|
||||
|
||||
### Parameters
|
||||
|
||||
|
||||
Name | Type | Description | Required | Notes
|
||||
------------- | ------------- | ------------- | ------------- | -------------
|
||||
**path** | **String** | Path to the flist file or directory to serve | [required] |
|
||||
|
||||
### Return type
|
||||
|
||||
[**models::ResponseResult**](ResponseResult.md)
|
||||
|
||||
### Authorization
|
||||
|
||||
No authorization required
|
||||
|
||||
### HTTP request headers
|
||||
|
||||
- **Content-Type**: Not defined
|
||||
- **Accept**: application/json
|
||||
|
||||
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
||||
|
||||
15
packages/clients/rfsclient/openapi/docs/FlistState.md
Normal file
15
packages/clients/rfsclient/openapi/docs/FlistState.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# FlistState
|
||||
|
||||
## Enum Variants
|
||||
|
||||
| Name | Description |
|
||||
|---- | -----|
|
||||
| FlistStateAccepted | |
|
||||
| FlistStateCreated | |
|
||||
| FlistStateInProgress | |
|
||||
| FlistStateStarted | |
|
||||
| String | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
# FlistStateAccepted
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**accepted** | **String** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
11
packages/clients/rfsclient/openapi/docs/FlistStateCreated.md
Normal file
11
packages/clients/rfsclient/openapi/docs/FlistStateCreated.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# FlistStateCreated
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**created** | **String** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
# FlistStateInProgress
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**in_progress** | [**models::FlistStateInfo**](FlistStateInfo.md) | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
12
packages/clients/rfsclient/openapi/docs/FlistStateInfo.md
Normal file
12
packages/clients/rfsclient/openapi/docs/FlistStateInfo.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# FlistStateInfo
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**msg** | **String** | |
|
||||
**progress** | **f32** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
# FlistStateResponse
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**flist_state** | [**models::FlistState**](FlistState.md) | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
11
packages/clients/rfsclient/openapi/docs/FlistStateStarted.md
Normal file
11
packages/clients/rfsclient/openapi/docs/FlistStateStarted.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# FlistStateStarted
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**started** | **String** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
11
packages/clients/rfsclient/openapi/docs/HealthResponse.md
Normal file
11
packages/clients/rfsclient/openapi/docs/HealthResponse.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# HealthResponse
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**msg** | **String** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
11
packages/clients/rfsclient/openapi/docs/Job.md
Normal file
11
packages/clients/rfsclient/openapi/docs/Job.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Job
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**id** | **String** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
12
packages/clients/rfsclient/openapi/docs/ListBlocksParams.md
Normal file
12
packages/clients/rfsclient/openapi/docs/ListBlocksParams.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# ListBlocksParams
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**page** | Option<**i32**> | Page number (1-indexed) | [optional][default to 1]
|
||||
**per_page** | Option<**i32**> | Number of items per page | [optional][default to 50]
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
# ListBlocksResponse
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**blocks** | **Vec<String>** | List of block hashes |
|
||||
**page** | **i32** | Current page number |
|
||||
**per_page** | **i32** | Number of items per page |
|
||||
**total** | **i64** | Total number of blocks |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
13
packages/clients/rfsclient/openapi/docs/PreviewResponse.md
Normal file
13
packages/clients/rfsclient/openapi/docs/PreviewResponse.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# PreviewResponse
|
||||
|
||||
## Properties
|
||||
|
||||
Name | Type | Description | Notes
|
||||
------------ | ------------- | ------------- | -------------
|
||||
**checksum** | **String** | |
|
||||
**content** | **Vec<String>** | |
|
||||
**metadata** | **String** | |
|
||||
|
||||
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
||||
|
||||
|
||||
17
packages/clients/rfsclient/openapi/docs/ResponseError.md
Normal file
17
packages/clients/rfsclient/openapi/docs/ResponseError.md
Normal file
@@ -0,0 +1,17 @@
# ResponseError

## Enum Variants

| Name | Description |
|---- | -----|
| ResponseErrorBadRequest |  |
| ResponseErrorConflict |  |
| ResponseErrorForbidden |  |
| ResponseErrorNotFound |  |
| ResponseErrorTemplateError |  |
| ResponseErrorUnauthorized |  |
| String |  |

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
@@ -0,0 +1,11 @@
# ResponseErrorBadRequest

## Properties

Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**bad_request** | **String** |  |

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
@@ -0,0 +1,11 @@
# ResponseErrorConflict

## Properties

Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**conflict** | **String** |  |

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
@@ -0,0 +1,11 @@
# ResponseErrorForbidden

## Properties

Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**forbidden** | **String** |  |

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
@@ -0,0 +1,11 @@
# ResponseErrorNotFound

## Properties

Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**not_found** | **String** |  |

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
@@ -0,0 +1,11 @@
# ResponseErrorTemplateError

## Properties

Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**template_error** | [**models::ErrorTemplate**](ErrorTemplate.md) |  |

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user