Merge branch 'development' into development_fix_mcpservers
1440
aiprompts/instructions/herodb_base.md
Normal file
File diff suppressed because it is too large
@@ -1,2 +0,0 @@
.example_1_actor
.example_2_actor
@@ -1,19 +0,0 @@
## Blank Actor Generation Example

This example shows how to generate a blank actor (unspecified, except for its name). The generated actor module contains all the boilerplate code of an actor that can be compiled, but lacks any state or methods.

Simply run:

```
chmod +x *.vsh
example_1.vsh
example_2.vsh
```

### Examples

There are two examples of blank actor generation.

- `example_1.vsh` generates the actor from a blank specification structure.
- `example_2.vsh` generates the actor from a blank OpenAPI Specification.

<!-- TODO: write below -->
Read []() to learn how actors are generated from specifications, and how the two examples differ.
@@ -1,7 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.hero.generation

generation.generate_actor(
	name: 'Example'
)
@@ -1,8 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.hero.generation

generation.generate_actor(
	name: 'Example'
	interfaces: []
)
@@ -1,23 +0,0 @@
# Hero Generation Example

## Getting started

### Step 1: Generate specification

### Step 2: Generate actor from specification

The script below generates the actor's OpenAPI handler from a given OpenAPI Specification. The generated code is written to `handler.v` in the example actor's module.

`generate_actor.vsh`
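The script itself is not included in this diff; a minimal sketch of its likely shape, modeled on the `generation.generate_actor` calls in the examples above (how the OpenAPI spec file is passed to the generator is an assumption, not shown here):

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.hero.generation

// Sketch only: mirrors example_2.vsh above; the parameter that points
// the generator at a specific OpenAPI spec file is not shown in this diff.
generation.generate_actor(
	name: 'Example'
	interfaces: []
)
```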
### Step 3: Run actor

The script below runs the actor's Redis RPC Queue Interface and uses the generated handler function to handle incoming RPCs. The Redis Interface listens to the RPC Queue assigned to the actor.

`run_interface_procedure.vsh`
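A sketch of what `run_interface_procedure.vsh` plausibly contains, based on the commented-out run script at the end of this example's diff:

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import example_actor

// The commented-out run script later in this diff invokes the same
// entry point; shown here uncommented as a sketch.
example_actor.run_interface_procedure()
```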
### Step 4: Run server

The script below runs the actor's RPC Queue Listener and uses the generated handler function to handle incoming RPCs.

`run_interface_openapi.vsh`

@@ -1 +0,0 @@
# Example Actor
@@ -1,34 +0,0 @@
module example_actor

import os
import freeflowuniverse.herolib.hero.baobab.stage { IActor, RunParams }
import freeflowuniverse.herolib.web.openapi
import time

const openapi_spec_path = '${os.dir(@FILE)}/specs/openapi.json'
const openapi_spec_json = os.read_file(openapi_spec_path) or { panic(err) }
const openapi_specification = openapi.json_decode(openapi_spec_json)!

struct ExampleActor {
	stage.Actor
}

fn new() !ExampleActor {
	return ExampleActor{stage.new_actor('example')}
}

pub fn run() ! {
	mut a_ := new()!
	mut a := IActor(a_)
	a.run()!
}

pub fn run_server(params RunParams) ! {
	mut a := new()!
	mut server := actor.new_server(
		redis_url: 'localhost:6379'
		redis_queue: a.name
		openapi_spec: openapi_specification
	)!
	server.run(params)
}
@@ -1,15 +0,0 @@
module example_actor

const test_port = 8101

pub fn test_new() ! {
	new() or { return error('Failed to create actor:\n${err}') }
}

pub fn test_run() ! {
	spawn run()
}

pub fn test_run_server() ! {
	spawn run_server(port: test_port)
}
@@ -1,5 +0,0 @@
module example_actor

pub fn (mut a ExampleActor) handle(method string, data string) !string {
	return data
}
@@ -1,346 +0,0 @@
{
  "openapi": "3.0.3",
  "info": {
    "title": "Pet Store API",
    "description": "A sample API for a pet store",
    "version": "1.0.0"
  },
  "servers": [
    {
      "url": "https://api.petstore.example.com/v1",
      "description": "Production server"
    },
    {
      "url": "https://staging.petstore.example.com/v1",
      "description": "Staging server"
    }
  ],
  "paths": {
    "/pets": {
      "get": {
        "summary": "List all pets",
        "operationId": "listPets",
        "parameters": [
          {
            "name": "limit",
            "in": "query",
            "description": "Maximum number of pets to return",
            "required": false,
            "schema": {
              "type": "integer",
              "format": "int32"
            }
          }
        ],
        "responses": {
          "200": {
            "description": "A paginated list of pets",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/Pets"
                }
              }
            }
          },
          "400": {
            "description": "Invalid request"
          }
        }
      },
      "post": {
        "summary": "Create a new pet",
        "operationId": "createPet",
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/NewPet"
              }
            }
          }
        },
        "responses": {
          "201": {
            "description": "Pet created",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/Pet"
                }
              }
            }
          },
          "400": {
            "description": "Invalid input"
          }
        }
      }
    },
    "/pets/{petId}": {
      "get": {
        "summary": "Get a pet by ID",
        "operationId": "getPet",
        "parameters": [
          {
            "name": "petId",
            "in": "path",
            "description": "ID of the pet to retrieve",
            "required": true,
            "schema": {
              "type": "integer",
              "format": "int64"
            }
          }
        ],
        "responses": {
          "200": {
            "description": "A pet",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/Pet"
                }
              }
            }
          },
          "404": {
            "description": "Pet not found"
          }
        }
      },
      "delete": {
        "summary": "Delete a pet by ID",
        "operationId": "deletePet",
        "parameters": [
          {
            "name": "petId",
            "in": "path",
            "description": "ID of the pet to delete",
            "required": true,
            "schema": {
              "type": "integer",
              "format": "int64"
            }
          }
        ],
        "responses": {
          "204": {
            "description": "Pet deleted"
          },
          "404": {
            "description": "Pet not found"
          }
        }
      }
    },
    "/orders": {
      "get": {
        "summary": "List all orders",
        "operationId": "listOrders",
        "responses": {
          "200": {
            "description": "A list of orders",
            "content": {
              "application/json": {
                "schema": {
                  "type": "array",
                  "items": {
                    "$ref": "#/components/schemas/Order"
                  }
                }
              }
            }
          }
        }
      }
    },
    "/orders/{orderId}": {
      "get": {
        "summary": "Get an order by ID",
        "operationId": "getOrder",
        "parameters": [
          {
            "name": "orderId",
            "in": "path",
            "description": "ID of the order to retrieve",
            "required": true,
            "schema": {
              "type": "integer",
              "format": "int64"
            }
          }
        ],
        "responses": {
          "200": {
            "description": "An order",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/Order"
                }
              }
            }
          },
          "404": {
            "description": "Order not found"
          }
        }
      },
      "delete": {
        "summary": "Delete an order by ID",
        "operationId": "deleteOrder",
        "parameters": [
          {
            "name": "orderId",
            "in": "path",
            "description": "ID of the order to delete",
            "required": true,
            "schema": {
              "type": "integer",
              "format": "int64"
            }
          }
        ],
        "responses": {
          "204": {
            "description": "Order deleted"
          },
          "404": {
            "description": "Order not found"
          }
        }
      }
    },
    "/users": {
      "post": {
        "summary": "Create a user",
        "operationId": "createUser",
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/NewUser"
              }
            }
          }
        },
        "responses": {
          "201": {
            "description": "User created",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/User"
                }
              }
            }
          }
        }
      }
    }
  },
  "components": {
    "schemas": {
      "Pet": {
        "type": "object",
        "required": ["id", "name"],
        "properties": {
          "id": {
            "type": "integer",
            "format": "int64"
          },
          "name": {
            "type": "string"
          },
          "tag": {
            "type": "string"
          }
        }
      },
      "NewPet": {
        "type": "object",
        "required": ["name"],
        "properties": {
          "name": {
            "type": "string"
          },
          "tag": {
            "type": "string"
          }
        }
      },
      "Pets": {
        "type": "array",
        "items": {
          "$ref": "#/components/schemas/Pet"
        }
      },
      "Order": {
        "type": "object",
        "required": ["id", "petId", "quantity", "shipDate"],
        "properties": {
          "id": {
            "type": "integer",
            "format": "int64"
          },
          "petId": {
            "type": "integer",
            "format": "int64"
          },
          "quantity": {
            "type": "integer",
            "format": "int32"
          },
          "shipDate": {
            "type": "string",
            "format": "date-time"
          },
          "status": {
            "type": "string",
            "enum": ["placed", "approved", "delivered"]
          },
          "complete": {
            "type": "boolean"
          }
        }
      },
      "User": {
        "type": "object",
        "required": ["id", "username"],
        "properties": {
          "id": {
            "type": "integer",
            "format": "int64"
          },
          "username": {
            "type": "string"
          },
          "email": {
            "type": "string"
          },
          "phone": {
            "type": "string"
          }
        }
      },
      "NewUser": {
        "type": "object",
        "required": ["username"],
        "properties": {
          "username": {
            "type": "string"
          },
          "email": {
            "type": "string"
          },
          "phone": {
            "type": "string"
          }
        }
      }
    }
  }
}
@@ -1,5 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

// import example_actor

// example_actor.run_interface_procedure()
6
examples/hero/herofs/herofs_advanced.vsh
Normal file → Executable file
@@ -194,11 +194,13 @@ fn main() {
 	// 1. Move a file to multiple directories (hard link-like behavior)
 	println('Moving logo.png to both images and docs directories...')
 	image_file = fs_factory.fs_file.get(image_file_id)!
-	image_file = fs_factory.fs_file.move(image_file_id, [images_dir_id, docs_dir_id])!
+	fs_factory.fs_file.move(image_file_id, [images_dir_id, docs_dir_id])!
+	image_file = fs_factory.fs_file.get(image_file_id)!

 	// 2. Rename a file
 	println('Renaming main.v to app.v...')
-	code_file = fs_factory.fs_file.rename(code_file_id, 'app.v')!
+	fs_factory.fs_file.rename(code_file_id, 'app.v')!
+	code_file = fs_factory.fs_file.get(code_file_id)!

 	// 3. Update file metadata
 	println('Updating file metadata...')
1
examples/hero/herorpc/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
herorpc_example
22
examples/hero/herorpc/herorpc_example.vsh
Executable file
@@ -0,0 +1,22 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.hero.heromodels.rpc

println('
#to test the discover function:
echo \'\{"jsonrpc":"2.0","method":"rpc.discover","params":[],"id":1\}\' \\
| nc -U /tmp/heromodels
\'
#to test interactively:

nc -U /tmp/heromodels

then e.g. do

\{"jsonrpc":"2.0","method":"comment_set","params":{"comment":"Hello world!","parent":0,"author":42},"id":1\}

needs to be on one line for openrpc to work

')

rpc.start()!
2
examples/hero/openapi/.gitignore
vendored
@@ -1,2 +0,0 @@
actor
server
@@ -1,103 +0,0 @@
# OpenAPI Server with Redis-Based RPC and Actor

This project demonstrates how to implement a system consisting of:

1. An OpenAPI Server: Handles HTTP requests and translates them into procedure calls.
2. A Redis-Based RPC Processor: Acts as the communication layer between the server and the actor.
3. An Actor: Listens for RPC requests on a Redis queue and executes predefined procedures.

## Features

• OpenAPI server to manage HTTP requests.
• Redis-based RPC mechanism for message passing.
• Actor pattern for executing and responding to RPC tasks.

## Setup Instructions

Prerequisites

• Redis installed and running on localhost:6379.
• V programming language installed.

Steps to Run

1. OpenAPI Specification

Place the OpenAPI JSON specification file at:

`data/openapi.json`

This file defines the API endpoints and their parameters.

2. Start the Redis Server

Ensure Redis is running locally:

redis-server

3. Start the OpenAPI Server

Run the OpenAPI server:

`server.vsh`

The server listens on port 8080 by default.

4. Start the Actor

Run the actor service:

`actor.vsh`

The actor listens to the `procedure_queue` for RPC messages.

Usage

API Endpoints

The API supports operations like:

• Create a Pet: Adds a new pet.
• List Pets: Lists all pets or limits results.
• Get Pet by ID: Fetches a specific pet by ID.
• Delete Pet: Removes a pet by ID.
• Similar operations for users and orders.

Use tools like curl, Postman, or a browser to interact with the endpoints.

Example Requests

Create a Pet

curl -X POST http://localhost:8080/pets -d '{"name": "Buddy", "tag": "dog"}' -H "Content-Type: application/json"

List Pets

curl http://localhost:8080/pets

## Code Overview

1. OpenAPI Server

• Reads the OpenAPI JSON file.
• Maps HTTP requests to procedure calls using the operation ID.
• Sends procedure calls to the Redis RPC queue.

2. Redis-Based RPC

• Implements a simple message queue using Redis.
• Encodes requests as JSON strings for transport (a condensed round-trip sketch follows).
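For orientation, here is a condensed sketch of that round trip from the server's side, using the `redisclient` and `processor` APIs exactly as they appear in the `server.vsh` and `actor.vsh` scripts later in this diff:

```v
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.hero.processor { ProcedureCall, ProcessParams, Processor }

fn main() {
	// Server and actor share the same Redis-backed queue.
	mut redis := redisclient.new('localhost:6379')!
	mut proc := Processor{
		rpc: redis.rpc_get('procedure_queue')
	}
	// Encode the call as JSON, push it onto the queue, wait for the reply.
	response := proc.process(ProcedureCall{
		method: 'listPets'
		params: '[]'
	}, ProcessParams{ timeout: 30 })!
	println(response.result)
}
```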
3. Actor

• Listens to the `procedure_queue` Redis queue.
• Executes tasks like managing pets, orders, and users.
• Responds with JSON-encoded results or errors.

## Extending the System

Add New Procedures

1. Define new methods in the Actor to handle tasks (sketched below).
2. Add corresponding logic in the DataStore for storage operations.
3. Update the OpenAPI JSON file to expose new endpoints.
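As a hedged illustration, a hypothetical `countPets` procedure would add a DataStore method plus a matching branch in the actor's `handle_method` dispatcher (the names here are invented for the example; the surrounding structures are the ones defined in `actor.vsh` below):

```v
// Hypothetical DataStore method backing the new procedure.
fn (mut store DataStore) count_pets() int {
	return store.pets.len
}

// Corresponding branch inside fn (mut actor Actor) handle_method(cmd string, data string) !string:
// 'countPets' {
// 	return json.encode(actor.data_store.count_pets())
// }
```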
Modify Data Models

1. Update the Pet, Order, and User structs as needed.
2. Adjust the DataStore methods to handle the changes.

Troubleshooting

• Redis Connection Issues: Ensure Redis is running and accessible on localhost:6379.
• JSON Parsing Errors: Validate the input JSON against the OpenAPI specification.
Binary file not shown.
@@ -1,233 +0,0 @@
#!/usr/bin/env -S v -w -n -enable-globals run

import os
import time
import veb
import json
import x.json2
import net.http
import freeflowuniverse.herolib.web.openapi
import freeflowuniverse.herolib.hero.processor
import freeflowuniverse.herolib.core.redisclient

@[heap]
struct Actor {
mut:
	rpc        redisclient.RedisRpc
	data_store DataStore
}

pub struct DataStore {
mut:
	pets   map[int]Pet
	orders map[int]Order
	users  map[int]User
}

struct Pet {
	id   int
	name string
	tag  string
}

struct Order {
	id        int
	pet_id    int
	quantity  int
	ship_date string
	status    string
	complete  bool
}

struct User {
	id       int
	username string
	email    string
	phone    string
}

// Entry point for the actor
fn main() {
	mut redis := redisclient.new('localhost:6379') or { panic(err) }
	mut rpc := redis.rpc_get('procedure_queue')

	mut actor := Actor{
		rpc: rpc
		data_store: DataStore{}
	}

	actor.listen() or { panic(err) }
}

// Actor listens to the Redis queue for method invocations
fn (mut actor Actor) listen() ! {
	println('Actor started and listening for tasks...')
	for {
		actor.rpc.process(actor.handle_method)!
		time.sleep(time.millisecond * 100) // Prevent CPU spinning
	}
}

// Handle method invocations
fn (mut actor Actor) handle_method(cmd string, data string) !string {
	param_anys := json2.raw_decode(data)!.arr()
	match cmd {
		'listPets' {
			pets := if param_anys.len == 0 {
				actor.data_store.list_pets()
			} else {
				params := json.decode(ListPetParams, param_anys[0].str())!
				actor.data_store.list_pets(params)
			}
			return json.encode(pets)
		}
		'createPet' {
			response := if param_anys.len == 0 {
				return error('at least data expected')
			} else if param_anys.len == 1 {
				payload := json.decode(NewPet, param_anys[0].str())!
				actor.data_store.create_pet(payload)
			} else {
				return error('expected 1 param, found too many')
			}
			// data := json.decode(NewPet, data) or { return error('Invalid pet data: $err') }
			// created_pet := actor.data_store.create_pet(pet)
			return json.encode(response)
		}
		'getPet' {
			response := if param_anys.len == 0 {
				return error('at least data expected')
			} else if param_anys.len == 1 {
				payload := param_anys[0].int()
				actor.data_store.get_pet(payload)!
			} else {
				return error('expected 1 param, found too many')
			}

			return json.encode(response)
		}
		'deletePet' {
			params := json.decode(map[string]int, data) or {
				return error('Invalid params: ${err}')
			}
			actor.data_store.delete_pet(params['petId']) or {
				return error('Pet not found: ${err}')
			}
			return json.encode({
				'message': 'Pet deleted'
			})
		}
		'listOrders' {
			orders := actor.data_store.list_orders()
			return json.encode(orders)
		}
		'getOrder' {
			params := json.decode(map[string]int, data) or {
				return error('Invalid params: ${err}')
			}
			order := actor.data_store.get_order(params['orderId']) or {
				return error('Order not found: ${err}')
			}
			return json.encode(order)
		}
		'deleteOrder' {
			params := json.decode(map[string]int, data) or {
				return error('Invalid params: ${err}')
			}
			actor.data_store.delete_order(params['orderId']) or {
				return error('Order not found: ${err}')
			}
			return json.encode({
				'message': 'Order deleted'
			})
		}
		'createUser' {
			user := json.decode(NewUser, data) or { return error('Invalid user data: ${err}') }
			created_user := actor.data_store.create_user(user)
			return json.encode(created_user)
		}
		else {
			return error('Unknown method: ${cmd}')
		}
	}
}

@[params]
pub struct ListPetParams {
	limit u32
}

// DataStore methods for managing data
fn (mut store DataStore) list_pets(params ListPetParams) []Pet {
	if params.limit > 0 {
		if params.limit >= store.pets.values().len {
			return store.pets.values()
		}
		return store.pets.values()[..params.limit]
	}
	return store.pets.values()
}

fn (mut store DataStore) create_pet(new_pet NewPet) Pet {
	id := store.pets.keys().len + 1
	pet := Pet{
		id:   id
		name: new_pet.name
		tag:  new_pet.tag
	}
	store.pets[id] = pet
	return pet
}

fn (mut store DataStore) get_pet(id int) !Pet {
	return store.pets[id] or { return error('Pet with id ${id} not found.') }
}

fn (mut store DataStore) delete_pet(id int) ! {
	if id in store.pets {
		store.pets.delete(id)
		return
	}
	return error('Pet not found')
}

fn (mut store DataStore) list_orders() []Order {
	return store.orders.values()
}

fn (mut store DataStore) get_order(id int) !Order {
	return store.orders[id] or { none }
}

fn (mut store DataStore) delete_order(id int) ! {
	if id in store.orders {
		store.orders.delete(id)
		return
	}
	return error('Order not found')
}

fn (mut store DataStore) create_user(new_user NewUser) User {
	id := store.users.keys().len + 1
	user := User{
		id:       id
		username: new_user.username
		email:    new_user.email
		phone:    new_user.phone
	}
	store.users[id] = user
	return user
}

// NewPet struct for creating a pet
struct NewPet {
	name string
	tag  string
}

// NewUser struct for creating a user
struct NewUser {
	username string
	email    string
	phone    string
}
@@ -1,346 +0,0 @@
{
  "openapi": "3.0.3",
  "info": {
    "title": "Pet Store API",
    "description": "A sample API for a pet store",
    "version": "1.0.0"
  },
  "servers": [
    {
      "url": "https://api.petstore.example.com/v1",
      "description": "Production server"
    },
    {
      "url": "https://staging.petstore.example.com/v1",
      "description": "Staging server"
    }
  ],
  "paths": {
    "/pets": {
      "get": {
        "summary": "List all pets",
        "operationId": "listPets",
        "parameters": [
          {
            "name": "limit",
            "in": "query",
            "description": "Maximum number of pets to return",
            "required": false,
            "schema": {
              "type": "integer",
              "format": "int32"
            }
          }
        ],
        "responses": {
          "200": {
            "description": "A paginated list of pets",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/Pets"
                }
              }
            }
          },
          "400": {
            "description": "Invalid request"
          }
        }
      },
      "post": {
        "summary": "Create a new pet",
        "operationId": "createPet",
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/NewPet"
              }
            }
          }
        },
        "responses": {
          "201": {
            "description": "Pet created",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/Pet"
                }
              }
            }
          },
          "400": {
            "description": "Invalid input"
          }
        }
      }
    },
    "/pets/{petId}": {
      "get": {
        "summary": "Get a pet by ID",
        "operationId": "getPet",
        "parameters": [
          {
            "name": "petId",
            "in": "path",
            "description": "ID of the pet to retrieve",
            "required": true,
            "schema": {
              "type": "integer",
              "format": "int64"
            }
          }
        ],
        "responses": {
          "200": {
            "description": "A pet",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/Pet"
                }
              }
            }
          },
          "404": {
            "description": "Pet not found"
          }
        }
      },
      "delete": {
        "summary": "Delete a pet by ID",
        "operationId": "deletePet",
        "parameters": [
          {
            "name": "petId",
            "in": "path",
            "description": "ID of the pet to delete",
            "required": true,
            "schema": {
              "type": "integer",
              "format": "int64"
            }
          }
        ],
        "responses": {
          "204": {
            "description": "Pet deleted"
          },
          "404": {
            "description": "Pet not found"
          }
        }
      }
    },
    "/orders": {
      "get": {
        "summary": "List all orders",
        "operationId": "listOrders",
        "responses": {
          "200": {
            "description": "A list of orders",
            "content": {
              "application/json": {
                "schema": {
                  "type": "array",
                  "items": {
                    "$ref": "#/components/schemas/Order"
                  }
                }
              }
            }
          }
        }
      }
    },
    "/orders/{orderId}": {
      "get": {
        "summary": "Get an order by ID",
        "operationId": "getOrder",
        "parameters": [
          {
            "name": "orderId",
            "in": "path",
            "description": "ID of the order to retrieve",
            "required": true,
            "schema": {
              "type": "integer",
              "format": "int64"
            }
          }
        ],
        "responses": {
          "200": {
            "description": "An order",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/Order"
                }
              }
            }
          },
          "404": {
            "description": "Order not found"
          }
        }
      },
      "delete": {
        "summary": "Delete an order by ID",
        "operationId": "deleteOrder",
        "parameters": [
          {
            "name": "orderId",
            "in": "path",
            "description": "ID of the order to delete",
            "required": true,
            "schema": {
              "type": "integer",
              "format": "int64"
            }
          }
        ],
        "responses": {
          "204": {
            "description": "Order deleted"
          },
          "404": {
            "description": "Order not found"
          }
        }
      }
    },
    "/users": {
      "post": {
        "summary": "Create a user",
        "operationId": "createUser",
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/NewUser"
              }
            }
          }
        },
        "responses": {
          "201": {
            "description": "User created",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/User"
                }
              }
            }
          }
        }
      }
    }
  },
  "components": {
    "schemas": {
      "Pet": {
        "type": "object",
        "required": ["id", "name"],
        "properties": {
          "id": {
            "type": "integer",
            "format": "int64"
          },
          "name": {
            "type": "string"
          },
          "tag": {
            "type": "string"
          }
        }
      },
      "NewPet": {
        "type": "object",
        "required": ["name"],
        "properties": {
          "name": {
            "type": "string"
          },
          "tag": {
            "type": "string"
          }
        }
      },
      "Pets": {
        "type": "array",
        "items": {
          "$ref": "#/components/schemas/Pet"
        }
      },
      "Order": {
        "type": "object",
        "required": ["id", "petId", "quantity", "shipDate"],
        "properties": {
          "id": {
            "type": "integer",
            "format": "int64"
          },
          "petId": {
            "type": "integer",
            "format": "int64"
          },
          "quantity": {
            "type": "integer",
            "format": "int32"
          },
          "shipDate": {
            "type": "string",
            "format": "date-time"
          },
          "status": {
            "type": "string",
            "enum": ["placed", "approved", "delivered"]
          },
          "complete": {
            "type": "boolean"
          }
        }
      },
      "User": {
        "type": "object",
        "required": ["id", "username"],
        "properties": {
          "id": {
            "type": "integer",
            "format": "int64"
          },
          "username": {
            "type": "string"
          },
          "email": {
            "type": "string"
          },
          "phone": {
            "type": "string"
          }
        }
      },
      "NewUser": {
        "type": "object",
        "required": ["username"],
        "properties": {
          "username": {
            "type": "string"
          },
          "email": {
            "type": "string"
          },
          "phone": {
            "type": "string"
          }
        }
      }
    }
  }
}
Binary file not shown.
@@ -1,138 +0,0 @@
#!/usr/bin/env -S v -w -n -enable-globals run

import os
import time
import veb
import json
import x.json2 { Any }
import net.http
import freeflowuniverse.herolib.data.jsonschema { Schema }
import freeflowuniverse.herolib.web.openapi { Context, Request, Response, Server }
import freeflowuniverse.herolib.hero.processor { ProcedureCall, ProcessParams, Processor }
import freeflowuniverse.herolib.core.redisclient

const spec_path = '${os.dir(@FILE)}/data/openapi.json'
const spec_json = os.read_file(spec_path) or { panic(err) }

// Main function to start the server
fn main() {
	// Initialize the Redis client and RPC mechanism
	mut redis := redisclient.new('localhost:6379')!
	mut rpc := redis.rpc_get('procedure_queue')

	// Initialize the server
	mut server := &Server{
		specification: openapi.json_decode(spec_json)!
		handler: Handler{
			processor: Processor{
				rpc: rpc
			}
		}
	}

	// Start the server
	veb.run[Server, Context](mut server, 8080)
}

pub struct Handler {
mut:
	processor Processor
}

fn (mut handler Handler) handle(request Request) !Response {
	// Convert incoming OpenAPI request to a procedure call
	mut params := []string{}

	if request.arguments.len > 0 {
		params = request.arguments.values().map(it.str()).clone()
	}

	if request.body != '' {
		params << request.body
	}

	if request.parameters.len != 0 {
		mut param_map := map[string]Any{} // Store parameters with correct types

		for param_name, param_value in request.parameters {
			operation_param := request.operation.parameters.filter(it.name == param_name)
			if operation_param.len > 0 {
				param_schema := operation_param[0].schema as Schema
				param_type := param_schema.typ
				param_format := param_schema.format

				// Convert parameter value to corresponding type
				match param_type {
					'integer' {
						match param_format {
							'int32' {
								param_map[param_name] = param_value.int() // Convert to int
							}
							'int64' {
								param_map[param_name] = param_value.i64() // Convert to i64
							}
							else {
								param_map[param_name] = param_value.int() // Default to int
							}
						}
					}
					'string' {
						param_map[param_name] = param_value // Already a string
					}
					'boolean' {
						param_map[param_name] = param_value.bool() // Convert to bool
					}
					'number' {
						match param_format {
							'float' {
								param_map[param_name] = param_value.f32() // Convert to float
							}
							'double' {
								param_map[param_name] = param_value.f64() // Convert to double
							}
							else {
								param_map[param_name] = param_value.f64() // Default to double
							}
						}
					}
					else {
						param_map[param_name] = param_value // Leave as string for unknown types
					}
				}
			} else {
				// If the parameter is not defined in the OpenAPI operation, skip or log it
				println('Unknown parameter: ${param_name}')
			}
		}

		// Encode the parameter map to JSON if needed
		params << json.encode(param_map.str())
	}

	call := ProcedureCall{
		method: request.operation.operation_id
		params: '[${params.join(',')}]' // Keep as a string since ProcedureCall expects a string
	}

	// Process the procedure call
	procedure_response := handler.processor.process(call, ProcessParams{
		timeout: 30 // Set timeout in seconds
	}) or {
		// Handle ProcedureError
		if err is processor.ProcedureError {
			return Response{
				status: http.status_from_int(err.code()) // Map ProcedureError reason to HTTP status code
				body: json.encode({
					'error': err.msg()
				})
			}
		}
		return error('Unexpected error: ${err}')
	}

	// Convert returned procedure response to OpenAPI response
	return Response{
		status: http.Status.ok // Assuming success if no error
		body: procedure_response.result
	}
}
@@ -13,7 +13,7 @@ println('=== HeroPods Refactored API Demo ===')
 // Step 1: factory.new() now only creates a container definition/handle
 // It does NOT create the actual container in the backend yet
 mut container := factory.new(
-	name: 'myalpine'
+	name: 'demo_alpine'
 	image: .custom
 	custom_image_name: 'alpine_3_20'
 	docker_url: 'docker.io/library/alpine:3.20'

@@ -8,7 +8,7 @@ mut factory := heropods.new(
 ) or { panic('Failed to init ContainerFactory: ${err}') }

 mut container := factory.new(
-	name: 'myalpine'
+	name: 'alpine_demo'
 	image: .custom
 	custom_image_name: 'alpine_3_20'
 	docker_url: 'docker.io/library/alpine:3.20'
@@ -11,13 +11,22 @@ import freeflowuniverse.herolib.core.texttools
 pub struct ExecutorCrun {
 pub mut:
 	container_id string // container ID for crun
+	crun_root    string // custom crun root directory
 	retry        int  = 1
 	debug        bool = true
 }

+// Helper method to get crun command with custom root
+fn (executor ExecutorCrun) crun_cmd(cmd string) string {
+	if executor.crun_root != '' {
+		return 'crun --root ${executor.crun_root} ${cmd}'
+	}
+	return 'crun ${cmd}'
+}
+
 pub fn (mut executor ExecutorCrun) init() ! {
 	// Verify container exists and is running
-	result := osal.exec(cmd: 'crun state ${executor.container_id}', stdout: false) or {
+	result := osal.exec(cmd: executor.crun_cmd('state ${executor.container_id}'), stdout: false) or {
 		return error('Container ${executor.container_id} not found or not accessible')
 	}

@@ -41,7 +50,7 @@ pub fn (mut executor ExecutorCrun) exec(args_ ExecArgs) !string {
 		console.print_debug('execute in container ${executor.container_id}: ${args.cmd}')
 	}

-	mut cmd := 'crun exec ${executor.container_id} ${args.cmd}'
+	mut cmd := executor.crun_cmd('exec ${executor.container_id} ${args.cmd}')
 	if args.cmd.contains('\n') {
 		// For multiline commands, write to temp file first
 		temp_script := '/tmp/crun_script_${rand.uuid_v4()}.sh'

@@ -50,7 +59,7 @@ pub fn (mut executor ExecutorCrun) exec(args_ ExecArgs) !string {

 		// Copy script into container and execute
 		executor.file_write('/tmp/exec_script.sh', script_content)!
-		cmd = 'crun exec ${executor.container_id} bash /tmp/exec_script.sh'
+		cmd = executor.crun_cmd('exec ${executor.container_id} bash /tmp/exec_script.sh')
 	}

 	res := osal.exec(cmd: cmd, stdout: args.stdout, debug: executor.debug)!

@@ -66,7 +75,7 @@ pub fn (mut executor ExecutorCrun) exec_interactive(args_ ExecArgs) ! {
 		args.cmd = 'bash /tmp/interactive_script.sh'
 	}

-	cmd := 'crun exec -t ${executor.container_id} ${args.cmd}'
+	cmd := executor.crun_cmd('exec -t ${executor.container_id} ${args.cmd}')
 	console.print_debug(cmd)
 	osal.execute_interactive(cmd)!
 }

@@ -82,7 +91,8 @@ pub fn (mut executor ExecutorCrun) file_write(path string, text string) ! {
 	defer { os.rm(temp_file) or {} }

 	// Use crun exec to copy file content
-	cmd := 'cat ${temp_file} | crun exec -i ${executor.container_id} tee ${path} > /dev/null'
+	sbcmd := executor.crun_cmd('exec -i ${executor.container_id} tee ${path}')
+	cmd := 'cat ${temp_file} | ${sbcmd} > /dev/null'
 	osal.exec(cmd: cmd, stdout: false)!
 }
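A brief usage sketch of the new `crun_root` option (the container ID and root path here are hypothetical; `init` and `exec` are the methods changed in the hunks above):

```v
mut executor := ExecutorCrun{
	container_id: 'demo_container'     // hypothetical container ID
	crun_root:    '/var/lib/crun-demo' // hypothetical custom root
}
executor.init()! // now runs: crun --root /var/lib/crun-demo state demo_container
output := executor.exec(cmd: 'ls /')!
println(output)
```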
115
lib/hero/herofs/README.md
Normal file
@@ -0,0 +1,115 @@
# HeroFS - Distributed Filesystem for HeroLib

HeroFS is a distributed filesystem implementation built on top of HeroDB (Redis-based storage). It provides a virtual filesystem with support for files, directories, symbolic links, and binary data blobs.

## Overview

HeroFS implements a filesystem structure where:
- **Fs**: Represents a filesystem as a top-level container
- **FsDir**: Represents directories within a filesystem
- **FsFile**: Represents files with support for multiple directory associations
- **FsSymlink**: Represents symbolic links pointing to files or directories
- **FsBlob**: Represents binary data chunks (up to 1MB) used as file content

## Features

- Distributed storage using Redis
- Support for files, directories, and symbolic links
- Blob-based file content storage with integrity verification
- Multiple directory associations for files (similar to hard links)
- Filesystem quotas and usage tracking
- Metadata support for files
- Efficient lookup mechanisms using Redis hash sets

## Installation

HeroFS is part of HeroLib and is automatically available when using HeroLib.

## Usage

To use HeroFS, you need to create a filesystem factory:

```v
import freeflowuniverse.herolib.hero.herofs

mut fs_factory := herofs.new()!
```

### Creating a Filesystem

```v
fs_id := fs_factory.fs.set(fs_factory.fs.new(
	name: 'my_filesystem'
	quota_bytes: 1000000000 // 1GB quota
)!)!
```

### Working with Directories

```v
// Create root directory
root_dir_id := fs_factory.fs_dir.set(fs_factory.fs_dir.new(
	name: 'root'
	fs_id: fs_id
	parent_id: 0
)!)!

// Create subdirectory
sub_dir_id := fs_factory.fs_dir.set(fs_factory.fs_dir.new(
	name: 'documents'
	fs_id: fs_id
	parent_id: root_dir_id
)!)!
```

### Working with Blobs

```v
// Create a blob with binary data
blob_id := fs_factory.fs_blob.set(fs_factory.fs_blob.new(
	data: content_bytes
	mime_type: 'text/plain'
)!)!
```

### Working with Files

```v
// Create a file
file_id := fs_factory.fs_file.set(fs_factory.fs_file.new(
	name: 'example.txt'
	fs_id: fs_id
	directories: [root_dir_id]
	blobs: [blob_id]
)!)!
```

### Working with Symbolic Links

```v
// Create a symbolic link to a file
symlink_id := fs_factory.fs_symlink.set(fs_factory.fs_symlink.new(
	name: 'example_link.txt'
	fs_id: fs_id
	parent_id: root_dir_id
	target_id: file_id
	target_type: .file
)!)!
```
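Reading things back works through the list and lookup methods of the same factory; a short sketch (the method names are the ones defined in this module's model code, and the field names come from the structs in `specs.md`):

```v
// List the files that ended up in the root directory
files := fs_factory.fs_file.list_by_directory(root_dir_id)!
for f in files {
	println('${f.name} (${f.size_bytes} bytes, ${f.mime_type})')
}

// Filesystems can also be looked up by name
fs := fs_factory.fs.get_by_name('my_filesystem')!
println('used ${fs.used_bytes} of ${fs.quota_bytes} bytes')
```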
## API Reference

The HeroFS module provides the following main components:

- `FsFactory` - Main factory for accessing all filesystem components
- `DBFs` - Filesystem operations
- `DBFsDir` - Directory operations
- `DBFsFile` - File operations
- `DBFsSymlink` - Symbolic link operations
- `DBFsBlob` - Binary data blob operations

Each component provides CRUD operations and specialized methods for filesystem management.

## Examples

Check the `examples/hero/herofs/` directory for detailed usage examples.
@@ -79,10 +79,22 @@ pub fn (mut self DBFs) new(args FsArg) !Fs {
 }

 pub fn (mut self DBFs) set(o Fs) !u32 {
-	return self.db.set[Fs](o)!
+	id := self.db.set[Fs](o)!
+
+	// Store name -> id mapping for lookups
+	self.db.redis.hset('fs:names', o.name, id.str())!
+
+	return id
 }

 pub fn (mut self DBFs) delete(id u32) ! {
+	// Get the filesystem to retrieve its name
+	fs := self.get(id)!
+
+	// Remove name -> id mapping
+	self.db.redis.hdel('fs:names', fs.name)!
+
+	// Delete the filesystem
 	self.db.delete[Fs](id)!
 }

@@ -75,12 +75,12 @@ pub fn (mut self DBFsDir) set(o FsDir) !u32 {
 	path_key := '${o.fs_id}:${o.parent_id}:${o.name}'
 	self.db.redis.hset('fsdir:paths', path_key, id.str())!

-	// Store in filesystem's directory list
-	self.db.redis.sadd('fsdir:fs:${o.fs_id}', id.str())!
+	// Store in filesystem's directory list using hset
+	self.db.redis.hset('fsdir:fs:${o.fs_id}', id.str(), id.str())!

-	// Store in parent's children list
+	// Store in parent's children list using hset
 	if o.parent_id > 0 {
-		self.db.redis.sadd('fsdir:children:${o.parent_id}', id.str())!
+		self.db.redis.hset('fsdir:children:${o.parent_id}', id.str(), id.str())!
 	}

 	return id

@@ -90,8 +90,8 @@ pub fn (mut self DBFsDir) delete(id u32) ! {
 	// Get the directory info before deleting
 	dir := self.get(id)!

-	// Check if directory has children
-	children := self.db.redis.smembers('fsdir:children:${id}')!
+	// Check if directory has children using hkeys
+	children := self.db.redis.hkeys('fsdir:children:${id}')!
 	if children.len > 0 {
 		return error('Cannot delete directory ${dir.name} (ID: ${id}) because it has ${children.len} children')
 	}

@@ -100,12 +100,12 @@ pub fn (mut self DBFsDir) delete(id u32) ! {
 	path_key := '${dir.fs_id}:${dir.parent_id}:${dir.name}'
 	self.db.redis.hdel('fsdir:paths', path_key)!

-	// Remove from filesystem's directory list
-	self.db.redis.srem('fsdir:fs:${dir.fs_id}', id.str())!
+	// Remove from filesystem's directory list using hdel
+	self.db.redis.hdel('fsdir:fs:${dir.fs_id}', id.str())!

-	// Remove from parent's children list
+	// Remove from parent's children list using hdel
 	if dir.parent_id > 0 {
-		self.db.redis.srem('fsdir:children:${dir.parent_id}', id.str())!
+		self.db.redis.hdel('fsdir:children:${dir.parent_id}', id.str())!
 	}

 	// Delete the directory itself

@@ -139,7 +139,7 @@ pub fn (mut self DBFsDir) get_by_path(fs_id u32, parent_id u32, name string) !Fs
 // Get all directories in a filesystem
 pub fn (mut self DBFsDir) list_by_filesystem(fs_id u32) ![]FsDir {
-	dir_ids := self.db.redis.smembers('fsdir:fs:${fs_id}')!
+	dir_ids := self.db.redis.hkeys('fsdir:fs:${fs_id}')!
 	mut dirs := []FsDir{}
 	for id_str in dir_ids {
 		dirs << self.get(id_str.u32())!

@@ -149,7 +149,7 @@ pub fn (mut self DBFsDir) list_by_filesystem(fs_id u32) ![]FsDir {
 // Get children of a directory
 pub fn (mut self DBFsDir) list_children(dir_id u32) ![]FsDir {
-	child_ids := self.db.redis.smembers('fsdir:children:${dir_id}')!
+	child_ids := self.db.redis.hkeys('fsdir:children:${dir_id}')!
 	mut dirs := []FsDir{}
 	for id_str in child_ids {
 		dirs << self.get(id_str.u32())!

@@ -159,8 +159,8 @@ pub fn (mut self DBFsDir) list_children(dir_id u32) ![]FsDir {
 // Check if a directory has children
 pub fn (mut self DBFsDir) has_children(dir_id u32) !bool {
-	count := self.db.redis.scard('fsdir:children:${dir_id}')!
-	return count > 0
+	keys := self.db.redis.hkeys('fsdir:children:${dir_id}')!
+	return keys.len > 0
 }

 // Rename a directory

@@ -196,7 +196,7 @@ pub fn (mut self DBFsDir) move(id u32, new_parent_id u32) !u32 {
 	// Remove from old parent's children list
 	if dir.parent_id > 0 {
-		self.db.redis.srem('fsdir:children:${dir.parent_id}', id.str())!
+		self.db.redis.hdel('fsdir:children:${dir.parent_id}', id.str())!
 	}

 	// Update parent

@@ -181,16 +181,16 @@ pub fn (mut self DBFsFile) set(o FsFile) !u32 {
 		path_key := '${dir_id}:${o.name}'
 		self.db.redis.hset('fsfile:paths', path_key, id.str())!

-		// Add to directory's file list
-		self.db.redis.sadd('fsfile:dir:${dir_id}', id.str())!
+		// Add to directory's file list using hset
+		self.db.redis.hset('fsfile:dir:${dir_id}', id.str(), id.str())!
 	}

-	// Store in filesystem's file list
-	self.db.redis.sadd('fsfile:fs:${o.fs_id}', id.str())!
+	// Store in filesystem's file list using hset
+	self.db.redis.hset('fsfile:fs:${o.fs_id}', id.str(), id.str())!

-	// Store by mimetype
+	// Store by mimetype using hset
 	if o.mime_type != '' {
-		self.db.redis.sadd('fsfile:mime:${o.mime_type}', id.str())!
+		self.db.redis.hset('fsfile:mime:${o.mime_type}', id.str(), id.str())!
 	}

 	return id

@@ -206,16 +206,16 @@ pub fn (mut self DBFsFile) delete(id u32) ! {
 		path_key := '${dir_id}:${file.name}'
 		self.db.redis.hdel('fsfile:paths', path_key)!

-		// Remove from directory's file list
-		self.db.redis.srem('fsfile:dir:${dir_id}', id.str())!
+		// Remove from directory's file list using hdel
+		self.db.redis.hdel('fsfile:dir:${dir_id}', id.str())!
 	}

-	// Remove from filesystem's file list
-	self.db.redis.srem('fsfile:fs:${file.fs_id}', id.str())!
+	// Remove from filesystem's file list using hdel
+	self.db.redis.hdel('fsfile:fs:${file.fs_id}', id.str())!

-	// Remove from mimetype index
+	// Remove from mimetype index using hdel
 	if file.mime_type != '' {
-		self.db.redis.srem('fsfile:mime:${file.mime_type}', id.str())!
+		self.db.redis.hdel('fsfile:mime:${file.mime_type}', id.str())!
 	}

 	// Delete the file itself

@@ -249,7 +249,7 @@ pub fn (mut self DBFsFile) get_by_path(dir_id u32, name string) !FsFile {
 // List files in a directory
 pub fn (mut self DBFsFile) list_by_directory(dir_id u32) ![]FsFile {
-	file_ids := self.db.redis.smembers('fsfile:dir:${dir_id}')!
+	file_ids := self.db.redis.hkeys('fsfile:dir:${dir_id}')!
 	mut files := []FsFile{}
 	for id_str in file_ids {
 		files << self.get(id_str.u32())!

@@ -259,7 +259,7 @@ pub fn (mut self DBFsFile) list_by_directory(dir_id u32) ![]FsFile {
 // List files in a filesystem
 pub fn (mut self DBFsFile) list_by_filesystem(fs_id u32) ![]FsFile {
-	file_ids := self.db.redis.smembers('fsfile:fs:${fs_id}')!
+	file_ids := self.db.redis.hkeys('fsfile:fs:${fs_id}')!
 	mut files := []FsFile{}
 	for id_str in file_ids {
 		files << self.get(id_str.u32())!

@@ -269,7 +269,7 @@ pub fn (mut self DBFsFile) list_by_filesystem(fs_id u32) ![]FsFile {
 // List files by mime type
 pub fn (mut self DBFsFile) list_by_mime_type(mime_type string) ![]FsFile {
-	file_ids := self.db.redis.smembers('fsfile:mime:${mime_type}')!
+	file_ids := self.db.redis.hkeys('fsfile:mime:${mime_type}')!
 	mut files := []FsFile{}
 	for id_str in file_ids {
 		files << self.get(id_str.u32())!

@@ -358,7 +358,7 @@ pub fn (mut self DBFsFile) move(id u32, new_directories []u32) !u32 {
 	for dir_id in file.directories {
 		path_key := '${dir_id}:${file.name}'
 		self.db.redis.hdel('fsfile:paths', path_key)!
-		self.db.redis.srem('fsfile:dir:${dir_id}', id.str())!
+		self.db.redis.hdel('fsfile:dir:${dir_id}', id.str())!
 	}

 	// Update directories

@@ -109,15 +109,15 @@ pub fn (mut self DBFsSymlink) set(o FsSymlink) !u32 {
 	path_key := '${o.parent_id}:${o.name}'
 	self.db.redis.hset('fssymlink:paths', path_key, id.str())!

-	// Add to parent's symlinks list
-	self.db.redis.sadd('fssymlink:parent:${o.parent_id}', id.str())!
+	// Add to parent's symlinks list using hset
+	self.db.redis.hset('fssymlink:parent:${o.parent_id}', id.str(), id.str())!

-	// Store in filesystem's symlink list
-	self.db.redis.sadd('fssymlink:fs:${o.fs_id}', id.str())!
+	// Store in filesystem's symlink list using hset
+	self.db.redis.hset('fssymlink:fs:${o.fs_id}', id.str(), id.str())!

-	// Store in target's referrers list
+	// Store in target's referrers list using hset
 	target_key := '${o.target_type}:${o.target_id}'
-	self.db.redis.sadd('fssymlink:target:${target_key}', id.str())!
+	self.db.redis.hset('fssymlink:target:${target_key}', id.str(), id.str())!

 	return id
 }

@@ -130,15 +130,15 @@ pub fn (mut self DBFsSymlink) delete(id u32) ! {
 	path_key := '${symlink.parent_id}:${symlink.name}'
 	self.db.redis.hdel('fssymlink:paths', path_key)!

-	// Remove from parent's symlinks list
-	self.db.redis.srem('fssymlink:parent:${symlink.parent_id}', id.str())!
+	// Remove from parent's symlinks list using hdel
+	self.db.redis.hdel('fssymlink:parent:${symlink.parent_id}', id.str())!

-	// Remove from filesystem's symlink list
-	self.db.redis.srem('fssymlink:fs:${symlink.fs_id}', id.str())!
+	// Remove from filesystem's symlink list using hdel
+	self.db.redis.hdel('fssymlink:fs:${symlink.fs_id}', id.str())!

-	// Remove from target's referrers list
+	// Remove from target's referrers list using hdel
 	target_key := '${symlink.target_type}:${symlink.target_id}'
-	self.db.redis.srem('fssymlink:target:${target_key}', id.str())!
+	self.db.redis.hdel('fssymlink:target:${target_key}', id.str())!

 	// Delete the symlink itself
 	self.db.delete[FsSymlink](id)!

@@ -171,7 +171,7 @@ pub fn (mut self DBFsSymlink) get_by_path(parent_id u32, name string) !FsSymlink
 // List symlinks in a parent directory
 pub fn (mut self DBFsSymlink) list_by_parent(parent_id u32) ![]FsSymlink {
-	symlink_ids := self.db.redis.smembers('fssymlink:parent:${parent_id}')!
+	symlink_ids := self.db.redis.hkeys('fssymlink:parent:${parent_id}')!
 	mut symlinks := []FsSymlink{}
 	for id_str in symlink_ids {
 		symlinks << self.get(id_str.u32())!

@@ -181,7 +181,7 @@ pub fn (mut self DBFsSymlink) list_by_parent(parent_id u32) ![]FsSymlink {
 // List symlinks in a filesystem
 pub fn (mut self DBFsSymlink) list_by_filesystem(fs_id u32) ![]FsSymlink {
-	symlink_ids := self.db.redis.smembers('fssymlink:fs:${fs_id}')!
+	symlink_ids := self.db.redis.hkeys('fssymlink:fs:${fs_id}')!
 	mut symlinks := []FsSymlink{}
 	for id_str in symlink_ids {
 		symlinks << self.get(id_str.u32())!

@@ -192,7 +192,7 @@ pub fn (mut self DBFsSymlink) list_by_filesystem(fs_id u32) ![]FsSymlink {
 // List symlinks pointing to a target
 pub fn (mut self DBFsSymlink) list_by_target(target_type SymlinkTargetType, target_id u32) ![]FsSymlink {
 	target_key := '${target_type}:${target_id}'
-	symlink_ids := self.db.redis.smembers('fssymlink:target:${target_key}')!
+	symlink_ids := self.db.redis.hkeys('fssymlink:target:${target_key}')!
 	mut symlinks := []FsSymlink{}
 	for id_str in symlink_ids {
 		symlinks << self.get(id_str.u32())!

@@ -231,8 +231,8 @@ pub fn (mut self DBFsSymlink) move(id u32, new_parent_id u32) !u32 {
 	old_path_key := '${symlink.parent_id}:${symlink.name}'
 	self.db.redis.hdel('fssymlink:paths', old_path_key)!

-	// Remove from old parent's symlinks list
-	self.db.redis.srem('fssymlink:parent:${symlink.parent_id}', id.str())!
+	// Remove from old parent's symlinks list using hdel
+	self.db.redis.hdel('fssymlink:parent:${symlink.parent_id}', id.str())!

 	// Update parent
 	symlink.parent_id = new_parent_id

@@ -260,7 +260,7 @@ pub fn (mut self DBFsSymlink) redirect(id u32, new_target_id u32, new_target_typ
 	// Remove from old target's referrers list
 	old_target_key := '${symlink.target_type}:${symlink.target_id}'
-	self.db.redis.srem('fssymlink:target:${old_target_key}', id.str())!
+	self.db.redis.hdel('fssymlink:target:${old_target_key}', id.str())!

 	// Update target
 	symlink.target_id = new_target_id
289
lib/hero/herofs/specs.md
Normal file
@@ -0,0 +1,289 @@
|
||||
# HeroFS Specifications

This document provides detailed specifications for the HeroFS distributed filesystem implementation.

## Architecture Overview

HeroFS is built on top of HeroDB, which uses Redis as its storage backend. The filesystem is implemented as a collection of interconnected data structures that represent the various components of a filesystem:

1. **Fs** - Filesystem container
2. **FsDir** - Directories
3. **FsFile** - Files
4. **FsSymlink** - Symbolic links
5. **FsBlob** - Binary data chunks

All components inherit from the `Base` struct, which provides common fields like ID, name, description, timestamps, security policies, tags, and comments.

## Filesystem (Fs)

The `Fs` struct represents a filesystem as a top-level container:

```v
@[heap]
pub struct Fs {
	db.Base
pub mut:
	name        string
	group_id    u32 // Associated group for permissions
	root_dir_id u32 // ID of root directory
	quota_bytes u64 // Storage quota in bytes
	used_bytes  u64 // Current usage in bytes
}
```

### Key Features

- **Name-based identification**: Filesystems can be retrieved by name through an efficient Redis hash index
- **Quota management**: Each filesystem has a storage quota and tracks current usage
- **Root directory**: Each filesystem has a root directory ID that serves as the entry point
- **Group association**: Filesystems can be associated with groups for permission management

### Methods

- `new()`: Create a new filesystem instance
- `set()`: Save filesystem to database
- `get()`: Retrieve filesystem by ID
- `get_by_name()`: Retrieve filesystem by name
- `delete()`: Remove filesystem from database
- `exist()`: Check if filesystem exists
- `list()`: List all filesystems
- `increase_usage()`: Increase used bytes counter
- `decrease_usage()`: Decrease used bytes counter
- `check_quota()`: Verify whether additional bytes would exceed the quota (see the sketch below)
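
The usage counters and `check_quota()` combine naturally into an admission check before new content is written. A minimal sketch of that flow, assuming `check_quota(fs_id, extra_bytes)` returns a bool and `increase_usage(fs_id, bytes)` takes the same arguments (the exact signatures are not pinned down by this spec):

```v
import freeflowuniverse.herolib.hero.herofs

fn store_with_quota(mut fs_db herofs.DBFs, fs_id u32, payload []u8) ! {
	// Refuse the write up front if it would push usage past the quota.
	if !fs_db.check_quota(fs_id, u64(payload.len))! {
		return error('quota exceeded for filesystem ${fs_id}')
	}
	// ... write the file's blobs here ...
	// Then account for the bytes that were actually stored.
	fs_db.increase_usage(fs_id, u64(payload.len))!
}
```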
## Directory (FsDir)

The `FsDir` struct represents a directory in a filesystem:

```v
@[heap]
pub struct FsDir {
	db.Base
pub mut:
	name      string
	fs_id     u32 // Associated filesystem
	parent_id u32 // Parent directory ID (0 for root)
}
```

### Key Features

- **Hierarchical structure**: Directories form a tree structure with parent-child relationships
- **Path-based identification**: Efficient lookup by filesystem ID, parent ID, and name
- **Children management**: Directories automatically track their children through Redis hashes
- **Cross-filesystem isolation**: Directories are bound to a specific filesystem

### Methods

- `new()`: Create a new directory instance
- `set()`: Save directory to database and update indices
- `get()`: Retrieve directory by ID
- `delete()`: Remove directory (fails if it has children)
- `exist()`: Check if directory exists
- `list()`: List all directories
- `get_by_path()`: Retrieve directory by path components (see the path-walk sketch below)
- `list_by_filesystem()`: List directories in a filesystem
- `list_children()`: List child directories
- `has_children()`: Check if directory has children
- `rename()`: Rename directory
- `move()`: Move directory to a new parent
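
Since `get_by_path()` looks up one `(fs_id, parent_id, name)` triple at a time, resolving a full path is a short walk from the root; a sketch under the same assumption about the signature (illustrative, not confirmed by this spec):

```v
// Resolve 'projects/herolib/src' inside filesystem fs_id, starting from root_id.
fn resolve_dir(mut dirs herofs.DBFsDir, fs_id u32, root_id u32, path string) !herofs.FsDir {
	mut current_id := root_id
	for part in path.trim('/').split('/') {
		dir := dirs.get_by_path(fs_id, current_id, part)!
		current_id = dir.id
	}
	return dirs.get(current_id)!
}
```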
## File (FsFile)

The `FsFile` struct represents a file in a filesystem:

```v
@[heap]
pub struct FsFile {
	db.Base
pub mut:
	name        string
	fs_id       u32    // Associated filesystem
	directories []u32  // Directory IDs where this file exists
	blobs       []u32  // IDs of file content blobs
	size_bytes  u64
	mime_type   string // e.g., "image/png"
	checksum    string // e.g., SHA256 checksum of the file
	accessed_at i64
	metadata    map[string]string // Custom metadata
}
```

### Key Features

- **Multiple directory associations**: Files can exist in multiple directories (similar to hard links in Linux)
- **Blob-based content**: File content is stored as references to FsBlob objects
- **Size tracking**: Files track their total size in bytes
- **MIME type support**: Files store their MIME type for content identification
- **Checksum verification**: Files can store checksums for integrity verification
- **Access timestamp**: Tracks when the file was last accessed
- **Custom metadata**: Files support custom key-value metadata

### Methods

- `new()`: Create a new file instance
- `set()`: Save file to database and update indices
- `get()`: Retrieve file by ID
- `delete()`: Remove file and update all indices
- `exist()`: Check if file exists
- `list()`: List all files
- `get_by_path()`: Retrieve file by directory and name
- `list_by_directory()`: List files in a directory
- `list_by_filesystem()`: List files in a filesystem
- `list_by_mime_type()`: List files by MIME type
- `append_blob()`: Add a new blob to the file (see the chunking sketch below)
- `update_accessed()`: Update accessed timestamp
- `update_metadata()`: Update file metadata
- `rename()`: Rename file (affects all directories)
- `move()`: Move file to different directories
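
Because content lives in separate `FsBlob` records capped at 1MB, writing a large file reduces to chunking plus `append_blob()` calls. A sketch, assuming `blobs.set()` returns the blob ID and `append_blob(file_id, blob_id)` links it (signatures are assumptions, not taken from the implementation):

```v
const chunk_size = 1024 * 1024 // blobs are capped at 1MB

fn write_file(mut files herofs.DBFsFile, mut blobs herofs.DBFsBlob, file_id u32, data []u8) ! {
	mut offset := 0
	for offset < data.len {
		end := if offset + chunk_size < data.len { offset + chunk_size } else { data.len }
		mut blob := blobs.new(data: data[offset..end])!
		blob_id := blobs.set(blob)! // returns the existing ID when the chunk deduplicates
		files.append_blob(file_id, blob_id)!
		offset = end
	}
}
```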
## Symbolic Link (FsSymlink)

The `FsSymlink` struct represents a symbolic link in a filesystem:

```v
@[heap]
pub struct FsSymlink {
	db.Base
pub mut:
	name        string
	fs_id       u32 // Associated filesystem
	parent_id   u32 // Parent directory ID
	target_id   u32 // ID of target file or directory
	target_type SymlinkTargetType
}

pub enum SymlinkTargetType {
	file
	directory
}
```

### Key Features

- **Target type specification**: Symlinks can point to either files or directories
- **Cross-filesystem protection**: Symlinks cannot point to targets in different filesystems
- **Referrer tracking**: Targets know which symlinks point to them
- **Broken link detection**: Symlinks can be checked for validity

### Methods

- `new()`: Create a new symbolic link instance
- `set()`: Save symlink to database and update indices
- `get()`: Retrieve symlink by ID
- `delete()`: Remove symlink and update all indices
- `exist()`: Check if symlink exists
- `list()`: List all symlinks
- `get_by_path()`: Retrieve symlink by parent directory and name
- `list_by_parent()`: List symlinks in a parent directory
- `list_by_filesystem()`: List symlinks in a filesystem
- `list_by_target()`: List symlinks pointing to a target
- `rename()`: Rename symlink
- `move()`: Move symlink to a new parent directory
- `redirect()`: Change symlink target
- `resolve()`: Get the target ID of a symlink
- `is_broken()`: Check if symlink target exists (see the sweep sketch below)
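
The referrer index plus `is_broken()` make it cheap to sweep a filesystem for dangling links after deletions; a sketch, assuming the method shapes listed above:

```v
fn broken_symlinks(mut links herofs.DBFsSymlink, fs_id u32) ![]u32 {
	mut broken := []u32{}
	for link in links.list_by_filesystem(fs_id)! {
		if links.is_broken(link.id)! {
			broken << link.id
		}
	}
	return broken
}
```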
## Binary Data Blob (FsBlob)

The `FsBlob` struct represents binary data chunks:

```v
@[heap]
pub struct FsBlob {
	db.Base
pub mut:
	hash       string // blake192 hash of content
	data       []u8   // Binary data (max 1MB)
	size_bytes int    // Size in bytes
	created_at i64
	mime_type  string // MIME type
	encoding   string // Encoding type
}
```

### Key Features

- **Content-based addressing**: Blobs are identified by their BLAKE3 hash (first 192 bits)
- **Size limit**: Blobs are limited to 1MB to ensure efficient storage and retrieval
- **Integrity verification**: Built-in hash verification for data integrity
- **MIME type and encoding**: Blobs store their content type information
- **Deduplication**: Identical content blobs are automatically deduplicated

### Methods

- `new()`: Create a new blob instance
- `set()`: Save blob to database (returns the existing ID if the content already exists; see the sketch below)
- `get()`: Retrieve blob by ID
- `delete()`: Remove blob from database
- `exist()`: Check if blob exists
- `list()`: List all blobs
- `get_by_hash()`: Retrieve blob by content hash
- `exists_by_hash()`: Check if blob exists by content hash
- `verify_integrity()`: Verify blob data integrity against stored hash
- `calculate_hash()`: Calculate BLAKE3 hash of blob data
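
Content addressing turns every write into a hash lookup first; a sketch of the deduplication path, assuming `calculate_hash()`, `exists_by_hash()` and `get_by_hash()` behave as listed (exact signatures assumed):

```v
fn store_blob(mut blobs herofs.DBFsBlob, data []u8) !u32 {
	hash := blobs.calculate_hash(data)
	if blobs.exists_by_hash(hash)! {
		// Identical content is already stored: reuse its ID instead of writing again.
		return blobs.get_by_hash(hash)!.id
	}
	mut blob := blobs.new(data: data)!
	return blobs.set(blob)!
}
```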
## Storage Mechanisms

HeroFS uses Redis hashes extensively for efficient indexing and lookup (see the query example after this list):

### Filesystem Indices

- `fs:names` - Maps filesystem names to IDs
- `fsdir:paths` - Maps directory path components to IDs
- `fsdir:fs:${fs_id}` - Lists directories in a filesystem
- `fsdir:children:${dir_id}` - Lists children of a directory
- `fsfile:paths` - Maps file paths (directory:name) to IDs
- `fsfile:dir:${dir_id}` - Lists files in a directory
- `fsfile:fs:${fs_id}` - Lists files in a filesystem
- `fsfile:mime:${mime_type}` - Lists files by MIME type
- `fssymlink:paths` - Maps symlink paths (parent:name) to IDs
- `fssymlink:parent:${parent_id}` - Lists symlinks in a parent directory
- `fssymlink:fs:${fs_id}` - Lists symlinks in a filesystem
- `fssymlink:target:${target_type}:${target_id}` - Lists symlinks pointing to a target
- `fsblob:hashes` - Maps content hashes to blob IDs
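
These keys can be inspected or queried directly through the underlying Redis handle; for instance (the `db.redis` accessor and the IDs are illustrative):

```v
fn inspect_indices(mut db herofs.HeroDB) ! {
	// All directory IDs registered under filesystem 1.
	dir_ids := db.redis.hkeys('fsdir:fs:1')!
	println('directories in fs 1: ${dir_ids}')

	// Look up a file ID by its directory:name path key.
	file_id := db.redis.hget('fsfile:paths', '7:readme.md')!
	println('readme.md in dir 7 has id ${file_id}')
}
```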
### Data Serialization

All HeroFS components use the HeroLib encoder for serialization:

- Version tag (u8) is stored first
- All fields are serialized in a consistent order
- Deserialization follows the exact same order
- Type safety is maintained through V's type system (see the sketch below)
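
In code, this ordering discipline follows the usual HeroLib encoder pattern; a sketch for a hypothetical two-field struct (the encoder method names are assumptions here, and the real field lists are longer):

```v
import freeflowuniverse.herolib.data.encoder

struct MiniDir {
	id   u32
	name string
}

fn dumps(obj MiniDir) []u8 {
	mut e := encoder.new()
	e.add_u8(1) // version tag is written first
	e.add_u32(obj.id)
	e.add_string(obj.name)
	return e.data
}

fn loads(data []u8) !MiniDir {
	mut d := encoder.decoder_new(data)
	version := d.get_u8()!
	if version != 1 {
		return error('unsupported version ${version}')
	}
	// Fields are read back in exactly the order they were written.
	return MiniDir{
		id:   d.get_u32()!
		name: d.get_string()!
	}
}
```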
## Special Features

### Hard Links
Files can be associated with multiple directories through the `directories` field, allowing for hard link-like behavior.

### Deduplication
Blobs are automatically deduplicated based on their content hash. When creating a new blob with identical content to an existing one, the existing ID is returned.

### Quota Management
Filesystems track their storage usage and can enforce quotas to prevent overconsumption.

### Metadata Support
Files support custom metadata as key-value pairs, allowing for flexible attribute storage; an example follows.
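
For example, tagging a photo with provenance attributes could look like this (the exact `update_metadata()` signature is assumed):

```v
fn tag_photo(mut files herofs.DBFsFile, file_id u32) ! {
	mut file := files.get(file_id)!
	file.metadata['camera'] = 'X100V'
	file.metadata['license'] = 'CC-BY-4.0'
	files.update_metadata(file_id, file.metadata)!
}
```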
### Cross-Component Validation
When creating or modifying components, HeroFS validates references to other components:
- Directory parent must exist
- File directories must exist
- File blobs must exist
- Symlink parent must exist
- Symlink target must exist and match target type

## Security Model

HeroFS inherits the security model from HeroDB:
- Each component has a `securitypolicy` field referencing a SecurityPolicy object
- Components can have associated tags for categorization
- Components can have associated comments for documentation

## Performance Considerations

- All indices are stored as Redis hashes for O(1) lookup performance
- Blob deduplication reduces storage requirements
- Multiple directory associations allow efficient file organization
- Content-based addressing enables easy integrity verification
- Factory pattern provides easy access to all filesystem components
@@ -1,130 +0,0 @@
# HeroModels OpenRPC Server

This module provides an OpenRPC server for HeroModels that runs over Unix domain sockets. It exposes comment management functionality through a JSON-RPC 2.0 interface.

## Features

- **Unix Socket Communication**: Efficient local communication via Unix domain sockets
- **JSON-RPC 2.0 Protocol**: Standard JSON-RPC 2.0 implementation
- **Comment Management**: Full CRUD operations for comments
- **OpenRPC Specification**: Auto-generated OpenRPC spec via `discover` method
- **Concurrent Handling**: Multiple client connections supported

## API Methods

### comment_get
Retrieve comments by ID, author, or parent.

**Parameters:**
- `id` (optional): Comment ID to retrieve
- `author` (optional): Author ID to filter by
- `parent` (optional): Parent comment ID to filter by

**Returns:** Comment object or array of comments

### comment_set
Create a new comment.

**Parameters:**
- `comment`: Comment text content
- `parent`: Parent comment ID (0 for top-level)
- `author`: Author user ID

**Returns:** Object with created comment ID

### comment_delete
Delete a comment by ID.

**Parameters:**
- `id`: Comment ID to delete

**Returns:** Success status and deleted comment ID

### comment_list
List all comment IDs.

**Parameters:** None

**Returns:** Array of all comment IDs

### discover
Get the OpenRPC specification for this service.

**Parameters:** None

**Returns:** Complete OpenRPC specification object

## Usage

### Starting the Server

```v
import freeflowuniverse.herolib.hero.heromodels.openrpc

mut server := openrpc.new_rpc_server(socket_path: '/tmp/heromodels')!
server.start()! // Blocks and serves requests
```

### Example Client

```v
import net.unix
import json
import freeflowuniverse.herolib.hero.heromodels.openrpc

// Connect to server
mut conn := unix.connect_stream('/tmp/heromodels')!

// Create a comment
request := openrpc.JsonRpcRequest{
	jsonrpc: '2.0'
	method: 'comment_set'
	params: json.encode({
		'comment': 'Hello World'
		'parent': 0
		'author': 1
	})
	id: 1
}

// Send request
conn.write_string(json.encode(request))!

// Read response
mut buffer := []u8{len: 4096}
bytes_read := conn.read(mut buffer)!
response := buffer[..bytes_read].bytestr()
```

## Files

- `server.v` - Main RPC server implementation
- `types.v` - JSON-RPC and parameter type definitions
- `comment.v` - Comment-specific RPC method implementations
- `discover.v` - OpenRPC specification generation
- `example.vsh` - Server example script
- `client_example.vsh` - Client example script

## Running Examples

Start the server:
```bash
vrun lib/hero/heromodels/openrpc/example.vsh
```

Test with client (in another terminal):
```bash
vrun lib/hero/heromodels/openrpc/client_example.vsh
```

## Dependencies

- Redis (for data storage via heromodels)
- Unix domain socket support
- JSON encoding/decoding

## Socket Path

Default socket path: `/tmp/heromodels`

The socket file is automatically cleaned up when the server starts and stops.
@@ -1,75 +0,0 @@
module openrpc

import json
import freeflowuniverse.herolib.schemas.openrpc
import freeflowuniverse.herolib.hero.heromodels
import freeflowuniverse.herolib.schemas.jsonrpc
import os

const openrpc_path = os.join_path(os.dir(@FILE), 'openrpc.json')

pub fn new_heromodels_handler() !openrpc.Handler {
	mut openrpc_handler := openrpc.Handler{
		specification: openrpc.new(path: openrpc_path)!
	}

	openrpc_handler.register_procedure_handle('comment_get', comment_get)
	openrpc_handler.register_procedure_handle('comment_set', comment_set)
	openrpc_handler.register_procedure_handle('comment_delete', comment_delete)
	openrpc_handler.register_procedure_handle('comment_list', comment_list)

	openrpc_handler.register_procedure_handle('calendar_get', calendar_get)
	openrpc_handler.register_procedure_handle('calendar_set', calendar_set)
	openrpc_handler.register_procedure_handle('calendar_delete', calendar_delete)
	openrpc_handler.register_procedure_handle('calendar_list', calendar_list)

	return openrpc_handler
}

pub fn comment_get(request jsonrpc.Request) !jsonrpc.Response {
	payload := jsonrpc.decode_payload[u32](request.params) or { return jsonrpc.invalid_params }
	result := heromodels.comment_get(payload) or { return jsonrpc.internal_error }
	return jsonrpc.new_response(request.id, json.encode(result))
}

pub fn comment_set(request jsonrpc.Request) !jsonrpc.Response {
	payload := jsonrpc.decode_payload[heromodels.CommentArg](request.params) or { return jsonrpc.invalid_params }
	return jsonrpc.new_response(request.id, heromodels.comment_set(payload)!.str())
}

pub fn comment_delete(request jsonrpc.Request) !jsonrpc.Response {
	payload := jsonrpc.decode_payload[u32](request.params) or { return jsonrpc.invalid_params }
	return jsonrpc.new_response(request.id, '')
}

pub fn comment_list(request jsonrpc.Request) !jsonrpc.Response {
	result := heromodels.list[heromodels.Comment]() or { return jsonrpc.internal_error }
	return jsonrpc.new_response(request.id, json.encode(result))
}

pub fn calendar_get(request jsonrpc.Request) !jsonrpc.Response {
	payload := jsonrpc.decode_payload[u32](request.params) or { return jsonrpc.invalid_params }
	result := heromodels.get[heromodels.Calendar](payload) or { return jsonrpc.internal_error }
	return jsonrpc.new_response(request.id, json.encode(result))
}

pub fn calendar_set(request jsonrpc.Request) !jsonrpc.Response {
	mut payload := json.decode(heromodels.Calendar, request.params) or { return jsonrpc.invalid_params }
	id := heromodels.set[heromodels.Calendar](mut payload) or {
		println('error setting calendar $err')
		return jsonrpc.internal_error
	}
	return jsonrpc.new_response(request.id, id.str())
}

pub fn calendar_delete(request jsonrpc.Request) !jsonrpc.Response {
	payload := jsonrpc.decode_payload[u32](request.params) or { return jsonrpc.invalid_params }
	heromodels.delete[heromodels.Calendar](payload) or { return jsonrpc.internal_error }
	return jsonrpc.new_response(request.id, '')
}

pub fn calendar_list(request jsonrpc.Request) !jsonrpc.Response {
	result := heromodels.list[heromodels.Calendar]() or { return jsonrpc.internal_error }
	return jsonrpc.new_response(request.id, json.encode(result))
}
@@ -1,110 +0,0 @@
module openrpc

import json
import freeflowuniverse.herolib.hero.heromodels

// Comment-specific argument structures
@[params]
pub struct CommentGetArgs {
pub mut:
	id     ?u32
	author ?u32
	parent ?u32
}

@[params]
pub struct CommentDeleteArgs {
pub mut:
	id u32
}

// // comment_get retrieves comments based on the provided arguments
// pub fn comment_get(params string) !string {
// 	// Handle empty params
// 	if params == 'null' || params == '{}' {
// 		return error('No valid search criteria provided. Please specify id, author, or parent.')
// 	}

// 	args := json.decode(CommentGetArgs, params)!

// 	// If ID is provided, get specific comment
// 	if id := args.id {
// 		comment := heromodels.comment_get(id)!
// 		return json.encode(comment)
// 	}

// 	// If author is provided, find comments by author
// 	if author := args.author {
// 		return get_comments_by_author(author)!
// 	}

// 	// If parent is provided, find child comments
// 	if parent := args.parent {
// 		return get_comments_by_parent(parent)!
// 	}

// 	return error('No valid search criteria provided. Please specify id, author, or parent.')
// }

// // comment_set creates or updates a comment
// pub fn comment_set(params string) !string {
// 	comment_arg := json.decode(heromodels.CommentArgExtended, params)!
// 	id := heromodels.comment_set(comment_arg)!
// 	return json.encode({'id': id})
// }

// // comment_delete removes a comment by ID
// pub fn comment_delete(params string) !string {
// 	args := json.decode(CommentDeleteArgs, params)!

// 	// Check if comment exists
// 	if !heromodels.exists[heromodels.Comment](args.id)! {
// 		return error('Comment with id ${args.id} does not exist')
// 	}

// 	// Delete using core method
// 	heromodels.delete[heromodels.Comment](args.id)!

// 	result_json := '{"success": true, "id": ${args.id}}'
// 	return result_json
// }

// // comment_list returns all comment IDs
// pub fn comment_list() !string {
// 	comments := heromodels.list[heromodels.Comment]()!
// 	mut ids := []u32{}

// 	for comment in comments {
// 		ids << comment.id
// 	}

// 	return json.encode(ids)
// }

// // Helper function to get comments by author
// fn get_comments_by_author(author u32) !string {
// 	all_comments := heromodels.list[heromodels.Comment]()!
// 	mut matching_comments := []heromodels.Comment{}

// 	for comment in all_comments {
// 		if comment.author == author {
// 			matching_comments << comment
// 		}
// 	}

// 	return json.encode(matching_comments)
// }

// // Helper function to get comments by parent
// fn get_comments_by_parent(parent u32) !string {
// 	all_comments := heromodels.list[heromodels.Comment]()!
// 	mut matching_comments := []heromodels.Comment{}

// 	for comment in all_comments {
// 		if comment.parent == parent {
// 			matching_comments << comment
// 		}
// 	}

// 	return json.encode(matching_comments)
// }
@@ -1,9 +0,0 @@
module openrpc

import freeflowuniverse.herolib.schemas.openrpc
import freeflowuniverse.herolib.hero.heromodels

// new_heromodels_server creates a new HeroModels RPC server
pub fn test_new_heromodels_handler() ! {
	handler := new_heromodels_handler()!
}
@@ -1,213 +0,0 @@
{
  "openrpc": "1.0.0-rc1",
  "info": {
    "version": "1.0.0",
    "title": "HeroModels OpenRPC API",
    "description": "OpenRPC API for HeroModels comment management over Unix socket",
    "contact": {
      "name": "HeroLib Team",
      "url": "https://github.com/freeflowuniverse/herolib"
    }
  },
  "servers": [
    {
      "name": "Unix Socket Server",
      "url": "${server.socket_path}",
      "description": "Unix domain socket server for HeroModels"
    }
  ],
  "methods": [
    {
      "name": "comment_get",
      "description": "Retrieve comments by ID, author, or parent",
      "params": [
        {
          "name": "args",
          "description": "Comment search arguments",
          "required": true,
          "schema": {
            "type": "object",
            "properties": {
              "id": {
                "type": "integer",
                "description": "Comment ID to retrieve"
              },
              "author": {
                "type": "integer",
                "description": "Author ID to filter by"
              },
              "parent": {
                "type": "integer",
                "description": "Parent comment ID to filter by"
              }
            }
          }
        }
      ],
      "result": {
        "name": "comments",
        "description": "Comment(s) matching the criteria",
        "schema": {
          "oneOf": [
            {
              "$ref": "#/components/schemas/Comment"
            },
            {
              "type": "array",
              "items": {
                "$ref": "#/components/schemas/Comment"
              }
            }
          ]
        }
      }
    },
    {
      "name": "comment_set",
      "description": "Create a new comment",
      "params": [
        {
          "name": "comment",
          "description": "Comment data to create",
          "required": true,
          "schema": {
            "$ref": "#/components/schemas/CommentArg"
          }
        }
      ],
      "result": {
        "name": "result",
        "description": "Created comment ID",
        "schema": {
          "type": "object",
          "properties": {
            "id": {
              "type": "integer",
              "description": "ID of the created comment"
            }
          }
        }
      }
    },
    {
      "name": "comment_delete",
      "description": "Delete a comment by ID",
      "params": [
        {
          "name": "args",
          "description": "Comment deletion arguments",
          "required": true,
          "schema": {
            "type": "object",
            "properties": {
              "id": {
                "type": "integer",
                "description": "ID of comment to delete"
              }
            },
            "required": ["id"]
          }
        }
      ],
      "result": {
        "name": "result",
        "description": "Deletion result",
        "schema": {
          "type": "object",
          "properties": {
            "success": {
              "type": "boolean"
            },
            "id": {
              "type": "integer"
            }
          }
        }
      }
    },
    {
      "name": "comment_list",
      "description": "List all comment IDs",
      "params": [],
      "result": {
        "name": "ids",
        "description": "Array of all comment IDs",
        "schema": {
          "type": "array",
          "items": {
            "type": "integer"
          }
        }
      }
    },
    {
      "name": "discover",
      "description": "Get the OpenRPC specification for this service",
      "params": [],
      "result": {
        "name": "spec",
        "description": "OpenRPC specification",
        "schema": {
          "type": "object"
        }
      }
    }
  ],
  "components": {
    "schemas": {
      "Comment": {
        "type": "object",
        "properties": {
          "id": {
            "type": "integer",
            "description": "Unique comment identifier"
          },
          "comment": {
            "type": "string",
            "description": "Comment text content"
          },
          "parent": {
            "type": "integer",
            "description": "Parent comment ID (0 if top-level)"
          },
          "updated_at": {
            "type": "integer",
            "description": "Unix timestamp of last update"
          },
          "author": {
            "type": "integer",
            "description": "Author user ID"
          }
        },
        "required": [
          "id",
          "comment",
          "parent",
          "updated_at",
          "author"
        ]
      },
      "CommentArg": {
        "type": "object",
        "properties": {
          "comment": {
            "type": "string",
            "description": "Comment text content"
          },
          "parent": {
            "type": "integer",
            "description": "Parent comment ID (0 if top-level)"
          },
          "author": {
            "type": "integer",
            "description": "Author user ID"
          }
        },
        "required": [
          "comment",
          "author"
        ]
      }
    }
  }
}
@@ -1,26 +0,0 @@
module openrpc

import freeflowuniverse.herolib.schemas.openrpc

// HeroModelsServer extends the base openrpcserver.RPCServer with heromodels-specific functionality
pub struct HeroModelsServer {
	openrpc.UNIXServer
}

@[params]
pub struct HeroModelsServerArgs {
pub mut:
	socket_path string = '/tmp/heromodels'
}

// new_heromodels_server creates a new HeroModels RPC server
pub fn new_heromodels_server(args HeroModelsServerArgs) !&HeroModelsServer {
	mut base_server := openrpc.new_unix_server(
		new_heromodels_handler()!,
		socket_path: args.socket_path
	)!

	return &HeroModelsServer{
		UNIXServer: *base_server
	}
}
@@ -1,31 +0,0 @@
distill vlang objects out of the calendar/contact/circle and create the missing parts

organize per root object which are @[heap] and in separate file with name.v

the rootobjects are

- user
- group (which users are members and in which role, can be admin, writer, reader, can be linked to subgroups)
- calendar (references to event, group)
- calendar_event (everything related to an event on calendar, link to one or more fs_file)
- project (grouping per project, defines swimlanes and milestones, this allows us to visualize as kanban, link to group, link to one or more fs_file)
- project_issue (an issue is a specific type, e.g. task, story, bug, question, …), issue is linked to project by id, also defines priority, on which swimlane, deadline, assignees, …, has tags, link to one or more fs_file
- chat_group (link to group, name/description/tags)
- chat_message (link to chat_group, link to parent_chat_messages and what type of link e.g. reply or reference, status, …, link to one or more fs_file)
- fs = filesystem (link to group)
- fs_dir = directory in filesystem, link to parent, link to group
- fs_file (link to one or more fs_dir, list of references to blobs as blake192)
- fs_symlink (can be link to dir or file)
- fs_blob (the data itself, max size 1 MB, binary data, id = blake192)

the groups define how people can interact with the parts, e.g. calendar linked to group, so readers of that group can read and have a copy of the info linked to that group

all the objects are identified by their blake192 (based on the content)

there is a special table which has links between a blake192 and its previous & next version, so we can always walk the tree, both parts are indexed (this is independent of type of object)
34 lib/hero/heromodels/readme.md Normal file
@@ -0,0 +1,34 @@

## unix socket based RPC server

see lib/hero/heromodels/rpc/rpc_comment.v for an example of how to implement RPC methods.

```v
import freeflowuniverse.herolib.hero.heromodels.rpc

// starts the rpc server
rpc.start()!
```

```bash
# to test the discover function, this returns the openrpc specification:
echo '{"jsonrpc":"2.0","method":"rpc.discover","params":[],"id":1}' | nc -U /tmp/heromodels

# to test interactively:
nc -U /tmp/heromodels

# then e.g. paste the following in (needs to be on one line for openrpc to work):
{"jsonrpc":"2.0","method":"comment_set","params":{"comment":"Hello world!","parent":0,"author":42},"id":1}
```

see lib/hero/heromodels/rpc/openrpc.json for the full openrpc specification
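
The same request can be issued from V instead of `nc`; a minimal sketch using the standard `net.unix` module (the socket path and JSON payload mirror the examples above; the printed result is illustrative):

```v
import net.unix

fn main() {
	mut conn := unix.connect_stream('/tmp/heromodels') or { panic(err) }
	defer {
		conn.close() or {}
	}
	req := '{"jsonrpc":"2.0","method":"comment_set","params":{"comment":"Hello world!","parent":0,"author":42},"id":1}'
	conn.write_string(req) or { panic(err) }
	mut buf := []u8{len: 4096}
	n := conn.read(mut buf) or { panic(err) }
	println(buf[..n].bytestr()) // e.g. {"jsonrpc":"2.0","result":"1","id":1}
}
```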
28 lib/hero/heromodels/rpc/factory.v Normal file
@@ -0,0 +1,28 @@
module rpc

import freeflowuniverse.herolib.schemas.openrpc
import os

const openrpc_path = os.join_path(os.dir(@FILE), 'openrpc.json')

@[params]
pub struct ServerArgs {
pub mut:
	socket_path string = '/tmp/heromodels'
}

pub fn start(args ServerArgs) ! {
	mut openrpc_handler := openrpc.new_handler(openrpc_path)!

	openrpc_handler.register_procedure_handle('comment_get', comment_get)
	openrpc_handler.register_procedure_handle('comment_set', comment_set)
	openrpc_handler.register_procedure_handle('comment_delete', comment_delete)
	openrpc_handler.register_procedure_handle('comment_list', comment_list)

	openrpc_handler.register_procedure_handle('calendar_get', calendar_get)
	openrpc_handler.register_procedure_handle('calendar_set', calendar_set)
	openrpc_handler.register_procedure_handle('calendar_delete', calendar_delete)
	openrpc_handler.register_procedure_handle('calendar_list', calendar_list)

	openrpc.start_unix_server(openrpc_handler, socket_path: args.socket_path)!
}
315 lib/hero/heromodels/rpc/openrpc.json Normal file
@@ -0,0 +1,315 @@
{
  "openrpc": "1.0.0",
  "info": {
    "title": "Hero Models API",
    "version": "1.0.0"
  },
  "methods": [
    {
      "name": "comment_get",
      "summary": "Get a comment by ID",
      "params": [
        {
          "name": "id",
          "description": "ID of comment to fetch",
          "required": true,
          "schema": {
            "type": "integer",
            "minimum": 0
          }
        }
      ],
      "result": {
        "name": "comment",
        "description": "Comment object",
        "schema": {
          "$ref": "#/components/schemas/Comment"
        }
      }
    },
    {
      "name": "comment_set",
      "summary": "Create or update a comment",
      "params": [
        {
          "name": "comment",
          "description": "Comment text",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "name": "parent",
          "description": "ID of parent comment if any, 0 means none",
          "schema": {
            "type": "integer",
            "minimum": 0
          }
        },
        {
          "name": "author",
          "description": "ID of the author user",
          "schema": {
            "type": "integer",
            "minimum": 0
          }
        }
      ],
      "result": {
        "name": "id",
        "description": "ID of the created/updated comment",
        "schema": {
          "type": "integer",
          "minimum": 0
        }
      }
    },
    {
      "name": "comment_delete",
      "summary": "Delete a comment by ID",
      "params": [
        {
          "name": "id",
          "description": "ID of comment to delete",
          "required": true,
          "schema": {
            "type": "integer",
            "minimum": 0
          }
        }
      ],
      "result": {
        "name": "result",
        "description": "Success result",
        "schema": {
          "type": "boolean"
        }
      }
    },
    {
      "name": "comment_list",
      "summary": "List all comments",
      "params": [],
      "result": {
        "name": "comments",
        "description": "List of all comment objects",
        "schema": {
          "type": "array",
          "items": {
            "$ref": "#/components/schemas/Comment"
          }
        }
      }
    },
    {
      "name": "calendar_get",
      "summary": "Get a calendar by ID",
      "params": [
        {
          "name": "id",
          "description": "ID of calendar to fetch",
          "required": true,
          "schema": {
            "type": "integer",
            "minimum": 0
          }
        }
      ],
      "result": {
        "name": "calendar",
        "description": "Calendar object",
        "schema": {
          "$ref": "#/components/schemas/Calendar"
        }
      }
    },
    {
      "name": "calendar_set",
      "summary": "Create or update a calendar",
      "params": [
        {
          "name": "name",
          "description": "Name of the calendar",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "name": "description",
          "description": "Description of the calendar",
          "schema": {
            "type": "string"
          }
        },
        {
          "name": "color",
          "description": "Hex color code for the calendar",
          "schema": {
            "type": "string"
          }
        },
        {
          "name": "timezone",
          "description": "Timezone of the calendar",
          "schema": {
            "type": "string"
          }
        },
        {
          "name": "is_public",
          "description": "Whether the calendar is public",
          "schema": {
            "type": "boolean"
          }
        },
        {
          "name": "events",
          "description": "IDs of calendar events",
          "schema": {
            "type": "array",
            "items": {
              "type": "integer",
              "minimum": 0
            }
          }
        }
      ],
      "result": {
        "name": "id",
        "description": "ID of the created/updated calendar",
        "schema": {
          "type": "integer",
          "minimum": 0
        }
      }
    },
    {
      "name": "calendar_delete",
      "summary": "Delete a calendar by ID",
      "params": [
        {
          "name": "id",
          "description": "ID of calendar to delete",
          "required": true,
          "schema": {
            "type": "integer",
            "minimum": 0
          }
        }
      ],
      "result": {
        "name": "result",
        "description": "Success result",
        "schema": {
          "type": "boolean"
        }
      }
    },
    {
      "name": "calendar_list",
      "summary": "List all calendars",
      "params": [],
      "result": {
        "name": "calendars",
        "description": "List of all calendar objects",
        "schema": {
          "type": "array",
          "items": {
            "$ref": "#/components/schemas/Calendar"
          }
        }
      }
    }
  ],
  "components": {
    "schemas": {
      "Base": {
        "type": "object",
        "properties": {
          "id": {
            "type": "integer",
            "minimum": 0
          },
          "name": {
            "type": "string"
          },
          "description": {
            "type": "string"
          },
          "created_at": {
            "type": "integer"
          },
          "updated_at": {
            "type": "integer"
          },
          "securitypolicy": {
            "type": "integer",
            "minimum": 0
          },
          "tags": {
            "type": "integer",
            "minimum": 0
          },
          "comments": {
            "type": "array",
            "items": {
              "type": "integer",
              "minimum": 0
            }
          }
        }
      },
      "Comment": {
        "title": "Comment",
        "description": "A comment object",
        "allOf": [
          {
            "$ref": "#/components/schemas/Base"
          }
        ],
        "properties": {
          "comment": {
            "type": "string"
          },
          "parent": {
            "type": "integer",
            "minimum": 0
          },
          "author": {
            "type": "integer",
            "minimum": 0
          }
        }
      },
      "Calendar": {
        "title": "Calendar",
        "description": "A calendar object",
        "allOf": [
          {
            "$ref": "#/components/schemas/Base"
          }
        ],
        "properties": {
          "events": {
            "type": "array",
            "items": {
              "type": "integer",
              "minimum": 0
            }
          },
          "color": {
            "type": "string"
          },
          "timezone": {
            "type": "string"
          },
          "is_public": {
            "type": "boolean"
          }
        }
      }
    }
  }
}
79 lib/hero/heromodels/rpc/rpc_calendar.v Normal file
@@ -0,0 +1,79 @@
module rpc

import json
import freeflowuniverse.herolib.schemas.jsonrpc { Request, Response, new_response_true, new_response_u32 }
import freeflowuniverse.herolib.hero.heromodels

// Calendar-specific argument structures
@[params]
pub struct CalendarGetArgs {
pub mut:
	id u32 @[required]
}

@[params]
pub struct CalendarSetArgs {
pub mut:
	name        string @[required]
	description string
	color       string
	timezone    string
	is_public   bool
	events      []u32
}

@[params]
pub struct CalendarDeleteArgs {
pub mut:
	id u32 @[required]
}

pub fn calendar_get(request Request) !Response {
	payload := jsonrpc.decode_payload[CalendarGetArgs](request.params) or {
		return jsonrpc.invalid_params
	}

	mut mydb := heromodels.new()!
	calendar := mydb.calendar.get(payload.id)!

	return jsonrpc.new_response(request.id, json.encode(calendar))
}

pub fn calendar_set(request Request) !Response {
	payload := jsonrpc.decode_payload[CalendarSetArgs](request.params) or {
		return jsonrpc.invalid_params
	}

	mut mydb := heromodels.new()!
	mut calendar_obj := mydb.calendar.new(
		name: payload.name
		description: payload.description
		color: payload.color
		timezone: payload.timezone
		is_public: payload.is_public
		events: payload.events
	)!

	id := mydb.calendar.set(calendar_obj)!

	return new_response_u32(request.id, id)
}

pub fn calendar_delete(request Request) !Response {
	payload := jsonrpc.decode_payload[CalendarDeleteArgs](request.params) or {
		return jsonrpc.invalid_params
	}

	mut mydb := heromodels.new()!
	mydb.calendar.delete(payload.id)!

	// returns
	return new_response_true(request.id) // return true as jsonrpc (bool)
}

pub fn calendar_list(request Request) !Response {
	mut mydb := heromodels.new()!
	calendars := mydb.calendar.list()!

	return jsonrpc.new_response(request.id, json.encode(calendars))
}
72 lib/hero/heromodels/rpc/rpc_comment.v Normal file
@@ -0,0 +1,72 @@
module rpc

import json
import freeflowuniverse.herolib.schemas.jsonrpc { Request, Response, new_response_true, new_response_u32 }
import freeflowuniverse.herolib.hero.heromodels

// Comment-specific argument structures
@[params]
pub struct CommentGetArgs {
pub mut:
	id u32 @[required]
}

@[params]
pub struct CommentSetArgs {
pub mut:
	comment string @[required]
	parent  u32
	author  u32
}

@[params]
pub struct CommentDeleteArgs {
pub mut:
	id u32 @[required]
}

pub fn comment_get(request Request) !Response {
	payload := jsonrpc.decode_payload[CommentGetArgs](request.params) or {
		return jsonrpc.invalid_params
	}

	mut mydb := heromodels.new()!
	comment := mydb.comments.get(payload.id)!

	return jsonrpc.new_response(request.id, json.encode(comment))
}

pub fn comment_set(request Request) !Response {
	payload := jsonrpc.decode_payload[CommentSetArgs](request.params) or {
		return jsonrpc.invalid_params
	}

	mut mydb := heromodels.new()!
	mut comment_obj := mydb.comments.new(
		comment: payload.comment
		parent: payload.parent
		author: payload.author
	)!

	id := mydb.comments.set(comment_obj)!

	return new_response_u32(request.id, id)
}

pub fn comment_delete(request Request) !Response {
	payload := jsonrpc.decode_payload[CommentDeleteArgs](request.params) or {
		return jsonrpc.invalid_params
	}

	mut mydb := heromodels.new()!
	mydb.comments.delete(payload.id)!

	return new_response_true(request.id) // return true as jsonrpc (bool)
}

pub fn comment_list(request Request) !Response {
	mut mydb := heromodels.new()!
	comments := mydb.comments.list()!

	return jsonrpc.new_response(request.id, json.encode(comments))
}
@@ -40,6 +40,38 @@ pub fn new_response(id int, result string) Response {
 	}
 }

+pub fn new_response_true(id int) Response {
+	return Response{
+		jsonrpc: jsonrpc_version
+		result: 'true'
+		id: id
+	}
+}
+
+pub fn new_response_false(id int) Response {
+	return Response{
+		jsonrpc: jsonrpc_version
+		result: 'false'
+		id: id
+	}
+}
+
+pub fn new_response_int(id int, result int) Response {
+	return Response{
+		jsonrpc: jsonrpc_version
+		result: result.str()
+		id: id
+	}
+}
+
+pub fn new_response_u32(id int, result u32) Response {
+	return Response{
+		jsonrpc: jsonrpc_version
+		result: result.str()
+		id: id
+	}
+}
+
 // new_error_response creates an error JSON-RPC response with the given error object.
 //
 // Parameters:
@@ -65,7 +97,9 @@ pub fn new_error_response(id int, error RPCError) Response {
 // Returns:
 // - A Response object or an error if parsing fails or the response is invalid
 pub fn decode_response(data string) !Response {
-	raw := json2.raw_decode(data) or { return error('Failed to decode JSONRPC response ${data}\n${err}') }
+	raw := json2.raw_decode(data) or {
+		return error('Failed to decode JSONRPC response ${data}\n${err}')
+	}
 	raw_map := raw.as_map()

 	// Validate that the response contains either result or error, but not both or neither
302 lib/schemas/openrpc/_archive/testdata/openrpc.json vendored Normal file
@@ -0,0 +1,302 @@
{
  "openrpc": "1.0.0-rc1",
  "info": {
    "version": "1.0.0",
    "title": "Petstore",
    "license": {
      "name": "MIT"
    }
  },
  "servers": [
    {
      "name": "localhost",
      "url": "http://localhost:8080"
    }
  ],
  "methods": [
    {
      "name": "list_pets",
      "summary": "List all pets",
      "tags": [
        {
          "name": "pets"
        }
      ],
      "params": [
        {
          "name": "limit",
          "description": "How many items to return at one time (max 100)",
          "required": false,
          "schema": {
            "type": "integer",
            "minimum": 1
          }
        }
      ],
      "result": {
        "name": "pets",
        "description": "A paged array of pets",
        "schema": {
          "type": "array",
          "items": {
            "$ref": "#/components/schemas/Pet"
          }
        }
      },
      "errors": [
        {
          "code": 100,
          "message": "pets busy"
        }
      ],
      "examples": [
        {
          "name": "listPetExample",
          "description": "List pet example",
          "params": [
            {
              "name": "limit",
              "value": 1
            }
          ],
          "result": {
            "name": "listPetResultExample",
            "value": [
              {
                "id": 7,
                "name": "fluffy",
                "tag": "poodle"
              }
            ]
          }
        }
      ]
    },
    {
      "name": "create_pet",
      "summary": "Create a pet",
      "tags": [
        {
          "name": "pets"
        }
      ],
      "params": [
        {
          "name": "newPetName",
          "description": "Name of pet to create",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "name": "newPetTag",
          "description": "Pet tag to create",
          "schema": {
            "type": "string"
          }
        }
      ],
      "examples": [
        {
          "name": "createPetExample",
          "description": "Create pet example",
          "params": [
            {
              "name": "newPetName",
              "value": "fluffy"
            },
            {
              "name": "newPetTag",
              "value": "poodle"
            }
          ],
          "result": {
            "value": 7
          }
        }
      ],
      "result": {
        "$ref": "#/components/contentDescriptors/PetId"
      }
    },
    {
      "name": "get_pet",
      "summary": "Info for a specific pet",
      "tags": [
        {
          "name": "pets"
        }
      ],
      "params": [
        {
          "$ref": "#/components/contentDescriptors/PetId"
        }
      ],
      "result": {
        "name": "pet",
        "description": "Expected response to a valid request",
        "schema": {
          "$ref": "#/components/schemas/Pet"
        }
      },
      "examples": [
        {
          "name": "getPetExample",
          "description": "Get pet example",
          "params": [
            {
              "name": "petId",
              "value": 7
            }
          ],
          "result": {
            "name": "getPetExampleResult",
            "value": {
              "name": "fluffy",
              "tag": "poodle",
              "id": 7
            }
          }
        }
      ]
    },
    {
      "name": "update_pet",
      "summary": "Update a pet",
      "tags": [
        {
          "name": "pets"
        }
      ],
      "params": [
        {
          "$ref": "#/components/contentDescriptors/PetId"
        },
        {
          "name": "updatedPetName",
          "description": "New name for the pet",
          "required": true,
          "schema": {
            "type": "string"
          }
        },
        {
          "name": "updatedPetTag",
          "description": "New tag for the pet",
          "schema": {
            "type": "string"
          }
        }
      ],
      "result": {
        "name": "pet",
        "description": "Updated pet object",
        "schema": {
          "$ref": "#/components/schemas/Pet"
        }
      },
      "examples": [
        {
          "name": "updatePetExample",
          "description": "Update pet example",
          "params": [
            {
              "name": "petId",
              "value": 7
            },
            {
              "name": "updatedPetName",
              "value": "fluffy updated"
            },
            {
              "name": "updatedPetTag",
              "value": "golden retriever"
            }
          ],
          "result": {
            "name": "updatePetExampleResult",
            "value": {
              "name": "fluffy updated",
              "tag": "golden retriever",
              "id": 7
            }
          }
        }
      ]
    },
    {
      "name": "delete_pet",
      "summary": "Delete a pet",
      "tags": [
        {
          "name": "pets"
        }
      ],
      "params": [
        {
          "$ref": "#/components/contentDescriptors/PetId"
        }
      ],
      "result": {
        "name": "success",
        "description": "Boolean indicating success",
        "schema": {
          "type": "boolean"
        }
      },
      "examples": [
        {
          "name": "deletePetExample",
          "description": "Delete pet example",
          "params": [
            {
              "name": "petId",
              "value": 7
            }
          ],
          "result": {
            "name": "deletePetExampleResult",
            "value": true
          }
        }
      ]
    }
  ],
  "components": {
    "contentDescriptors": {
      "PetId": {
        "name": "petId",
        "required": true,
        "description": "The ID of the pet",
        "schema": {
          "$ref": "#/components/schemas/PetId"
        }
      }
    },
    "schemas": {
      "PetId": {
        "type": "integer",
        "minimum": 0
      },
      "Pet": {
        "type": "object",
        "required": [
          "id",
          "name"
        ],
        "properties": {
          "id": {
            "$ref": "#/components/schemas/PetId"
          },
          "name": {
            "type": "string"
          },
          "tag": {
            "type": "string"
          }
        }
      }
    }
  }
}
@@ -20,7 +20,7 @@ pub fn new(params Params) !OpenRPC {
 	}

 	text := if params.path != '' {
-		os.read_file(params.path)!
+		os.read_file(params.path) or { return error('Could not read openrpc spec file at ${params.path}: ${err}') }
 	} else {
 		params.text
 	}
16 lib/schemas/openrpc/handler_factory.v Normal file
@@ -0,0 +1,16 @@
module openrpc

// path to an openrpc.json file
pub fn new_handler(openrpc_path string) !Handler {
	mut openrpc_handler := Handler{
		specification: new(path: openrpc_path)!
	}

	return openrpc_handler
}
@@ -9,10 +9,10 @@ import freeflowuniverse.herolib.schemas.jsonschema { Reference, SchemaRef }
 pub struct OpenRPC {
 pub mut:
 	openrpc string = '1.0.0' // This string MUST be the semantic version number of the OpenRPC Specification version that the OpenRPC document uses.
-	info Info @[omitempty] // Provides metadata about the API.
-	servers []Server @[omitempty]// An array of Server Objects, which provide connectivity information to a target server.
-	methods []Method @[omitempty]// The available methods for the API.
-	components Components @[omitempty] // An element to hold various schemas for the specification.
+	info          Info          @[omitempty] // Provides metadata about the API.
+	servers       []Server      @[omitempty] // An array of Server Objects, which provide connectivity information to a target server.
+	methods       []Method      @[omitempty] // The available methods for the API.
+	components    Components    @[omitempty] // An element to hold various schemas for the specification.
 	external_docs []ExternalDocs @[json: externalDocs; omitempty] // Additional external documentation.
 }

@@ -20,12 +20,12 @@ pub mut:
 // The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.
 pub struct Info {
 pub:
-	title string @[omitempty] // The title of the application.
-	description string @[omitempty] // A verbose description of the application.
+	title            string  @[omitempty] // The title of the application.
+	description      string  @[omitempty] // A verbose description of the application.
 	terms_of_service string @[json: termsOfService; omitempty] // A URL to the Terms of Service for the API. MUST be in the format of a URL.
-	contact Contact @[omitempty] // The contact information for the exposed API.
-	license License @[omitempty] // The license information for the exposed API.
-	version string @[omitempty] // The version of the OpenRPC document (which is distinct from the OpenRPC Specification version or the API implementation version).
+	contact          Contact @[omitempty] // The contact information for the exposed API.
+	license          License @[omitempty] // The license information for the exposed API.
+	version          string  @[omitempty] // The version of the OpenRPC document (which is distinct from the OpenRPC Specification version or the API implementation version).
 }

 // Contact information for the exposed API.
@@ -168,11 +168,11 @@ pub:
 pub struct Components {
 pub mut:
 	content_descriptors map[string]ContentDescriptorRef @[json: contentDescriptors; omitempty] // An object to hold reusable Content Descriptor Objects.
-	schemas map[string]SchemaRef @[omitempty] // An object to hold reusable Schema Objects.
-	examples map[string]Example @[omitempty] // An object to hold reusable Example Objects.
-	links map[string]Link @[omitempty] // An object to hold reusable Link Objects.
-	error map[string]Error @[omitempty] // An object to hold reusable Error Objects.
-	example_pairing_objects map[string]ExamplePairing @[json: examplePairingObjects; omitempty] // An object to hold reusable Example Pairing Objects.
+	schemas                 map[string]SchemaRef      @[omitempty] // An object to hold reusable Schema Objects.
+	examples                map[string]Example        @[omitempty] // An object to hold reusable Example Objects.
+	links                   map[string]Link           @[omitempty] // An object to hold reusable Link Objects.
+	error                   map[string]Error          @[omitempty] // An object to hold reusable Error Objects.
+	example_pairing_objects map[string]ExamplePairing @[json: examplePairingObjects; omitempty] // An object to hold reusable Example Pairing Objects.
 	tags map[string]Tag // An object to hold reusable Tag Objects.
 }
65
lib/schemas/openrpc/readme.md
Normal file
65
lib/schemas/openrpc/readme.md
Normal file
@@ -0,0 +1,65 @@
|
||||
|
||||
# OpenRPC Module
|
||||
|
||||
This module provides a complete implementation of the [OpenRPC specification](https://open-rpc.org) for V, enabling structured JSON-RPC 2.0 API development with schema-based validation and automatic documentation.
|
||||
|
||||
## Purpose
|
||||
|
||||
- Define and validate JSON-RPC APIs using OpenRPC schema definitions
|
||||
- Handle JSON-RPC requests/responses over HTTP or Unix sockets
|
||||
- Automatic discovery endpoint (`rpc.discover`) for API documentation
|
||||
- Type-safe request/response handling
|
||||
- Support for reusable components (schemas, parameters, errors, examples)
|
||||
|
||||
## Usage
|
||||
|
||||
### 1. Create an OpenRPC Handler
|
||||
|
||||
Create a handler with your OpenRPC specification:
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.schemas.openrpc
|
||||
|
||||
// From file path
|
||||
mut handler := openrpc.new_handler('path/to/openrpc.json')!
|
||||
|
||||
// From specification text
|
||||
mut handler := openrpc.new(text: spec_json)!
|
||||
```
|
||||
|
||||
### 2. Register Methods
|
||||
|
||||
Register your method handlers to process incoming JSON-RPC requests:
|
||||
|
||||
```v
|
||||
fn my_method(request jsonrpc.Request) !jsonrpc.Response {
|
||||
// Decode parameters
|
||||
mut params := json.decode(MyParams, request.params) or {
|
||||
return jsonrpc.invalid_params
|
||||
}
|
||||
|
||||
// Process logic
|
||||
result := process_my_method(params)
|
||||
|
||||
// Return response
|
||||
return jsonrpc.new_response(request.id, json.encode(result))
|
||||
}
|
||||
|
||||
// Register the method
|
||||
handler.register_procedure_handle('my.method', my_method)
|
||||
```
|
||||
|
||||
### 3. Start Server

Launch the server using either HTTP or Unix socket transport:

```v
// HTTP server
mut controller := openrpc.new_http_controller(handler)
controller.run(port: 8080)

// Unix socket server
mut server := openrpc.new_unix_server(handler)!
server.start()!
```

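Putting the three steps together, a minimal end-to-end program might look like the sketch below. The spec path and method name are placeholders, and the `jsonrpc` import path is an assumption rather than something this readme pins down:

```v
import freeflowuniverse.herolib.schemas.openrpc
import freeflowuniverse.herolib.schemas.jsonrpc // assumed import path for the jsonrpc types

// Trivial method handler: echo the raw params back (placeholder logic)
fn my_method(request jsonrpc.Request) !jsonrpc.Response {
	return jsonrpc.new_response(request.id, request.params)
}

fn main() {
	// Build a handler from a spec file (path is a placeholder)
	mut handler := openrpc.new_handler('api/openrpc.json') or { panic(err) }

	// Wire up the method from step 2
	handler.register_procedure_handle('my.method', my_method)

	// Serve over HTTP; rpc.discover is answered automatically
	mut controller := openrpc.new_http_controller(handler)
	controller.run(port: 8080)
}
```
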
@@ -20,6 +20,11 @@ pub mut:
	socket_path string = '/tmp/heromodels'
}

pub fn start_unix_server(handler Handler, params UNIXServerParams) ! {
	mut server := new_unix_server(handler, params)!
	server.start()!
}

pub fn new_unix_server(handler Handler, params UNIXServerParams) !&UNIXServer {
	// Remove existing socket file if it exists
	if os.exists(params.socket_path) {

@@ -27,21 +27,21 @@ pub fn test_new_unix_server() ! {
	// client()
}

pub fn test_unix_server_start() ! {
	mut spec := OpenRPC{}
	handler := Handler{
		specification: new(path: openrpc_path)!
	}
	mut server := new_unix_server(handler)!
// pub fn test_unix_server_start() ! {
// 	mut spec := OpenRPC{}
// 	handler := Handler{
// 		specification: new(path: openrpc_path)!
// 	}
// 	mut server := new_unix_server(handler)!

	defer {
		server.close() or {panic(err)}
	}
// 	defer {
// 		server.close() or {panic(err)}
// 	}

	spawn server.start()
// 	spawn server.start()

	// client()
}
// 	// client()
// }

pub fn test_unix_server_handle_connection() ! {
	mut spec := OpenRPC{}
@@ -54,7 +54,7 @@ pub fn test_unix_server_handle_connection() ! {
	spawn server.start()

	// Give server time to start
	// time.sleep(50 * time.millisecond)
	time.sleep(50 * time.millisecond)

	// Connect to the server
	mut conn := unix.connect_stream(server.socket_path)!
@@ -63,6 +63,7 @@ pub fn test_unix_server_handle_connection() ! {
		conn.close() or {panic(err)}
		server.close() or {panic(err)}
	}
	println('Connected to server at ${server.socket_path}')

	// Test 1: Send rpc.discover request
	discover_request := jsonrpc.new_request('rpc.discover', '')

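For reference, the client side exercised by this test can be sketched as a standalone program. The socket path mirrors the module default above; the use of `json.encode` for wire serialization is an assumption, and reading the response is omitted:

```v
import json
import net.unix
import freeflowuniverse.herolib.schemas.jsonrpc // assumed import path

fn main() {
	// Connect to a running server (socket path is the module default)
	mut conn := unix.connect_stream('/tmp/heromodels') or { panic(err) }
	defer {
		conn.close() or { panic(err) }
	}

	// Ask the server to describe itself via the discovery method
	discover_request := jsonrpc.new_request('rpc.discover', '')
	conn.write_string(json.encode(discover_request)) or { panic(err) }
}
```
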
@@ -1,31 +1,31 @@
module crun

pub fn example_heropods_compatible() ! {
	mut configs := map[string]&CrunConfig{}
	// Create a container configuration compatible with heropods template
	mut config := new(mut configs, name: 'heropods-example')!

	// Configure to match the template
	// Configure to match the template - disable terminal for background containers
	config.set_terminal(false)
	config.set_command(['/bin/sh'])
		.set_working_dir('/')
		.set_user(0, 0, [])
		.add_env('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin')
		.add_env('TERM', 'xterm')
		.set_rootfs('${rootfs_path}', false) // This will be replaced by the actual path
		.set_hostname('container')
		.set_no_new_privileges(true)
	config.set_working_dir('/')
	config.set_user(0, 0, [])
	config.add_env('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin')
	config.add_env('TERM', 'xterm')
	config.set_rootfs('/tmp/rootfs', false) // This will be replaced by the actual path
	config.set_hostname('container')
	config.set_no_new_privileges(true)

	// Add the specific rlimit from template
	config.add_rlimit(.rlimit_nofile, 1024, 1024)

	// Validate the configuration
	config.validate()!

	// Generate and print JSON
	json_output := config.to_json()!
	println(json_output)

	// Save to file
	config.save_to_file('/tmp/heropods_config.json')!
	println('Heropods-compatible configuration saved to /tmp/heropods_config.json')

@@ -35,33 +35,34 @@ pub fn example_custom() ! {
	mut configs := map[string]&CrunConfig{}
	// Create a more complex container configuration
	mut config := new(mut configs, name: 'custom-container')!

	config.set_command(['/usr/bin/my-app', '--config', '/etc/myapp/config.yaml'])
		.set_working_dir('/app')
		.set_user(1000, 1000, [1001, 1002])
		.add_env('MY_VAR', 'my_value')
		.add_env('ANOTHER_VAR', 'another_value')
		.set_rootfs('/path/to/rootfs', false)
		.set_hostname('my-custom-container')
		.set_memory_limit(1024 * 1024 * 1024) // 1GB
		.set_cpu_limits(100000, 50000, 1024) // period, quota, shares
		.set_pids_limit(500)
		.add_mount('/host/path', '/container/path', .bind, [.rw])
		.add_mount('/tmp/cache', '/app/cache', .tmpfs, [.rw, .noexec])
		.add_capability(.cap_sys_admin)
		.remove_capability(.cap_net_raw)
		.add_rlimit(.rlimit_nproc, 100, 50)
		.set_no_new_privileges(true)
	config.set_working_dir('/app')
	config.set_user(1000, 1000, [1001, 1002])
	config.add_env('MY_VAR', 'my_value')
	config.add_env('ANOTHER_VAR', 'another_value')
	config.set_rootfs('/path/to/rootfs', false)
	config.set_hostname('my-custom-container')
	config.set_memory_limit(1024 * 1024 * 1024) // 1GB
	config.set_cpu_limits(100000, 50000, 1024) // period, quota, shares
	config.set_pids_limit(500)
	config.add_mount('/host/path', '/container/path', .bind, [.rw])
	config.add_mount('/tmp/cache', '/app/cache', .tmpfs, [.rw, .noexec])
	config.add_capability(.cap_sys_admin)
	config.remove_capability(.cap_net_raw)
	config.add_rlimit(.rlimit_nproc, 100, 50)
	config.set_no_new_privileges(true)

	// Add some additional security hardening
	config.add_masked_path('/proc/kcore')
		.add_readonly_path('/proc/sys')
	config.add_readonly_path('/proc/sys')

	// Validate before use
	config.validate()!

	// Get the JSON
	json_str := config.to_json()!
	println('Custom container config:')
	println(json_str)
}

@@ -2,11 +2,10 @@ module crun

import freeflowuniverse.herolib.core.texttools

@[params]
pub struct FactoryArgs {
pub mut:
	name string = "default"
	name string = 'default'
}

pub struct CrunConfig {
@@ -23,6 +22,8 @@ pub fn (mount_type MountType) to_string() string {
		.proc { 'proc' }
		.sysfs { 'sysfs' }
		.devpts { 'devpts' }
		.mqueue { 'mqueue' }
		.cgroup { 'cgroup' }
		.nfs { 'nfs' }
		.overlay { 'overlay' }
	}

@@ -120,21 +121,23 @@ pub fn (mut config CrunConfig) set_working_dir(cwd string) &CrunConfig {

pub fn (mut config CrunConfig) set_user(uid u32, gid u32, additional_gids []u32) &CrunConfig {
	config.spec.process.user = User{
		uid: uid
		gid: gid
		additional_gids: additional_gids.clone()
	}
	return config
}

pub fn (mut config CrunConfig) add_env(key string, value string) &CrunConfig {
	// Remove existing env var with same key to avoid duplicates
	config.spec.process.env = config.spec.process.env.filter(!it.starts_with('${key}='))
	config.spec.process.env << '${key}=${value}'
	return config
}

pub fn (mut config CrunConfig) set_rootfs(path string, readonly bool) &CrunConfig {
	config.spec.root = Root{
		path: path
		readonly: readonly
	}
	return config
@@ -165,16 +168,16 @@ pub fn (mut config CrunConfig) set_pids_limit(limit i64) &CrunConfig {
pub fn (mut config CrunConfig) add_mount(destination string, source string, typ MountType, options []MountOption) &CrunConfig {
	config.spec.mounts << Mount{
		destination: destination
		typ: typ.to_string()
		source: source
		options: options.map(it.to_string())
	}
	return config
}

pub fn (mut config CrunConfig) add_capability(cap Capability) &CrunConfig {
	cap_str := cap.to_string()

	if cap_str !in config.spec.process.capabilities.bounding {
		config.spec.process.capabilities.bounding << cap_str
	}

@@ -189,7 +192,7 @@ pub fn (mut config CrunConfig) add_capability(cap Capability) &CrunConfig {

pub fn (mut config CrunConfig) remove_capability(cap Capability) &CrunConfig {
	cap_str := cap.to_string()

	config.spec.process.capabilities.bounding = config.spec.process.capabilities.bounding.filter(it != cap_str)
	config.spec.process.capabilities.effective = config.spec.process.capabilities.effective.filter(it != cap_str)
	config.spec.process.capabilities.permitted = config.spec.process.capabilities.permitted.filter(it != cap_str)
@@ -197,8 +200,11 @@ pub fn (mut config CrunConfig) remove_capability(cap Capability) &CrunConfig {
}

pub fn (mut config CrunConfig) add_rlimit(typ RlimitType, hard u64, soft u64) &CrunConfig {
	// Remove existing rlimit with same type to avoid duplicates
	typ_str := typ.to_string()
	config.spec.process.rlimits = config.spec.process.rlimits.filter(it.typ != typ_str)
	config.spec.process.rlimits << Rlimit{
		typ: typ.to_string()
		typ: typ_str
		hard: hard
		soft: soft
	}
@@ -210,6 +216,11 @@ pub fn (mut config CrunConfig) set_no_new_privileges(value bool) &CrunConfig {
	return config
}

pub fn (mut config CrunConfig) set_terminal(value bool) &CrunConfig {
	config.spec.process.terminal = value
	return config
}

pub fn (mut config CrunConfig) add_masked_path(path string) &CrunConfig {
	if path !in config.spec.linux.masked_paths {
		config.spec.linux.masked_paths << path
@@ -226,67 +237,65 @@ pub fn (mut config CrunConfig) add_readonly_path(path string) &CrunConfig {

pub fn new(mut configs map[string]&CrunConfig, args FactoryArgs) !&CrunConfig {
	name := texttools.name_fix(args.name)

	mut config := &CrunConfig{
		name: name
		spec: create_default_spec()
	}

	configs[name] = config
	return config
}

pub fn get(configs map[string]&CrunConfig, args FactoryArgs) !&CrunConfig {
	name := texttools.name_fix(args.name)
	return configs[name] or {
		return error('crun config with name "${name}" does not exist')
	}
	return configs[name] or { return error('crun config with name "${name}" does not exist') }
}

fn create_default_spec() Spec {
	// Create default spec that matches the heropods template
	mut spec := Spec{
		oci_version: '1.0.2' // Set default here
		platform: Platform{
			os: 'linux'
			arch: 'amd64'
		}
		process: Process{
			terminal: true
			user: User{
				uid: 0
				gid: 0
			}
			args: ['/bin/sh']
			env: [
				'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
				'TERM=xterm'
				'TERM=xterm',
			]
			cwd: '/'
			capabilities: Capabilities{
				bounding: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
				effective: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
				inheritable: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
				permitted: ['CAP_AUDIT_WRITE', 'CAP_KILL', 'CAP_NET_BIND_SERVICE']
			}
			rlimits: [
				Rlimit{
					typ: 'RLIMIT_NOFILE'
					hard: 1024
					soft: 1024
				}
				},
			]
			no_new_privileges: true // No JSON annotation needed here
		}
		root: Root{
			path: 'rootfs'
			readonly: false
		}
		hostname: 'container'
		mounts: create_default_mounts()
		linux: Linux{
			namespaces: create_default_namespaces()
			masked_paths: [
				'/proc/acpi',
				'/proc/kcore',
				'/proc/keys',
@@ -295,7 +304,7 @@ fn create_default_spec() Spec {
				'/proc/timer_stats',
				'/proc/sched_debug',
				'/proc/scsi',
				'/sys/firmware'
				'/sys/firmware',
			]
			readonly_paths: [
				'/proc/asound',
@@ -303,21 +312,34 @@ fn create_default_spec() Spec {
				'/proc/fs',
				'/proc/irq',
				'/proc/sys',
				'/proc/sysrq-trigger'
				'/proc/sysrq-trigger',
			]
		}
	}

	return spec
}

fn create_default_namespaces() []LinuxNamespace {
	return [
		LinuxNamespace{typ: 'pid'},
		LinuxNamespace{typ: 'network'},
		LinuxNamespace{typ: 'ipc'},
		LinuxNamespace{typ: 'uts'},
		LinuxNamespace{typ: 'mount'},
		LinuxNamespace{
			typ: 'pid'
		},
		LinuxNamespace{
			typ: 'network'
		},
		LinuxNamespace{
			typ: 'ipc'
		},
		LinuxNamespace{
			typ: 'uts'
		},
		LinuxNamespace{
			typ: 'cgroup'
		},
		LinuxNamespace{
			typ: 'mount'
		},
	]
}

@@ -325,20 +347,44 @@ fn create_default_mounts() []Mount {
	return [
		Mount{
			destination: '/proc'
			typ: 'proc'
			source: 'proc'
		},
		Mount{
			destination: '/dev'
			typ: 'tmpfs'
			source: 'tmpfs'
			options: ['nosuid', 'strictatime', 'mode=755', 'size=65536k']
		},
		Mount{
			destination: '/dev/pts'
			typ: 'devpts'
			source: 'devpts'
			options: ['nosuid', 'noexec', 'newinstance', 'ptmxmode=0666', 'mode=0620', 'gid=5']
		},
		Mount{
			destination: '/dev/shm'
			typ: 'tmpfs'
			source: 'shm'
			options: ['nosuid', 'noexec', 'nodev', 'mode=1777', 'size=65536k']
		},
		Mount{
			destination: '/dev/mqueue'
			typ: 'mqueue'
			source: 'mqueue'
			options: ['nosuid', 'noexec', 'nodev']
		},
		Mount{
			destination: '/sys'
			typ: 'sysfs'
			source: 'sysfs'
			options: ['nosuid', 'noexec', 'nodev', 'ro']
		},
		Mount{
			destination: '/sys/fs/cgroup'
			typ: 'cgroup'
			source: 'cgroup'
			options: ['nosuid', 'noexec', 'nodev', 'relatime', 'ro']
		},
	]
}

@@ -3,7 +3,7 @@ module crun

// OCI Runtime Spec structures that can be directly encoded to JSON
pub struct Spec {
pub mut:
	oci_version string
	oci_version string @[json: 'ociVersion']
	platform Platform
	process Process
	root Root
@@ -21,21 +21,21 @@ pub mut:

pub struct Process {
pub mut:
	terminal bool = true
	user User
	args []string
	env []string
	cwd string = '/'
	capabilities Capabilities
	rlimits []Rlimit
	no_new_privileges bool
	no_new_privileges bool @[json: 'noNewPrivileges']
}

pub struct User {
pub mut:
	uid u32
	gid u32
	additional_gids []u32
	additional_gids []u32 @[json: 'additionalGids']
}

pub struct Capabilities {
@@ -49,7 +49,7 @@ pub mut:

pub struct Rlimit {
pub mut:
	typ string
	typ string @[json: 'type']
	hard u64
	soft u64
}
@@ -63,26 +63,26 @@ pub mut:

pub struct Mount {
pub mut:
	destination string
	typ string
	typ string @[json: 'type']
	source string
	options []string
}

pub struct Linux {
pub mut:
	namespaces []LinuxNamespace
	resources LinuxResources
	devices []LinuxDevice
	masked_paths []string
	readonly_paths []string
	uid_mappings []LinuxIDMapping
	gid_mappings []LinuxIDMapping
	masked_paths []string @[json: 'maskedPaths']
	readonly_paths []string @[json: 'readonlyPaths']
	uid_mappings []LinuxIDMapping @[json: 'uidMappings']
	gid_mappings []LinuxIDMapping @[json: 'gidMappings']
}

pub struct LinuxNamespace {
pub mut:
	typ string
	path string
	typ string @[json: 'type']
	path string @[omitempty]
}

pub struct LinuxResources {
@@ -95,47 +95,47 @@ pub mut:

pub struct Memory {
pub mut:
	limit u64
	reservation u64
	swap u64
	kernel u64
	swappiness i64
	limit u64 @[omitempty]
	reservation u64 @[omitempty]
	swap u64 @[omitempty]
	kernel u64 @[omitempty]
	swappiness i64 @[omitempty]
}

pub struct CPU {
pub mut:
	shares u64
	quota i64
	period u64
	cpus string
	mems string
	shares u64 @[omitempty]
	quota i64 @[omitempty]
	period u64 @[omitempty]
	cpus string @[omitempty]
	mems string @[omitempty]
}

pub struct Pids {
pub mut:
	limit i64
	limit i64 @[omitempty]
}

pub struct BlockIO {
pub mut:
	weight u16
	weight u16 @[omitempty]
}

pub struct LinuxDevice {
pub mut:
	path string
	typ string
	typ string @[json: 'type']
	major i64
	minor i64
	file_mode u32
	file_mode u32 @[json: 'fileMode']
	uid u32
	gid u32
}

pub struct LinuxIDMapping {
pub mut:
	container_id u32
	host_id u32
	container_id u32 @[json: 'containerID']
	host_id u32 @[json: 'hostID']
	size u32
}

@@ -160,6 +160,8 @@ pub enum MountType {
	proc
	sysfs
	devpts
	mqueue
	cgroup
	nfs
	overlay
}
@@ -235,4 +237,4 @@ pub enum RlimitType {
	rlimit_nice
	rlimit_rtprio
	rlimit_rttime
}

@@ -1,121 +0,0 @@
{
	"ociVersion": "1.0.2",
	"process": {
		"terminal": true,
		"user": {
			"uid": 0,
			"gid": 0
		},
		"args": [
			"/bin/sh",
			"-c",
			"while true; do sleep 30; done"
		],
		"env": [
			"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
			"TERM=xterm"
		],
		"cwd": "/",
		"capabilities": {
			"bounding": [
				"CAP_AUDIT_WRITE",
				"CAP_KILL",
				"CAP_NET_BIND_SERVICE"
			],
			"effective": [
				"CAP_AUDIT_WRITE",
				"CAP_KILL",
				"CAP_NET_BIND_SERVICE"
			],
			"inheritable": [
				"CAP_AUDIT_WRITE",
				"CAP_KILL",
				"CAP_NET_BIND_SERVICE"
			],
			"permitted": [
				"CAP_AUDIT_WRITE",
				"CAP_KILL",
				"CAP_NET_BIND_SERVICE"
			]
		},
		"rlimits": [
			{
				"type": "RLIMIT_NOFILE",
				"hard": 1024,
				"soft": 1024
			}
		],
		"noNewPrivileges": true
	},
	"root": {
		"path": "${rootfs_path}",
		"readonly": false
	},
	"mounts": [
		{
			"destination": "/proc",
			"type": "proc",
			"source": "proc"
		},
		{
			"destination": "/dev",
			"type": "tmpfs",
			"source": "tmpfs",
			"options": [
				"nosuid",
				"strictatime",
				"mode=755",
				"size=65536k"
			]
		},
		{
			"destination": "/sys",
			"type": "sysfs",
			"source": "sysfs",
			"options": [
				"nosuid",
				"noexec",
				"nodev",
				"ro"
			]
		}
	],
	"linux": {
		"namespaces": [
			{
				"type": "pid"
			},
			{
				"type": "network"
			},
			{
				"type": "ipc"
			},
			{
				"type": "uts"
			},
			{
				"type": "mount"
			}
		],
		"maskedPaths": [
			"/proc/acpi",
			"/proc/kcore",
			"/proc/keys",
			"/proc/latency_stats",
			"/proc/timer_list",
			"/proc/timer_stats",
			"/proc/sched_debug",
			"/proc/scsi",
			"/sys/firmware"
		],
		"readonlyPaths": [
			"/proc/asound",
			"/proc/bus",
			"/proc/fs",
			"/proc/irq",
			"/proc/sys",
			"/proc/sysrq-trigger"
		]
	}
}
@@ -3,16 +3,19 @@ module heropods

import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.tmux
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.virt.crun
import time
import freeflowuniverse.herolib.builder
import json

@[heap]
pub struct Container {
pub mut:
	name string
	node ?&builder.Node
	tmux_pane ?&tmux.Pane
	crun_config ?&crun.CrunConfig
	factory &ContainerFactory
}

// Struct to parse JSON output of `crun state`
@@ -31,10 +34,32 @@ pub fn (mut self Container) start() ! {
	if !container_exists {
		// Container doesn't exist, create it first
		console.print_debug('Container ${self.name} does not exist, creating it...')
		osal.exec(
			cmd: 'crun create --bundle ${self.factory.base_dir}/configs/${self.name} ${self.name}'
		// Try to create the container; if it fails with a "File exists" error,
		// force delete any leftover state and retry
		crun_root := '${self.factory.base_dir}/runtime'
		create_result := osal.exec(
			cmd: 'crun --root ${crun_root} create --bundle ${self.factory.base_dir}/configs/${self.name} ${self.name}'
			stdout: true
		)!
		) or {
			if err.msg().contains('File exists') {
				console.print_debug('Container creation failed with "File exists", attempting to clean up leftover state...')
				// Force delete any leftover state - try multiple cleanup approaches
				osal.exec(cmd: 'crun --root ${crun_root} delete ${self.name}', stdout: false) or {}
				osal.exec(cmd: 'crun delete ${self.name}', stdout: false) or {} // Also try default root
				// Clean up any leftover runtime directories
				osal.exec(cmd: 'rm -rf ${crun_root}/${self.name}', stdout: false) or {}
				osal.exec(cmd: 'rm -rf /run/crun/${self.name}', stdout: false) or {}
				// Wait a moment for cleanup to complete
				time.sleep(500 * time.millisecond)
				// Retry creation
				osal.exec(
					cmd: 'crun --root ${crun_root} create --bundle ${self.factory.base_dir}/configs/${self.name} ${self.name}'
					stdout: true
				)!
			} else {
				return err
			}
		}
		console.print_debug('Container ${self.name} created')
	}

@@ -48,16 +73,18 @@ pub fn (mut self Container) start() ! {
	// because crun doesn't allow restarting a stopped container
	if container_exists && status != .running {
		console.print_debug('Container ${self.name} exists but is stopped, recreating...')
		osal.exec(cmd: 'crun delete ${self.name}', stdout: false) or {}
		crun_root := '${self.factory.base_dir}/runtime'
		osal.exec(cmd: 'crun --root ${crun_root} delete ${self.name}', stdout: false) or {}
		osal.exec(
			cmd: 'crun create --bundle ${self.factory.base_dir}/configs/${self.name} ${self.name}'
			cmd: 'crun --root ${crun_root} create --bundle ${self.factory.base_dir}/configs/${self.name} ${self.name}'
			stdout: true
		)!
		console.print_debug('Container ${self.name} recreated')
	}

	// start the container (crun start doesn't have --detach flag)
	osal.exec(cmd: 'crun start ${self.name}', stdout: true)!
	crun_root := '${self.factory.base_dir}/runtime'
	osal.exec(cmd: 'crun --root ${crun_root} start ${self.name}', stdout: true)!
	console.print_green('Container ${self.name} started')
}

@@ -68,12 +95,13 @@ pub fn (mut self Container) stop() ! {
		return
	}

	osal.exec(cmd: 'crun kill ${self.name} SIGTERM', stdout: false) or {}
	crun_root := '${self.factory.base_dir}/runtime'
	osal.exec(cmd: 'crun --root ${crun_root} kill ${self.name} SIGTERM', stdout: false) or {}
	time.sleep(2 * time.second)

	// Force kill if still running
	if self.status()! == .running {
		osal.exec(cmd: 'crun kill ${self.name} SIGKILL', stdout: false) or {}
		osal.exec(cmd: 'crun --root ${crun_root} kill ${self.name} SIGKILL', stdout: false) or {}
	}
	console.print_green('Container ${self.name} stopped')
}
@@ -86,7 +114,8 @@ pub fn (mut self Container) delete() ! {
	}

	self.stop()!
	osal.exec(cmd: 'crun delete ${self.name}', stdout: false) or {}
	crun_root := '${self.factory.base_dir}/runtime'
	osal.exec(cmd: 'crun --root ${crun_root} delete ${self.name}', stdout: false) or {}

	// Remove from factory's container cache
	if self.name in self.factory.containers {
@@ -110,7 +139,10 @@ pub fn (mut self Container) exec(cmd_ osal.Command) !string {
}

pub fn (self Container) status() !ContainerStatus {
	result := osal.exec(cmd: 'crun state ${self.name}', stdout: false) or { return .unknown }
	crun_root := '${self.factory.base_dir}/runtime'
	result := osal.exec(cmd: 'crun --root ${crun_root} state ${self.name}', stdout: false) or {
		return .unknown
	}

	// Parse JSON output from crun state
	state := json.decode(CrunState, result.output) or { return .unknown }
@@ -126,7 +158,10 @@ pub fn (self Container) status() !ContainerStatus {
// Check if container exists in crun (regardless of its state)
fn (self Container) container_exists_in_crun() !bool {
	// Try to get container state - if it fails, container doesn't exist
	result := osal.exec(cmd: 'crun state ${self.name}', stdout: false) or { return false }
	crun_root := '${self.factory.base_dir}/runtime'
	result := osal.exec(cmd: 'crun --root ${crun_root} state ${self.name}', stdout: false) or {
		return false
	}

	// If we get here, the container exists (even if stopped/paused)
	return result.exit_code == 0
@@ -206,7 +241,8 @@ pub fn (mut self Container) tmux_pane(args TmuxPaneArgs) !&tmux.Pane {

	// Execute command if provided
	if args.cmd != '' {
		pane.send_keys('crun exec ${self.name} ${args.cmd}')!
		crun_root := '${self.factory.base_dir}/runtime'
		pane.send_keys('crun --root ${crun_root} exec ${self.name} ${args.cmd}')!
	}

	self.tmux_pane = pane
@@ -223,6 +259,7 @@ pub fn (mut self Container) node() !&builder.Node {

	mut exec := builder.ExecutorCrun{
		container_id: self.name
		crun_root: '${self.factory.base_dir}/runtime'
		debug: false
	}

@@ -242,3 +279,58 @@ pub fn (mut self Container) node() !&builder.Node {
	self.node = node
	return node
}

// Get the crun configuration for this container
pub fn (self Container) config() !&crun.CrunConfig {
	return self.crun_config or { return error('Container ${self.name} has no crun configuration') }
}

// Container configuration customization methods
pub fn (mut self Container) set_memory_limit(limit_mb u64) !&Container {
	mut config := self.config()!
	config.set_memory_limit(limit_mb * 1024 * 1024) // Convert MB to bytes
	return &self
}

pub fn (mut self Container) set_cpu_limits(period u64, quota i64, shares u64) !&Container {
	mut config := self.config()!
	config.set_cpu_limits(period, quota, shares)
	return &self
}

pub fn (mut self Container) add_mount(source string, destination string, mount_type crun.MountType, options []crun.MountOption) !&Container {
	mut config := self.config()!
	// crun's add_mount takes (destination, source, ...), so swap the arguments here
	config.add_mount(destination, source, mount_type, options)
	return &self
}

pub fn (mut self Container) add_capability(cap crun.Capability) !&Container {
	mut config := self.config()!
	config.add_capability(cap)
	return &self
}

pub fn (mut self Container) remove_capability(cap crun.Capability) !&Container {
	mut config := self.config()!
	config.remove_capability(cap)
	return &self
}

pub fn (mut self Container) add_env(key string, value string) !&Container {
	mut config := self.config()!
	config.add_env(key, value)
	return &self
}

pub fn (mut self Container) set_working_dir(dir string) !&Container {
	mut config := self.config()!
	config.set_working_dir(dir)
	return &self
}

// Save the current configuration to disk
pub fn (self Container) save_config() ! {
	config := self.config()!
	config_path := '${self.factory.base_dir}/configs/${self.name}/config.json'
	config.save_to_file(config_path)!
}

@@ -2,10 +2,9 @@ module heropods

import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.core as osal
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.virt.crun
import freeflowuniverse.herolib.installers.virt.herorunner as herorunner_installer
import os
import x.json2

// Updated enum to be more flexible
pub enum ContainerImageType {
@@ -27,7 +26,7 @@ pub:

pub fn (mut self ContainerFactory) new(args ContainerNewArgs) !&Container {
	if args.name in self.containers && !args.reset {
		return self.containers[args.name]
		return self.containers[args.name] or { panic('bug: container should exist') }
	}

	// Determine image to use
@@ -67,8 +66,8 @@ pub fn (mut self ContainerFactory) new(args ContainerNewArgs) !&Container {
		return error('Image rootfs not found: ${rootfs_path}. Please ensure the image is available.')
	}

	// Create container config (with terminal disabled) but don't create the container yet
	self.create_container_config(args.name, rootfs_path)!
	// Create crun configuration using the crun module
	mut crun_config := self.create_crun_config(args.name, rootfs_path)!

	// Ensure crun is installed on host
	if !osal.cmd_exists('crun') {
@@ -79,41 +78,45 @@ pub fn (mut self ContainerFactory) new(args ContainerNewArgs) !&Container {
	// Create container struct but don't create the actual container in crun yet
	// The actual container creation will happen in container.start()
	mut container := &Container{
		name: args.name
		crun_config: crun_config
		factory: &self
	}

	self.containers[args.name] = container
	return container
}

// Create OCI config.json from template
fn (self ContainerFactory) create_container_config(container_name string, rootfs_path string) ! {
// Create crun configuration using the crun module
fn (mut self ContainerFactory) create_crun_config(container_name string, rootfs_path string) !&crun.CrunConfig {
	// Create crun configuration using the factory pattern
	mut config := crun.new(mut self.crun_configs, name: container_name)!

	// Configure for heropods use case - disable terminal for background containers
	config.set_terminal(false)
	config.set_command(['/bin/sh', '-c', 'while true; do sleep 30; done'])
	config.set_working_dir('/')
	config.set_user(0, 0, [])
	config.add_env('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin')
	config.add_env('TERM', 'xterm')
	config.set_rootfs(rootfs_path, false)
	config.set_hostname('container')
	config.set_no_new_privileges(true)

	// Add the specific rlimit for file descriptors
	config.add_rlimit(.rlimit_nofile, 1024, 1024)

	// Validate the configuration
	config.validate()!

	// Create config directory and save JSON
	config_dir := '${self.base_dir}/configs/${container_name}'
	osal.exec(cmd: 'mkdir -p ${config_dir}', stdout: false)!

	// Load template
	mut config_content := $tmpl('config_template.json')

	// Parse JSON with json2
	mut root := json2.raw_decode(config_content)!
	mut config := root.as_map()

	// Get or create process map
	mut process := if 'process' in config {
		config['process'].as_map()
	} else {
		map[string]json2.Any{}
	}

	// Force disable terminal
	process['terminal'] = json2.Any(false)
	config['process'] = json2.Any(process)

	// Write back to config.json
	config_path := '${config_dir}/config.json'
	mut p := pathlib.get_file(path: config_path, create: true)!
	p.write(json2.encode_pretty(json2.Any(config)))!
	config.save_to_file(config_path)!

	return config
}

// Use podman to pull image and extract rootfs

@@ -2,7 +2,7 @@ module heropods

import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.core as osal
import time
import freeflowuniverse.herolib.virt.crun
import os

@[heap]
@@ -11,6 +11,7 @@ pub mut:
	tmux_session string
	containers map[string]&Container
	images map[string]&ContainerImage
	crun_configs map[string]&crun.CrunConfig
	base_dir string
}

@@ -45,6 +46,11 @@ fn (mut self ContainerFactory) init(args FactoryInitArgs) ! {
		}
	}

	// Clean up any leftover crun state if reset is requested
	if args.reset {
		self.cleanup_crun_state()!
	}

	// Load existing images into cache
	self.load_existing_images()!

@@ -104,7 +110,7 @@ pub fn (mut self ContainerFactory) get(args ContainerNewArgs) !&Container {
	if args.name !in self.containers {
		return error('Container "${args.name}" does not exist. Use factory.new() to create it first.')
	}
	return self.containers[args.name]
	return self.containers[args.name] or { panic('bug: container should exist') }
}

// Get image by name
@@ -112,7 +118,7 @@ pub fn (mut self ContainerFactory) image_get(name string) !&ContainerImage {
	if name !in self.images {
		return error('Image "${name}" not found in cache. Try importing or downloading it.')
	}
	return self.images[name]
	return self.images[name] or { panic('bug: image should exist') }
}

// List all containers currently managed by crun
@@ -136,3 +142,34 @@ pub fn (self ContainerFactory) list() ![]Container {
	}
	return containers
}

// Clean up any leftover crun state
fn (mut self ContainerFactory) cleanup_crun_state() ! {
	console.print_debug('Cleaning up leftover crun state...')
	crun_root := '${self.base_dir}/runtime'

	// Stop and delete all containers in our custom root
	result := osal.exec(cmd: 'crun --root ${crun_root} list -q', stdout: false) or { return }

	for container_name in result.output.split_into_lines() {
		if container_name.trim_space() != '' {
			console.print_debug('Cleaning up container: ${container_name}')
			osal.exec(cmd: 'crun --root ${crun_root} kill ${container_name} SIGKILL', stdout: false) or {}
			osal.exec(cmd: 'crun --root ${crun_root} delete ${container_name}', stdout: false) or {}
		}
	}

	// Also clean up any containers in the default root that might be ours
	result2 := osal.exec(cmd: 'crun list -q', stdout: false) or { return }
	for container_name in result2.output.split_into_lines() {
		if container_name.trim_space() != '' && container_name in self.containers {
			console.print_debug('Cleaning up container from default root: ${container_name}')
			osal.exec(cmd: 'crun kill ${container_name} SIGKILL', stdout: false) or {}
			osal.exec(cmd: 'crun delete ${container_name}', stdout: false) or {}
		}
	}

	// Clean up runtime directories
	osal.exec(cmd: 'rm -rf ${crun_root}/*', stdout: false) or {}
	osal.exec(cmd: 'find /run/crun -name "*" -type d -exec rm -rf {} + 2>/dev/null', stdout: false) or {}
}