Merge branch 'development' into 6-openrpc-code-generator
20  .github/workflows/documentation.yml  (vendored)
@@ -2,8 +2,9 @@ name: Deploy Documentation to Pages
on:
  push:
-   branches: ["development","main"]
+   branches: ["development"]
  workflow_dispatch:
+   branches: ["development"]

permissions:
  contents: read
@@ -16,34 +17,31 @@ concurrency:

jobs:
  deploy-documentation:
    #if: startsWith(github.ref, 'refs/tags/')
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    steps:
      - name: Install Vlang dependencies
        run: sudo apt update && sudo apt install -y libgc-dev

      - name: Checkout
-       uses: actions/checkout@v3
+       uses: actions/checkout@v4

      - name: Setup Vlang
        run: ./install_v.sh

      - name: Generate documentation
        run: |
          ./doc.vsh
-         # ls /home/runner/work/herolib/docs
+         find .

      - name: Setup Pages
-       uses: actions/configure-pages@v3
+       uses: actions/configure-pages@v4

      - name: Upload artifact
-       uses: actions/upload-pages-artifact@v1
+       uses: actions/upload-pages-artifact@v3
        with:
          path: "/home/runner/work/herolib/herolib/docs"

      - name: Deploy to GitHub Pages
        id: deployment
-       uses: actions/deploy-pages@v1
+       uses: actions/deploy-pages@v4
88  .github/workflows/hero_build.yml  (vendored, new file)
@@ -0,0 +1,88 @@
name: Release Hero

permissions:
  contents: write

on:
  push:
  workflow_dispatch:

jobs:
  build:
    timeout-minutes: 60
    if: startsWith(github.ref, 'refs/tags/')
    strategy:
      fail-fast: false
      matrix:
        include:
          - target: x86_64-unknown-linux-musl
            os: ubuntu-latest
            short-name: linux-i64
          - target: aarch64-unknown-linux-musl
            os: ubuntu-latest
            short-name: linux-arm64
          - target: aarch64-apple-darwin
            os: macos-latest
            short-name: macos-arm64
          - target: x86_64-apple-darwin
            os: macos-13
            short-name: macos-i64
    runs-on: ${{ matrix.os }}

    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!"
      - run: echo "🔎 The name of your branch is ${{ github.ref_name }} and your repository is ${{ github.repository }}."

      - name: Check out repository code
        uses: actions/checkout@v4

      - name: Setup V & Herolib
        id: setup
        run: ./install_v.sh --herolib
        timeout-minutes: 10

      - name: Do all the basic tests
        timeout-minutes: 25
        run: ./test_basic.vsh

      - name: Build Hero
        timeout-minutes: 15
        run: |
          set -e
          v -w -d use_openssl -enable-globals cli/hero.v -o cli/hero-${{ matrix.target }}

      - name: Upload
        uses: actions/upload-artifact@v4
        with:
          name: hero-${{ matrix.target }}
          path: cli/hero-${{ matrix.target }}

  release_hero:
    needs: build
    runs-on: ubuntu-latest
    permissions:
      contents: write
    if: startsWith(github.ref, 'refs/tags/')

    steps:
      - name: Check out repository code
        uses: actions/checkout@v4

      - name: Download Artifacts
        uses: actions/download-artifact@v4
        with:
          path: cli/bins
          merge-multiple: true

      - name: Release
        uses: softprops/action-gh-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref_name }}
          name: Release ${{ github.ref_name }}
          draft: false
          fail_on_unmatched_files: true
          generate_release_notes: true
          files: cli/bins/*
32  .github/workflows/test.yml  (vendored, new file)
@@ -0,0 +1,32 @@
name: Build on Linux & Run tests

permissions:
  contents: write

on:
  push:
  workflow_dispatch:

jobs:
  build:
    strategy:
      matrix:
        include:
          - target: x86_64-unknown-linux-musl
            os: ubuntu-latest
            short-name: linux-i64
    runs-on: ${{ matrix.os }}
    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!"
      - run: echo "🔎 The name of your branch is ${{ github.ref_name }} and your repository is ${{ github.repository }}."

      - name: Check out repository code
        uses: actions/checkout@v3

      - name: Setup V & Herolib
        run: ./install_v.sh --herolib

      - name: Do all the basic tests
        run: ./test_basic.vsh
5  .gitignore  (vendored)
@@ -7,6 +7,7 @@ vls.*
vls.log
node_modules/
docs/
+vdocs/
photonwrapper.so
x
.env
@@ -25,6 +26,6 @@ dump.rdb
output/
*.db
.stellar
-vdocs/
data.ms/
test_basic
+cli/hero
19  README.md
@@ -1,13 +1,22 @@
# herolib

a smaller version of herolib with only the items we need for hero

-> [documentation here](https://freeflowuniverse.github.io/herolib/)
+> [documentation of the library](https://freeflowuniverse.github.io/herolib/)

-## automated install
+## hero install for users

```bash
-curl 'https://raw.githubusercontent.com/freeflowuniverse/herolib/refs/heads/main/install_v.sh' > /tmp/install_v.sh
+curl https://raw.githubusercontent.com/freeflowuniverse/herolib/refs/heads/development/install_hero.sh > /tmp/install_hero.sh
+bash /tmp/install_hero.sh
```

+this tool can be used to work with git, build books, play with hero AI, ...

+## automated install for developers

+```bash
+curl 'https://raw.githubusercontent.com/freeflowuniverse/herolib/refs/heads/development/install_v.sh' > /tmp/install_v.sh
+bash /tmp/install_v.sh --analyzer --herolib
+#DONT FORGET TO START A NEW SHELL (otherwise the paths will not be set)
+```

@@ -16,7 +25,7 @@ bash /tmp/install_v.sh --analyzer --herolib

```bash
-#~/code/github/freeflowuniverse/herolib/install_v.sh --help
+~/code/github/freeflowuniverse/herolib/install_v.sh --help

V & HeroLib Installer Script
```
@@ -25,6 +25,7 @@ The parser supports several formats:
4. Comments: `// this is a comment`

Example:

```v
text := "name:'John Doe' age:30 active:true // user details"
params := paramsparser.new(text)!
```

@@ -59,6 +60,7 @@ progress := params.get_percentage("progress")!
The module supports various type conversions:

### Basic Types

- `get_int()`: Convert to int32
- `get_u32()`: Convert to unsigned 32-bit integer
- `get_u64()`: Convert to unsigned 64-bit integer
@@ -67,10 +69,12 @@ The module supports various type conversions:
- `get_percentage()`: Convert percentage string to float (e.g., "80%" → 0.8)

### Boolean Values

- `get_default_true()`: Returns true if value is empty, "1", "true", "y", or "yes"
- `get_default_false()`: Returns false if value is empty, "0", "false", "n", or "no"
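
A short sketch of how these getters read against the example params above (key names hypothetical; exact error-handling signatures may differ):

```v
active := params.get_default_true('active')    // 'active:true' parses to true
verbose := params.get_default_false('verbose') // missing key falls back to false
```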

### Lists

The module provides robust support for parsing and converting lists:

@@ -89,6 +93,7 @@
```v
clean_names := params.get_list_namefix("categories")!
```

Supported list types:

- `get_list()`: String list
- `get_list_u8()`, `get_list_u16()`, `get_list_u32()`, `get_list_u64()`: Unsigned integers
- `get_list_i8()`, `get_list_i16()`, `get_list_int()`, `get_list_i64()`: Signed integers

@@ -97,6 +102,7 @@ Supported list types:
Each list method has a corresponding `_default` version that accepts a default value.
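
For instance, a hedged sketch of the `_default` variants (key names and fallback values are hypothetical):

```v
// falls back to the given list when the key is absent
ids := params.get_list_int_default('ids', [1, 2, 3])!
users := params.get_list_default('users', ['admin'])!
```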

Valid list formats:

```v
users: "john, jane,bob"
ids: "1,2,3,4,5"
```

@@ -133,4 +139,4 @@
```
get_timestamp(key string) !Duration

get_timestamp_default(key string, defval Duration) !Duration
```
@@ -1,7 +1,6 @@
# module osal

import as:

```vlang
import freeflowuniverse.herolib.osal
```

@@ -46,7 +45,6 @@ pub enum CPUType {

## process

### execute jobs

@@ -56,7 +54,7 @@ println(job2)
```v
//wont die, the result can be found in /tmp/execscripts
mut job:=osal.exec(cmd:"ls dsds",ignore_error:true)?
//this one has an error
println(job)
```

All scripts are executed from a file in /tmp/execscripts
@@ -91,24 +89,24 @@ info returns like:

## other commands

fn bin_path() !string
fn cmd_add(args_ CmdAddArgs) !
copy a binary to the right location on the local computer, e.g. /usr/local/bin on linux, ~/hero/bin on osx; will also add the bin location to the path in .zprofile and .zshrc (different per platform)
fn cmd_exists(cmd string) bool
fn cmd_exists_profile(cmd string) bool
fn cmd_path(cmd string) !string
same as executing which in the OS; returns a path or an error
fn cmd_to_script_path(cmd Command) !string
returns a temporary path which can then be executed; a helper function for making a script out of a command
fn cputype() CPUType
fn cputype_enum_from_string(cpytype string) CPUType
Returns the enum value that matches the provided string for CPUType
fn dir_delete(path string) !
remove all if it exists
fn dir_ensure(path string) !
remove all if it exists
fn dir_reset(path string) !
remove all if it exists and then (re-)create
fn done_delete(key string) !
fn done_exists(key string) bool
fn done_get(key string) ?string
@@ -117,45 +115,46 @@ fn done_get_str(key string) string
fn done_print() !
fn done_reset() !
fn done_set(key string, val string) !
fn download(args_ DownloadArgs) !pathlib.Path
downloads the file; if name is not specified it will be the filename part, provided that part ends in an extension like .md .txt .log .text ...
fn env_get(key string) !string
Returns the requested environment variable if it exists, or throws an error if it does not
fn env_get_all() map[string]string
Returns all existing environment variables
fn env_get_default(key string, def string) string
Returns the requested environment variable if it exists, or the provided default value if it does not
fn env_set(args EnvSet)
Sets an environment variable if it was not set before; overwrites it if it exists and overwrite is set to true (the default)
fn env_set_all(args EnvSetAll)
Sets multiple environment variables in one go; if clear_before_set is true, all existing environment variables are unset before the operation; if overwrite_if_exists is true, existing environment variables are overwritten
fn env_unset(key string)
Unsets an environment variable
fn env_unset_all()
Unsets all environment variables
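
A minimal usage sketch of these env helpers (the variable name is hypothetical; EnvSet is defined further below, and the struct-arg call style is assumed from herolib conventions):

```v
import freeflowuniverse.herolib.osal

// overwrite defaults to true, per the EnvSet struct
osal.env_set(key: 'HERO_DEBUG', value: '1')
val := osal.env_get_default('HERO_DEBUG', '0')
println(val)
```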
fn exec(cmd Command) !Job
cmd is the cmd to execute; can use ' ' and spaces. if \n is in cmd it will be written to a file and then executed with bash. if die==false it will just return returncode,out and not return an error. if stdout it will show stderr and stdout. if cmd starts with find or ls it is given to bash -c so it can execute. if cmd has no path, the path will be found. Command argument:

```
name string // to give a name to your command, good to see logs...
cmd string
description string
timeout int = 3600 // timeout in sec
stdout bool = true
stdout_log bool = true
raise_error bool = true // if false, will not raise an error but still error report
ignore_error bool // means if error will just exit and not raise, there will be no error reporting
work_folder string // location where cmd will be executed
environment map[string]string // env variables
ignore_error_codes []int
scriptpath string // is the path where the script will be put which is executed
scriptkeep bool // means we don't remove the script
debug bool // if debug will put +ex in the script which is being executed and will make sure script stays
shell bool // means we will execute it in a shell interactive
retry int
interactive bool = true // make sure we run on non interactive way
async bool
runtime RunTime (.bash, .python)
```

returns Job:

```
start time.Time
end time.Time
@@ -167,35 +166,37 @@ fn exec(cmd Command) !Job
process os.Process
```

returns Job.

fn exec_string(cmd Command) !string
cmd is the cmd to execute; can use ' ' and spaces. if \n is in cmd it will be written to a file and then executed with bash. if die==false it will just return returncode,out and not return an error. if stdout it will show stderr and stdout.
if cmd starts with find or ls, it is given to bash -c so it can execute. if cmd has no path, the path will be found. $... are replaced by environment arguments TODO:implement
Command argument: cmd string timeout int = 600 stdout bool = true die bool = true debug bool
returns what needs to be executed; can give it to bash -c ...
fn execute_debug(cmd string) !string
fn execute_interactive(cmd string) !
shortcut to execute a job interactively (in a shell)
fn execute_ok(cmd string) bool
executes a cmd; returns true if no error
fn execute_silent(cmd string) !string
shortcut to execute a job silently
fn execute_stdout(cmd string) !string
shortcut to execute a job to stdout
fn file_read(path string) !string
fn file_write(path string, text string) !
fn get_logger() log.Logger
Returns a logger object; the debug level can be set via the OSAL_LOG_LEVEL environment variable
fn hero_path() !string
fn hostname() !string
fn initname() !string
e.g. systemd, bash, zinit
fn ipaddr_pub_get() !string
Returns the ip address as known on the public side; uses resolver4.opendns.com
fn is_linux() bool
fn is_linux_arm() bool
fn is_linux_intel() bool
fn is_osx() bool
fn is_osx_arm() bool
@@ -205,24 +206,23 @@ fn load_env_file(file_path string) !
fn memdb_exists(key string) bool
fn memdb_get(key string) string
fn memdb_set(key string, val string)
fn package_install(name_ string) !
install a package; will use the right commands per platform
fn package_refresh() !
update the package list
fn ping(args PingArgs) PingResult
if reached within the timeout the result will be ok; address is e.g. 8.8.8.8; ping means we check if the destination responds
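
A sketch of a reachability check, matching the PingArgs and PingResult definitions further below (address arbitrary):

```v
res := osal.ping(address: '8.8.8.8', count: 1, timeout: 1)
if res == .ok {
	println('destination responds')
}
```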
fn platform() PlatformType
fn platform_enum_from_string(platform string) PlatformType
fn process_exists(pid int) bool
fn process_exists_byname(name string) !bool
fn process_kill_recursive(args ProcessKillArgs) !
kill a process and all the ones underneath it
fn processinfo_children(pid int) !ProcessMap
get all children of 1 process
fn processinfo_get(pid int) !ProcessInfo
get process info from 1 specific process; returns

```
pub struct ProcessInfo {
pub mut:
	cpu_perc f32
	mem_perc f32
@@ -232,209 +232,210 @@ fn processinfo_get(pid int) !ProcessInfo
	//resident memory
	rss int
}
```

fn processinfo_get_byname(name string) ![]ProcessInfo
fn processinfo_with_children(pid int) !ProcessMap
return the process and its children
fn processmap_get() !ProcessMap
make sure to use new first, so that the connection has been initted; then you can get it everywhere
fn profile_path() string
fn profile_path_add(args ProfilePathAddArgs) !
add the given path to a profile
fn profile_path_add_hero() !string
fn profile_path_source() string
return the source statement if the profile exists
fn profile_path_source_and() string
return `source $path &&`, or empty if it doesn't exist
fn sleep(duration int)
sleep in seconds
fn tcp_port_test(args TcpPortTestArgs) bool
test if a tcp port answers

```
address string //192.168.8.8
port int = 22
timeout u16 = 2000 // total time in milliseconds to keep on trying
```
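
A hedged sketch of tcp_port_test using these defaults (host hypothetical):

```v
// true if something answers on the port within the timeout (in milliseconds)
if osal.tcp_port_test(address: '192.168.8.8', port: 22, timeout: 2000) {
	println('port answers')
}
```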
fn user_add(args UserArgs) !int
adds a user if the user does not exist yet
fn user_exists(username string) bool
fn user_id_get(username string) !int
fn usr_local_path() !string
/usr/local on linux, ${os.home_dir()}/hero on osx
fn whoami() !string
fn write_flags[T](options T) string

enum CPUType {
	unknown
	intel
	arm
	intel32
	arm32
}
enum ErrorType {
	exec
	timeout
	args
}
enum JobStatus {
	init
	running
	error_exec
	error_timeout
	error_args
	done
}
enum PMState {
	init
	ok
	old
}
enum PingResult {
	ok
	timeout // timeout from ping
	unknownhost // means we don't know the hostname, it's a dns issue
}
enum PlatformType {
	unknown
	osx
	ubuntu
	alpine
	arch
	suse
}
enum RunTime {
	bash
	python
	heroscript
	herocmd
	v
}
struct CmdAddArgs {
pub mut:
	cmdname string
	source string @[required] // path where the binary is
	symlink bool // if rather than copy do a symlink
	reset bool // if existing cmd will delete
	// bin_repo_url string = 'https://github.com/freeflowuniverse/freeflow_binary' // binary where we put the results
}
struct Command {
pub mut:
	name string // to give a name to your command, good to see logs...
	cmd string
	description string
	timeout int = 3600 // timeout in sec
	stdout bool = true
	stdout_log bool = true
	raise_error bool = true // if false, will not raise an error but still error report
	ignore_error bool // means if error will just exit and not raise, there will be no error reporting
	work_folder string // location where cmd will be executed
	environment map[string]string // env variables
	ignore_error_codes []int
	scriptpath string // is the path where the script will be put which is executed
	scriptkeep bool // means we don't remove the script
	debug bool // if debug will put +ex in the script which is being executed and will make sure script stays
	shell bool // means we will execute it in a shell interactive
	retry int
	interactive bool = true
	async bool
	runtime RunTime
}
struct DownloadArgs {
pub mut:
	name string // optional (otherwise derived out of filename)
	url string
	reset bool // will remove
	hash string // if hash is known, will verify what hash is
	dest string // if specified will copy to that destination
	timeout int = 180
	retry int = 3
	minsize_kb u32 = 10 // is always in kb
	maxsize_kb u32
	expand_dir string
	expand_file string
}
struct EnvSet {
pub mut:
	key string @[required]
	value string @[required]
	overwrite bool = true
}
struct EnvSetAll {
pub mut:
	env map[string]string
	clear_before_set bool
	overwrite_if_exists bool = true
}
struct Job {
pub mut:
	start time.Time
	end time.Time
	cmd Command
	output string
	error string
	exit_code int
	status JobStatus
	process ?&os.Process @[skip; str: skip]
	runnr int // nr of time it runs, is for retry
}
fn (mut job Job) execute_retry() !
execute the job and wait on the result; will retry as specified
fn (mut job Job) execute() !
execute the job and start the process; the process will not be closed. important: you need to close the process later with job.close()!, otherwise we get zombie processes
fn (mut job Job) wait() !
wait till the job finishes or goes into error
fn (mut job Job) process() !
process (read std.err and std.out of the process)
fn (mut job Job) close() !
will wait & close
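
A sketch of the job lifecycle described above, assuming exec honors the async flag as documented (command arbitrary):

```v
mut job := osal.exec(cmd: 'ls /tmp', async: true)!
job.wait()!  // wait till the job finishes or errors
job.close()! // important: close to avoid zombie processes
```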
struct JobError {
	Error
pub mut:
	job Job
	error_type ErrorType
}
struct PingArgs {
pub mut:
	address string @[required]
	count u8 = 1 // the ping is successful if it got count amount of replies from the other side
	timeout u16 = 1 // the time in which the other side should respond in seconds
	retry u8
}
struct ProcessInfo {
pub mut:
	cpu_perc f32
	mem_perc f32
	cmd string
	pid int
	ppid int // parentpid
	// resident memory
	rss int
}
fn (mut p ProcessInfo) str() string
struct ProcessKillArgs {
pub mut:
	name string
	pid int
}
struct ProcessMap {
pub mut:
	processes []ProcessInfo
	lastscan time.Time
	state PMState
	pids []int
}
struct ProfilePathAddArgs {
pub mut:
	path string @[required]
	todelete string // see which one to remove
}
struct TcpPortTestArgs {
pub mut:
	address string @[required] // 192.168.8.8
	port int = 22
	timeout u16 = 2000 // total time in milliseconds to keep on trying
}
struct UserArgs {
pub mut:
	name string @[required]
}
93  aiprompts/ai_instruct/instruct.md  (new file)
@@ -0,0 +1,93 @@
We have our own instruction language called heroscript; below you will find details on how to use it.

## heroscript

Heroscript is our small scripting language, used for communicating with our digital tools like calendar management.

It has the following structure:

```heroscript

!!calendar.event_add
    title: 'go to dentist'
    start: '2025/03/01'
    description: '
      a description can be multiline

      like this
      '

!!calendar.event_delete
    title: 'go to dentist'

```

- the format is !!$actor.$action (there is no space before !!)
- every parameter comes on the next line with 4 spaces in front (always use 4 spaces, don't vary this)
- every actor.action starts with !!
- the first part is the actor, e.g. calendar in this case
- the second part is the action name
- multiline values are supported, see the description field

below you will find the instructions for different actors; comments on how to use them come after #, which means they are not part of the definition itself

## remarks on parameters used

- date (see the example after this list)
  - the format of a date is yyyy/mm/dd hh:mm:ss
  - +1h means 1 hour later than now
  - +1m means 1 min later than now
  - +1d means 1 day later than now
  - same for -1h, -1m, -1d
- money is expressed as
  - $val $cursymbol
  - $cursymbol is a 3-letter uppercase code, e.g. USD
- lists are comma separated and wrapped in '...'
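
A hedged sketch combining these conventions (title and attendees are hypothetical):

```heroscript
!!calendar.event_add
    title: 'sprint review'
    start: '+1d'
    end: '+1h'
    attendees: 'tim, rob'
```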

## generic instructions

- do not add information if not specifically asked for

## circle

every actor action happens in a circle; a user can ask to switch circles, the command available is

```
!!circle.switch
    name: 'project x'

```

## calendar

```heroscript

!!calendar.event_add
    title: 'go to dentist'
    start: '2025/03/01'
    end: '+1h' #if + notation is used, this is later than the start
    description: '
      a description can be multiline

      like this
      '
    attendees: 'tim, rob'

!!calendar.event_delete
    title: 'go to dentist'

```

## NOW DO ONE

schedule an event tomorrow 10 am, for 1h, with tim & rob, we want to do product management for threefold
now is friday jan 17

only give me the instructions needed; only return the heroscript, no text around it

if not clear enough, ask the user for more info

if not sure, do not invent; only give instructions as really asked for
58  aiprompts/ai_instruct/instruct2.md  (new file)
@@ -0,0 +1,58 @@

# how to manage my agenda

## Metadata for function calling

```
functions_metadata = [
    {
        "name": "event_add",
        "description": "Adds a calendar event.",
        "parameters": {
            "type": "object",
            "properties": {
                "title": {"type": "string", "description": "Title of the event."},
                "start": {"type": "string", "description": "Start date and time in 'YYYY/MM/DD hh:mm' format."},
                "end": {"type": "string", "description": "End date or duration (e.g., +2h)."},
                "description": {"type": "string", "description": "Event description."},
                "attendees": {"type": "string", "description": "Comma-separated list of attendees' emails."},
            },
            "required": ["title", "start"]
        }
    },
    {
        "name": "event_delete",
        "description": "Deletes a calendar event by title.",
        "parameters": {
            "type": "object",
            "properties": {
                "title": {"type": "string", "description": "Title of the event to delete."},
            },
            "required": ["title"]
        }
    }
]
```

## example call

```
{
    "function": "event_add",
    "parameters": {
        "title": "Team Sync",
        "start": "2025/02/01 10:00",
        "end": "+1h",
        "description": "",
        "attendees": "alice@example.com, bob@example.com"
    }
}
```
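
A matching delete call would follow the same shape (sketch, title hypothetical):

```
{
    "function": "event_delete",
    "parameters": {
        "title": "Team Sync"
    }
}
```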

## how to use

1. Parse the user query to determine intent (e.g., "schedule" maps to event_add, "cancel" maps to event_delete).
2. Extract the required parameters (e.g., title, start date).
3. Invoke the appropriate function with the extracted parameters.
4. Return the function's result as the response.
72  aiprompts/ai_instruct/twin.md  (new file)
@@ -0,0 +1,72 @@
you represent a digital twin for a user; the user talks to you to get things done for his digital life

you will interpret the instructions the user prompts, figure out the multiple instructions, break them up, and categorize them as follows:

- cat: calendar
  - manage the calendar for the user
- cat: contacts
  - manage contacts for the user
- cat: communicate
  - communicate with others using text
- cat: tasks
  - manage my tasks
- cat: circle
  - define the circle we work in; a circle is like a project context in which we do the above, so it can be for a team or a project, try to find it
- cat: sysadmin
  - system administration, e.g. creation of virtual machines (VM), containers, start/stop, see monitoring information
- cat: notes
  - anything to do with transcriptions, note taking, summaries
  - how we record meetings e.g. zoom, google meet, ...
  - how we look for info in meetings
- cat: unknown
  - anything we can't understand

try to understand what the user wants and put it in blocks (one per category for the action, e.g. calendar)

- before each block (instruction) put ###########################
- in the first line mention the category as defined above; only mention this category once and there is only one per block
- then reformulate in clear instructions what needs to be done
- the instructions are put in the lines following the instruction line (not in the instruction line itself)
- only make blocks for instructions as given
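
For example, a single calendar instruction would be emitted as a block like this (hypothetical content):

```
###########################
calendar
schedule a meeting titled 'dentist' on 2025/01/18 10:00
```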

what you output will be used further to do more specific prompting

if circle, always put those instructions first

if a time is specified, put the time as follows

- if relative, e.g. next week, tomorrow, after tomorrow, in one hour, then start from the current time
- time is in YYYY/MM/DD hh:mm format
- current time is friday 2025/01/17 10:12
- if e.g. next month jan, or next tuesday, then don't repeat the broad instruction like 'tuesday', just show the date as YYYY/MM/DD hh:mm

if a date is not clear, don't invent; just repeat the original instruction

if the category is not clear, just use unknown

NOW DO EXAMPLE 1

```
hi good morning

Can you help me find meetings I have done around research of threefold in the last 2 weeks

I need to create a new VM, 4 GB of memory, 2 vcpu, in belgium, with ubuntu

I would like to schedule a meeting, need to go to the dentist tomorrow at 10am, its now friday jan 17

also remind me I need to do the dishes after tomorrow in the morning

can you also add jef as a contact, he lives in geneva, he is doing something about rocketscience

I need to paint my wall in my room next week wednesday

cancel all my meetings next sunday

can you give me a list of my contacts who live in geneva and whose name sounds like tom

send a message to my mother, I am seeing her in 3 days at 7pm
```
@@ -11,7 +11,7 @@
when I generate vlang scripts I will always use the .vsh extension and use the following as the first line:

```
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
```

- a .vsh is a v shell script and can be executed as is, no need to use v ...

@@ -21,7 +21,7 @@ when I generate vlang scripts I will always use .vsh extension and use following

## to do argument parsing use following examples

```v
-#!/usr/bin/env -S v -n -w -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run

import os
import flag
```
@@ -1,7 +1,7 @@
# module ui.console.chalk

Chalk offers functions:

- `console.color_fg(text string, color string)` - To change the foreground color.
- `console.color_bg(text string, color string)` - To change the background color.
- `console.style(text string, style string)` - To change the text style.

@@ -18,6 +18,7 @@ println('I am really ' + console.color_fg(console.style('ANGRY', 'bold'), 'red')
```

Available colors:

- black
- red
- green
- yellow
@@ -36,6 +37,7 @@ Available colors:- black
- white

Available styles:

- bold
- dim
- underline
- blink
@@ -2,17 +2,16 @@

has mechanisms to print better to the console, see the methods below

import as:

```vlang
import freeflowuniverse.herolib.ui.console
```

## Methods

```v
fn clear()
//reset the console screen
@@ -86,13 +85,12 @@ fn style(c Style) string

fn trim(c_ string) string
```

## Console Object

Is used to ask feedback from users

```v
struct UIConsole {
@@ -105,14 +103,14 @@ pub mut:
}

//DropDownArgs:
// - description string
// - items []string
// - warning string
// - clear bool = true

fn (mut c UIConsole) ask_dropdown_int(args_ DropDownArgs) !int
// return the dropdown as an int

fn (mut c UIConsole) ask_dropdown_multiple(args_ DropDownArgs) ![]string
// result can be multiple, also can select all; description string items []string warning string clear bool = true
@@ -135,7 +133,7 @@ fn (mut c UIConsole) ask_time(args QuestionArgs) !string
fn (mut c UIConsole) ask_date(args QuestionArgs) !string

fn (mut c UIConsole) ask_yesno(args YesNoArgs) !bool
// yes is true, no is false
// args:
// - description string
// - question string
@@ -148,14 +146,11 @@ fn (mut c UIConsole) status() string
```

## enums

```v
enum BackgroundColor {
	default_color = 49 // 'default' is a reserved keyword in V
	black = 40
	red = 41
	green = 42
@@ -174,7 +169,7 @@ enum BackgroundColor {
	white = 107
}
enum ForegroundColor {
	default_color = 39 // 'default' is a reserved keyword in V
	white = 97
	black = 30
	red = 31
```
@@ -2238,7 +2238,7 @@ be faster, since there is no need for a re-compilation of a script, that has not
An example `deploy.vsh`:

```v oksyntax
-#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run

// Note: The shebang line above, associates the .vsh file to V on Unix-like systems,
// so it can be run just by specifying the path to the .vsh file, once it's made
```

@@ -2300,11 +2300,11 @@ Whilst V does normally not allow vsh scripts without the designated file extensi
to circumvent this rule and have a file with a fully custom name and shebang. Whilst this feature
exists it is only recommended for specific usecases like scripts that will be put in the path and
should **not** be used for things like build or deploy scripts. To access this feature start the
-file with `#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run`
+file with `#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run`
the built executable. This will run in crun mode so it will only rebuild if changes to the script
were made and keep the binary as `tmp.<scriptfilename>`. **Caution**: if this filename already
exists the file will be overridden. If you want to rebuild each time and not keep this binary
-instead use `#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run`
+instead use `#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run`

# Appendices
@@ -1,10 +1,9 @@

# how to run the vshell example scripts

this is how we want example scripts to be, see the first line

```vlang
-#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.installers.sysadmintools.daguserver
```

@@ -18,4 +17,3 @@ the files are in ~/code/github/freeflowuniverse/herolib/examples for herolib

## important instructions

- never use fn main() in a .vsh script (see the sketch below)
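
A minimal sketch of a complete .vsh script following these rules (the printed value is arbitrary):

```vlang
#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run
// top-level statements only: no fn main() in a .vsh script
import os

println(os.home_dir())
```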
1  cli/.gitignore  (vendored, new file)
@@ -0,0 +1 @@
hero
@@ -1,4 +1,5 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -parallel-cc -enable-globals run
+// #!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import os
import flag
@@ -44,7 +45,7 @@ compile_cmd := if os.user_os() == 'macos' {
	if prod_mode {
		'v -enable-globals -w -n -prod hero.v'
	} else {
-		'v -w -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals hero.v'
+		'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals hero.v'
	}
} else {
	if prod_mode {
@@ -65,7 +66,7 @@ os.chmod('hero', 0o755) or { panic('Failed to make hero binary executable: ${err

// Ensure destination directory exists
os.mkdir_all(os.dir(heropath)) or { panic('Failed to create directory ${os.dir(heropath)}: ${err}') }

+println(heropath)
// Copy to destination paths
os.cp('hero', heropath) or { panic('Failed to copy hero binary to ${heropath}: ${err}') }
os.cp('hero', '/tmp/hero') or { panic('Failed to copy hero binary to /tmp/hero: ${err}') }

@@ -89,5 +89,9 @@ fn hero_upload() ! {
}

fn main() {
-	//os.execute_or_panic('${os.home_dir()}/code/github/freeflowuniverse/herolib/cli/compile.vsh -p')
+	println("compile hero can take 60 sec+ on osx.")
+	os.execute_or_panic('${os.home_dir()}/code/github/freeflowuniverse/herolib/cli/compile.vsh -p')
+	println("upload:")
+	hero_upload() or { eprintln(err) exit(1) }
}
33  cli/hero.v
@@ -1,15 +1,17 @@
module main

import os
-import cli { Command, Flag }
+import cli { Command }
import freeflowuniverse.herolib.core.herocmds
// import freeflowuniverse.herolib.hero.cmds
// import freeflowuniverse.herolib.hero.publishing
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.ui
import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.core
import freeflowuniverse.herolib.core.playbook
-// import freeflowuniverse.herolib.core.playcmds
+import freeflowuniverse.herolib.core.playcmds

fn playcmds_do(path string) ! {
	mut plbook := playbook.new(path: path)!
@@ -29,17 +31,9 @@ fn do() ! {
	mut cmd := Command{
		name: 'hero'
		description: 'Your HERO toolset.'
-		version: '2.0.0'
+		version: '1.0.6'
	}

-	cmd.add_flag(Flag{
-		flag: .string
-		name: 'url'
-		abbrev: 'u'
-		global: true
-		description: 'url of playbook'
-	})

	// herocmds.cmd_run_add_flags(mut cmd)

	mut toinstall := false
@@ -47,7 +41,7 @@ fn do() ! {
		toinstall = true
	}

-	if osal.is_osx() {
+	if core.is_osx()! {
		if !osal.cmd_exists('brew') {
			console.clear()
			mut myui := ui.new()!
@@ -70,9 +64,9 @@ fn do() ! {

	base.redis_install()!

-	//herocmds.cmd_bootstrap(mut cmd)
+	// herocmds.cmd_bootstrap(mut cmd)
	// herocmds.cmd_run(mut cmd)
-	// herocmds.cmd_git(mut cmd)
+	herocmds.cmd_git(mut cmd)
	// herocmds.cmd_init(mut cmd)
	// herocmds.cmd_imagedownsize(mut cmd)
	// herocmds.cmd_biztools(mut cmd)
@@ -81,12 +75,13 @@ fn do() ! {
	// herocmds.cmd_installers(mut cmd)
	// herocmds.cmd_configure(mut cmd)
	// herocmds.cmd_postgres(mut cmd)
-	// herocmds.cmd_mdbook(mut cmd)
+	herocmds.cmd_mdbook(mut cmd)
	// herocmds.cmd_luadns(mut cmd)
-	//herocmds.cmd_caddy(mut cmd)
-	//herocmds.cmd_zola(mut cmd)
+	// herocmds.cmd_caddy(mut cmd)
+	// herocmds.cmd_zola(mut cmd)
	// herocmds.cmd_juggler(mut cmd)
-	// herocmds.cmd_generator(mut cmd)
+	herocmds.cmd_generator(mut cmd)
+	herocmds.cmd_docusaurus(mut cmd)
	// herocmds.cmd_docsorter(mut cmd)
	// cmd.add_command(publishing.cmd_publisher(pre_func))
	cmd.setup()
@@ -99,4 +94,4 @@ fn main() {

fn pre_func(cmd Command) ! {
	herocmds.plbook_run(cmd)!
}
40  doc.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import os

@@ -26,9 +26,9 @@ os.chdir(herolib_path) or {
	panic('Failed to change directory to herolib: ${err}')
}

-os.rmdir_all('_docs') or {}
+os.rmdir_all('docs') or {}
os.rmdir_all('vdocs') or {}
-os.mkdir_all('_docs') or {}
+os.mkdir_all('docs') or {}
os.mkdir_all('vdocs') or {}

// Generate HTML documentation
println('Generating HTML documentation...')
@@ -42,13 +42,12 @@ os.chdir(abs_dir_of_script) or {

// Generate Markdown documentation
println('Generating Markdown documentation...')
-os.rmdir_all('vdocs') or {}

// if os.system('v doc -m -no-color -f md -o ../vdocs/v/') != 0 {
//	panic('Failed to generate V markdown documentation')
// }

-if os.system('v doc -m -no-color -f md -o vdocs/herolib/') != 0 {
+if os.system('v doc -m -no-color -f md -o vdocs/') != 0 {
	panic('Failed to generate Hero markdown documentation')
}

@@ -62,4 +61,33 @@ $if !linux {
	}
}

+// Create Jekyll required files
+println('Creating Jekyll files...')
+os.mkdir_all('docs/assets/css') or {}
+
+// Create style.scss
+style_content := '---\n---\n\n@import "{{ site.theme }}";'
+os.write_file('docs/assets/css/style.scss', style_content) or {
+	panic('Failed to create style.scss: ${err}')
+}
+
+// Create _config.yml
+config_content := 'title: HeroLib Documentation
+description: Documentation for the HeroLib project
+theme: jekyll-theme-primer
+baseurl: /herolib
+
+exclude:
+  - Gemfile
+  - Gemfile.lock
+  - node_modules
+  - vendor/bundle/
+  - vendor/cache/
+  - vendor/gems/
+  - vendor/ruby/'
+
+os.write_file('docs/_config.yml', config_content) or {
+	panic('Failed to create _config.yml: ${err}')
+}
+
+println('Documentation generation completed successfully!')
71  docker/docker_ubuntu_install.sh  (new file)
@@ -0,0 +1,71 @@
#!/bin/bash

# Exit immediately if a command exits with a non-zero status
set -e

# Function to display an error message and exit
error_exit() {
	echo "Error: $1" >&2
	exit 1
}

# Update package index and upgrade system
echo "Updating system packages..."
sudo apt update && sudo apt upgrade -y || error_exit "Failed to update system packages."

# Install required packages for repository setup
echo "Installing prerequisites..."
sudo apt install -y ca-certificates curl gnupg || error_exit "Failed to install prerequisites."

# Create directory for Docker GPG key
echo "Setting up GPG keyring..."
sudo mkdir -p /etc/apt/keyrings || error_exit "Failed to create keyring directory."

# Add Docker's official GPG key
DOCKER_GPG_KEY=/etc/apt/keyrings/docker.gpg
echo "Adding Docker GPG key..."
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o $DOCKER_GPG_KEY || error_exit "Failed to add Docker GPG key."
sudo chmod a+r $DOCKER_GPG_KEY

# Set up Docker repository
echo "Adding Docker repository..."
REPO_ENTRY="deb [arch=$(dpkg --print-architecture) signed-by=$DOCKER_GPG_KEY] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
if ! grep -Fxq "$REPO_ENTRY" /etc/apt/sources.list.d/docker.list; then
	echo "$REPO_ENTRY" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null || error_exit "Failed to add Docker repository."
fi

# Update package index
echo "Updating package index..."
sudo apt update || error_exit "Failed to update package index."

# Install Docker Engine, CLI, and dependencies
echo "Installing Docker Engine and dependencies..."
sudo apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin || error_exit "Failed to install Docker packages."

# Verify Docker installation
echo "Verifying Docker installation..."
if ! docker --version; then
	error_exit "Docker installation verification failed."
fi

# Run a test container
echo "Running Docker test container..."
if ! sudo docker run --rm hello-world; then
	error_exit "Docker test container failed to run."
fi

# Add current user to Docker group (if not already added)
echo "Configuring Docker group..."
if ! groups $USER | grep -q '\bdocker\b'; then
	sudo usermod -aG docker $USER || error_exit "Failed to add user to Docker group."
	echo "User added to Docker group. Please log out and back in for this change to take effect."
else
	echo "User is already in the Docker group."
fi

# Enable Docker service on boot
echo "Enabling Docker service on boot..."
sudo systemctl enable docker || error_exit "Failed to enable Docker service."

# Success message
echo "Docker installation completed successfully!"
3  docker/herolib/.gitignore  (vendored, new file)
@@ -0,0 +1,3 @@
.bash_history
.openvscode-server/
.cache/
43  docker/herolib/Dockerfile  (new file)
@@ -0,0 +1,43 @@
|
||||
# Use Ubuntu 24.04 as the base image
|
||||
FROM ubuntu:24.04
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /root
|
||||
|
||||
# Copy local installation scripts into the container
|
||||
COPY scripts/install_v.sh /tmp/install_v.sh
|
||||
COPY scripts/install_herolib.vsh /tmp/install_herolib.vsh
|
||||
COPY scripts/install_vscode.sh /tmp/install_vscode.sh
|
||||
COPY scripts/ourinit.sh /usr/local/bin/
|
||||
|
||||
# Make the scripts executable
|
||||
RUN chmod +x /tmp/install_v.sh /tmp/install_herolib.vsh
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
curl bash sudo mc wget tmux htop openssh-server
|
||||
|
||||
RUN bash /tmp/install_v.sh
|
||||
|
||||
RUN yes y | bash /tmp/install_v.sh --analyzer
|
||||
|
||||
RUN bash /tmp/install_vscode.sh
|
||||
|
||||
#SSH
|
||||
RUN mkdir -p /var/run/sshd && \
|
||||
echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config && \
|
||||
echo 'PasswordAuthentication no' >> /etc/ssh/sshd_config && \
|
||||
chown -R root:root /root/.ssh && \
|
||||
chmod -R 700 /root/.ssh/ && \
|
||||
touch /root/.ssh/authorized_keys \
|
||||
chmod 600 /root/.ssh/authorized_keys && \
|
||||
service ssh start
|
||||
|
||||
RUN /tmp/install_herolib.vsh && \
|
||||
apt-get clean && \
|
||||
echo "PS1='HERO: \w \$ '" >> ~/.bashrc \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
||||
ENTRYPOINT ["/bin/bash"]
|
||||
CMD ["/bin/bash"]
|
||||
|
||||
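To try the image without the compose file or helper scripts — a minimal sketch, run from docker/herolib/ after the install scripts have been copied into scripts/ (build.sh below performs that copy):

```bash
docker build -t herolib .
# ENTRYPOINT is /bin/bash, so extra args are passed to bash:
docker run --rm -it herolib -c 'v version'
```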
113
docker/herolib/README.md
Normal file
@@ -0,0 +1,113 @@
# HeroLib Docker Environment

This directory contains the Docker configuration and scripts for setting up and managing the HeroLib development environment. The environment includes a containerized setup with a VSCode server, SSH access, and a PostgreSQL database.

## Key Components

### Docker Configuration Files

- `Dockerfile`: Defines the container image based on Ubuntu 24.04, installing necessary dependencies including:
  - V language and its analyzer
  - VSCode server
  - SSH server
  - Development tools (curl, tmux, htop, etc.)
  - HeroLib installation

- `docker-compose.yml`: Orchestrates the multi-container setup with:
  - PostgreSQL database service
  - HeroLib development environment
  - Port mappings for various services
  - Volume mounting for code persistence

## Scripts

### Container Management

- `shell.sh`: Interactive shell access script that:
  - Checks if the container is running
  - Starts the container if it's stopped
  - Verifies port accessibility (default: 4000)
  - Provides an interactive bash session inside the container

- `debug.sh`: Launches the container in debug mode with:
  - Interactive terminal
  - Volume mounts for scripts and code
  - Port mappings for various services (4000-4379)
  - Custom entrypoint using ourinit.sh

- `export.sh`: Creates a compressed export of the container:
  - Stops any running instances
  - Launches a temporary container
  - Runs the cleanup script
  - Exports and compresses the container to ~/Downloads/herolib.tar.gz

### SSH Access

- `ssh.sh`: Simple SSH connection script to access the container via port 4022

- `ssh_init.sh`: Configures SSH access by:
  - Collecting public keys from the local ~/.ssh directory
  - Setting up authorized_keys in the container
  - Installing and configuring the SSH server
  - Setting appropriate permissions
  - Enabling root login with key authentication

### Internal Scripts (in scripts/)

- `cleanup.sh`: Comprehensive system cleanup script that:
  - Removes unused packages and dependencies
  - Cleans the APT cache
  - Removes old log files
  - Clears temporary files and caches
  - Performs system maintenance tasks

- `install_herolib.vsh`: V script for the HeroLib installation:
  - Sets up necessary symlinks
  - Configures the V module structure
  - Adds useful shell aliases (e.g., vtest)

- `ourinit.sh`: Container initialization script that:
  - Starts the Redis server in daemon mode
  - Launches the VSCode server in a tmux session
  - Starts the SSH service
  - Provides an interactive bash shell

## Port Mappings

- 4000:3000 - Main application port
- 4022:22 - SSH access
- 4100:8100 - Additional service port
- 4101:8101 - Additional service port
- 4102:8102 - Additional service port
- 4379:6379 - Redis port

## Usage

1. Build and start the environment:

   ```bash
   docker compose up -d
   ```

2. Access the container shell:

   ```bash
   ./shell.sh
   ```

3. Connect via SSH:

   ```bash
   ./ssh.sh
   ```

4. Debug mode (interactive with direct terminal):

   ```bash
   ./debug.sh
   ```

5. Create a container export:

   ```bash
   ./export.sh
   ```

## Development

The environment mounts your local `~/code` directory to `/root/code` inside the container, allowing for seamless development between host and container. The PostgreSQL database persists data using a named volume.
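A quick way to confirm the mount and the database volume are wired up — a minimal sketch (the volume name is an assumption; compose prefixes it with the project directory name):

```bash
docker exec herolib ls /root/code              # should mirror your local ~/code
docker volume inspect herolib_postgres_data    # assumed name; check `docker volume ls`
```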
40
docker/herolib/build.sh
Executable file
@@ -0,0 +1,40 @@
#!/bin/bash -e

# Get the directory where the script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"

# Copy installation files
cp ../../install_v.sh ./scripts/install_v.sh
cp ../../install_herolib.vsh ./scripts/install_herolib.vsh

# Docker image and container names
DOCKER_IMAGE_NAME="herolib"
DEBUG_CONTAINER_NAME="herolib"

function cleanup {
    if docker ps -aq -f name="$DEBUG_CONTAINER_NAME" | grep -q .; then
        echo "Cleaning up leftover debug container..."
        docker rm -f "$DEBUG_CONTAINER_NAME" &>/dev/null || true
    fi
}
trap cleanup EXIT

# Attempt to build the Docker image (docker build takes no --name flag; log to a temp file)
BUILD_LOG=$(mktemp)
set +e
docker build --progress=plain -t "$DOCKER_IMAGE_NAME" . 2>&1 | tee "$BUILD_LOG"
BUILD_EXIT_CODE=${PIPESTATUS[0]}
set -e

# Handle build failure
if [ $BUILD_EXIT_CODE -ne 0 ]; then
    echo -e "\n[ERROR] Docker build failed.\n"
    echo "Remove the part which didn't build in the Dockerfile, then run again; to debug do:"
    echo docker run --name herolib -it --entrypoint=/bin/bash "herolib"
    exit $BUILD_EXIT_CODE
else
    echo -e "\n[INFO] Docker build completed successfully."
fi
19
docker/herolib/debug.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/bash -ex

# Get the directory where the script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# Remove any existing container named 'herolib' (ignore errors; -e would otherwise abort)
docker rm -f herolib > /dev/null 2>&1 || true

docker run --name herolib -it \
    --entrypoint="/usr/local/bin/ourinit.sh" \
    -v "${SCRIPT_DIR}/scripts:/scripts" \
    -v "$HOME/code:/root/code" \
    -p 4100:8100 \
    -p 4101:8101 \
    -p 4102:8102 \
    -p 4379:6379 \
    -p 4022:22 \
    -p 4000:3000 herolib
34
docker/herolib/docker-compose.yml
Normal file
@@ -0,0 +1,34 @@
services:
  postgres:
    image: postgres:latest
    container_name: postgres_service
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: planetfirst
      POSTGRES_DB: mydb
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
  herolib:
    build:
      context: .
      dockerfile: Dockerfile
    image: herolib:latest
    container_name: herolib
    volumes:
      - ~/code:/root/code
    stdin_open: true
    tty: true
    ports:
      - "4100:8100"
      - "4101:8101"
      - "4102:8102"
      - "4379:6379"
      - "4000:3000"
      - "4022:22"
    command: ["/usr/local/bin/ourinit.sh"]

volumes:
  postgres_data:
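With the stack up, the Postgres service can be checked from the host — a minimal sketch using the credentials defined above (assumes a local psql client is installed):

```bash
PGPASSWORD=planetfirst psql -h localhost -p 5432 -U postgres -d mydb -c 'SELECT version();'
```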
92
docker/herolib/export.sh
Executable file
@@ -0,0 +1,92 @@
#!/bin/bash -ex

# Get the directory where the script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

docker compose down

docker rm herolib --force || true

# Start the container in detached mode (-d)
docker run --name herolib \
    --entrypoint="/bin/bash" \
    -v "${SCRIPT_DIR}/scripts:/scripts" \
    -p 4022:22 \
    -d herolib -c "while true; do sleep 1; done"

docker exec -it herolib /scripts/cleanup.sh

# Detect the OS
detect_os() {
    if [[ "$(uname)" == "Darwin" ]]; then
        echo "osx"
    elif [[ -f /etc/os-release ]]; then
        . /etc/os-release
        if [[ "$ID" == "ubuntu" ]]; then
            echo "ubuntu"
        else
            echo "unknown"
        fi
    else
        echo "unknown"
    fi
}

OS=$(detect_os)

if [[ "$OS" == "osx" ]]; then
    echo "Running on macOS..."
    docker export herolib | gzip > "${HOME}/Downloads/herolib.tar.gz"
    echo "Docker image exported to ${HOME}/Downloads/herolib.tar.gz"
elif [[ "$OS" == "ubuntu" ]]; then
    echo "Running on Ubuntu..."
    export TEMP_TAR="/tmp/herolib.tar"

    # Export the Docker container to a tar file
    docker export herolib > "$TEMP_TAR"
    echo "Docker container exported to $TEMP_TAR"

    # Import the tar file back as a single-layer image
    docker import "$TEMP_TAR" herolib:single-layer
    echo "Docker image imported as single-layer: herolib:single-layer"

    # Log in to Docker Hub and push the image
    docker login --username despiegk
    docker tag herolib:single-layer despiegk/herolib:single-layer
    docker push despiegk/herolib:single-layer
    echo "Docker image pushed to Docker Hub as despiegk/herolib:single-layer"

    # Optionally remove the tar file after importing
    rm -f "$TEMP_TAR"
    echo "Temporary file $TEMP_TAR removed"

else
    echo "Unsupported OS detected. Exiting."
    exit 1
fi

docker kill herolib

# Test the pushed Docker image locally
# (the script runs with -e, so failures are caught with `if !` instead of checking $? afterwards)
echo "Testing the Docker image locally..."
TEST_CONTAINER_NAME="test_herolib_container"

if ! docker pull despiegk/herolib:single-layer; then
    echo "Failed to pull the Docker image from Docker Hub. Exiting."
    exit 1
fi

if ! docker run --name "$TEST_CONTAINER_NAME" -d despiegk/herolib:single-layer; then
    echo "Failed to run the Docker image as a container. Exiting."
    exit 1
fi

if docker ps | grep -q "$TEST_CONTAINER_NAME"; then
    echo "Container $TEST_CONTAINER_NAME is running successfully."
else
    echo "Container $TEST_CONTAINER_NAME is not running. Check the logs for details."
fi
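On a machine that receives the tarball, the archive can be loaded back with docker import. Note that import flattens the filesystem and drops the original ENTRYPOINT, so the command must be given explicitly — a sketch, path assumed:

```bash
docker import ~/Downloads/herolib.tar.gz herolib:imported
docker run --rm -it herolib:imported /bin/bash
```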
67
docker/herolib/scripts/cleanup.sh
Executable file
@@ -0,0 +1,67 @@
#!/bin/bash -e

# Log file for cleanup operations
LOG_FILE="/var/log/cleanup_script.log"
exec > >(tee -a $LOG_FILE) 2>&1

# Function to check and execute commands safely
safe_run() {
    echo "Running: $*"
    eval "$*"
}

# Update package lists
safe_run "apt update"

# Remove unused packages and dependencies
safe_run "apt autoremove -y"

# Clean up APT cache
safe_run "apt clean"
safe_run "apt autoclean"

# Remove old kernels (keeping the current and latest one)
safe_run "apt remove --purge -y $(dpkg --list | grep linux-image | awk '{print $2}' | grep -v $(uname -r | sed 's/[^-]*-[^-]*-//') | sort | head -n -1)"

# Clear systemd journal logs, keeping only the latest 7 days
safe_run "journalctl --vacuum-time=7d"

# Remove orphaned packages
safe_run "deborphan | xargs apt-get -y remove --purge"

# Clear thumbnail cache
safe_run "rm -rf ~/.cache/thumbnails/*"

# Remove old log files
safe_run "find /var/log -type f -name '*.log' -delete"

# Clear temporary files
safe_run "rm -rf /tmp/*"
safe_run "rm -rf /var/tmp/*"

# Remove user-specific temporary files (adjust for other users as needed)
safe_run "rm -rf ~/.cache/*"

# Remove .pyc files
safe_run "find / -type f -name '*.pyc' -delete"

# Remove unused snap versions
#safe_run "snap list --all | awk '/disabled/{print $1, $3}' | while read snapname revision; do snap remove "$snapname" --revision="$revision"; done"

# Clear trash for all users
safe_run "rm -rf /home/*/.local/share/Trash/*/**"
safe_run "rm -rf /root/.local/share/Trash/*/**"

# Free up swap space
#safe_run "swapoff -a && swapon -a"

# Update GRUB (in case old kernels were removed)
#safe_run "update-grub"

# # Final system update and upgrade
# safe_run "apt upgrade -y"

# Report completion
echo "System cleanup completed successfully."
71
docker/herolib/scripts/install_herolib.vsh
Executable file
@@ -0,0 +1,71 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import os

fn addtoscript(tofind string, toadd string) ! {
	home_dir := os.home_dir()
	mut rc_file := '${home_dir}/.zshrc'
	if !os.exists(rc_file) {
		rc_file = '${home_dir}/.bashrc'
		if !os.exists(rc_file) {
			return error('No .zshrc or .bashrc found in home directory')
		}
	}

	// Read current content
	mut content := os.read_file(rc_file)!

	// Remove existing alias if present, collapsing runs of empty lines
	lines := content.split('\n')
	mut new_lines := []string{}
	mut prev_is_empty := false
	for line in lines {
		if prev_is_empty {
			if line.trim_space() == '' {
				continue
			} else {
				prev_is_empty = false
			}
		}
		if line.trim_space() == '' {
			prev_is_empty = true
		}

		if !line.contains(tofind) {
			new_lines << line
		}
	}
	new_lines << toadd
	new_lines << ''
	// Write back to file
	new_content := new_lines.join('\n')
	os.write_file(rc_file, new_content)!
}

abs_dir_of_script := os.dir(@FILE)

// Reset symlinks if requested
println('Resetting all symlinks...')
os.rm('${os.home_dir()}/.vmodules/freeflowuniverse/herolib') or {}

// Create necessary directories
os.mkdir_all('${os.home_dir()}/.vmodules/freeflowuniverse') or {
	panic('Failed to create directory ~/.vmodules/freeflowuniverse: ${err}')
}

// Create new symlinks
os.symlink('${abs_dir_of_script}/lib', '${os.home_dir()}/.vmodules/freeflowuniverse/herolib') or {
	panic('Failed to create herolib symlink: ${err}')
}

println('Herolib installation completed successfully!')

// Add vtest alias
addtoscript('alias vtest=', 'alias vtest=\'v -stats -enable-globals -n -w -cg -gc none -cc tcc test\' ') or {
	eprintln('Failed to add vtest alias: ${err}')
}

println('Added vtest alias to shell configuration')
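Once the rc file has been re-sourced, the vtest alias gives a fast tcc-based test loop — a usage sketch (the test file name is hypothetical):

```bash
source ~/.bashrc
vtest lib/core/texttools/texttools_test.v   # hypothetical test file path
```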
473
docker/herolib/scripts/install_v.sh
Executable file
@@ -0,0 +1,473 @@
#!/bin/bash -e

# Help function
print_help() {
    echo "V & HeroLib Installer Script"
    echo
    echo "Usage: $0 [options]"
    echo
    echo "Options:"
    echo "  -h, --help       Show this help message"
    echo "  --reset          Force reinstallation of V"
    echo "  --remove         Remove V installation and exit"
    echo "  --analyzer       Install/update v-analyzer"
    echo "  --herolib        Install our herolib"
    echo
    echo "Examples:"
    echo "  $0"
    echo "  $0 --reset"
    echo "  $0 --remove"
    echo "  $0 --analyzer"
    echo "  $0 --herolib"
    echo "  $0 --reset --analyzer   # Fresh install of both"
    echo
}

# Parse arguments
RESET=false
REMOVE=false
INSTALL_ANALYZER=false
HEROLIB=false

for arg in "$@"; do
    case $arg in
    -h|--help)
        print_help
        exit 0
        ;;
    --reset)
        RESET=true
        ;;
    --remove)
        REMOVE=true
        ;;
    --herolib)
        HEROLIB=true
        ;;
    --analyzer)
        INSTALL_ANALYZER=true
        ;;
    *)
        echo "Unknown option: $arg"
        echo "Use -h or --help to see available options"
        exit 1
        ;;
    esac
done

# Function to check if command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

export DIR_BASE="$HOME"
export DIR_BUILD="/tmp"
export DIR_CODE="$DIR_BASE/code"
export DIR_CODE_V="$DIR_BASE/_code"

function sshknownkeysadd {
    mkdir -p ~/.ssh
    touch ~/.ssh/known_hosts
    if ! grep github.com ~/.ssh/known_hosts > /dev/null; then
        ssh-keyscan github.com >> ~/.ssh/known_hosts
    fi
    if ! grep git.ourworld.tf ~/.ssh/known_hosts > /dev/null; then
        ssh-keyscan git.ourworld.tf >> ~/.ssh/known_hosts
    fi
    git config --global pull.rebase false
}

function package_check_install {
    local command_name="$1"
    if command -v "$command_name" >/dev/null 2>&1; then
        echo "command '$command_name' is already installed."
    else
        package_install "$command_name"
    fi
}

function package_install {
    local command_name="$1"
    if [[ "${OSNAME}" == "ubuntu" ]]; then
        # $1 intentionally unquoted so a space-separated list installs multiple packages
        apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential
    elif [[ "${OSNAME}" == "darwin"* ]]; then
        brew install $command_name
    elif [[ "${OSNAME}" == "alpine"* ]]; then
        apk add $command_name
    elif [[ "${OSNAME}" == "arch"* ]]; then
        pacman --noconfirm -Su $command_name
    else
        echo "platform : ${OSNAME} not supported"
        exit 1
    fi
}

is_github_actions() {
    [ -d "/home/runner" ] || [ -d "$HOME/runner" ]
}

function myplatform {
    if [[ "${OSTYPE}" == "darwin"* ]]; then
        export OSNAME='darwin'
    elif [ -e /etc/os-release ]; then
        # Read the ID field from the /etc/os-release file (strip quotes if present)
        export OSNAME=$(grep '^ID=' /etc/os-release | cut -d= -f2 | tr -d '"')
        if [ "${OSNAME,,}" == "ubuntu" ]; then
            export OSNAME="ubuntu"
        fi
        if [ "${OSNAME}" == "archarm" ]; then
            export OSNAME="arch"
        fi
        if [ "${OSNAME}" == "debian" ]; then
            export OSNAME="ubuntu"
        fi
    else
        echo "Unable to determine the operating system."
        exit 1
    fi

    # if [ "$(uname -m)" == "x86_64" ]; then
    #     echo "This system is running a 64-bit processor."
    # else
    #     echo "This system is not running a 64-bit processor."
    #     exit 1
    # fi
}

myplatform

function os_update {
    echo ' - os update'
    if [[ "${OSNAME}" == "ubuntu" ]]; then
        if is_github_actions; then
            echo "github actions"
        else
            rm -f /var/lib/apt/lists/lock
            rm -f /var/cache/apt/archives/lock
            rm -f /var/lib/dpkg/lock*
        fi
        export TERM=xterm
        export DEBIAN_FRONTEND=noninteractive
        dpkg --configure -a
        apt update -y
        if is_github_actions; then
            echo "** IN GITHUB ACTIONS, DON'T DO UPDATE"
        else
            set +e
            echo "** UPDATE"
            apt-mark hold grub-efi-amd64-signed
            set -e
            apt upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes
            apt autoremove -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes
        fi
        #apt install apt-transport-https ca-certificates curl software-properties-common -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes
        package_install "apt-transport-https ca-certificates curl wget software-properties-common tmux"
        package_install "rclone rsync mc redis-server screen net-tools git dnsutils htop ca-certificates screen lsb-release binutils pkg-config"

    elif [[ "${OSNAME}" == "darwin"* ]]; then
        if command -v brew >/dev/null 2>&1; then
            echo ' - homebrew installed'
        else
            export NONINTERACTIVE=1
            /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
            unset NONINTERACTIVE
        fi
        set +e
        brew install mc redis curl tmux screen htop wget rclone tcc
        set -e
    elif [[ "${OSNAME}" == "alpine"* ]]; then
        # apk update takes no package arguments; the packages go through apk add
        apk update
        apk add mc curl rsync htop redis bash bash-completion screen git rclone tmux
        sed -i 's#/bin/ash#/bin/bash#g' /etc/passwd
    elif [[ "${OSNAME}" == "arch"* ]]; then
        pacman -Syy --noconfirm
        pacman -Syu --noconfirm
        pacman -Su --noconfirm arch-install-scripts gcc mc git tmux curl htop redis wget screen net-tools git sudo htop ca-certificates lsb-release screen rclone

        # Check if builduser exists, create if not
        if ! id -u builduser > /dev/null 2>&1; then
            useradd -m builduser
            echo "builduser:$(openssl rand -base64 32 | sha256sum | base64 | head -c 32)" | chpasswd
            echo 'builduser ALL=(ALL) NOPASSWD: ALL' | tee /etc/sudoers.d/builduser
        fi

        if [[ -n "${DEBUG}" ]]; then
            execute_with_marker "paru_install" paru_install
        fi
    fi
    echo ' - os update done'
}

function hero_lib_pull {
    pushd $DIR_CODE/github/freeflowuniverse/herolib > /dev/null 2>&1
    if [[ $(git status -s) ]]; then
        echo "There are uncommitted changes in the Git repository herolib."
        return 1
    fi
    git pull
    popd > /dev/null 2>&1
}

function hero_lib_get {
    mkdir -p $DIR_CODE/github/freeflowuniverse
    if [[ -d "$DIR_CODE/github/freeflowuniverse/herolib" ]]; then
        hero_lib_pull
    else
        pushd $DIR_CODE/github/freeflowuniverse > /dev/null 2>&1
        git clone --depth 1 --no-single-branch https://github.com/freeflowuniverse/herolib.git
        popd > /dev/null 2>&1
    fi
}

function install_secp256k1 {
    echo "Installing secp256k1..."
    if [[ "${OSNAME}" == "darwin"* ]]; then
        brew install secp256k1
    elif [[ "${OSNAME}" == "ubuntu" ]]; then
        # Install build dependencies
        apt-get install -y build-essential wget autoconf libtool

        # Download and extract secp256k1
        cd "${DIR_BUILD}"
        wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.3.2.tar.gz
        tar -xvf v0.3.2.tar.gz

        # Build and install
        cd secp256k1-0.3.2/
        ./autogen.sh
        ./configure
        make -j 5
        make install

        # Cleanup
        cd ..
        rm -rf secp256k1-0.3.2 v0.3.2.tar.gz
    else
        echo "secp256k1 installation not implemented for ${OSNAME}"
        exit 1
    fi
    echo "secp256k1 installation complete!"
}

remove_all() {
    echo "Removing V installation..."
    # Set reset to true to use existing reset functionality
    RESET=true
    # Call reset functionality
    sudo rm -rf ~/code/v
    sudo rm -rf ~/_code/v
    sudo rm -rf ~/.config/v-analyzer
    if command_exists v; then
        echo "Removing V from system..."
        sudo rm -f $(which v)
    fi
    if command_exists v-analyzer; then
        echo "Removing v-analyzer from system..."
        sudo rm -f $(which v-analyzer)
    fi

    # Remove v-analyzer path from rc files
    for RC_FILE in ~/.zshrc ~/.bashrc; do
        if [ -f "$RC_FILE" ]; then
            echo "Cleaning up $RC_FILE..."
            # Create a temporary file
            TMP_FILE=$(mktemp)
            # Remove lines containing v-analyzer/bin path
            sed '/v-analyzer\/bin/d' "$RC_FILE" > "$TMP_FILE"
            # Remove empty lines at the end of file
            sed -i.bak -e :a -e '/^\n*$/{$d;N;ba' -e '}' "$TMP_FILE"
            # Replace original file
            mv "$TMP_FILE" "$RC_FILE"
            echo "Cleaned up $RC_FILE"
        fi
    done

    echo "V removal complete"
}

# Function to check if a service is running and start it if needed
check_and_start_redis() {
    # Normal service management for non-container environments
    if [[ "${OSNAME}" == "ubuntu" ]] || [[ "${OSNAME}" == "debian" ]]; then
        # Check if running inside a container
        if grep -q "/docker/" /proc/1/cgroup || [ ! -d "/run/systemd/system" ]; then
            echo "Running inside a container. Starting redis directly."
            if pgrep redis-server > /dev/null; then
                echo "redis is already running."
            else
                echo "redis is not running. Starting it in the background..."
                redis-server --daemonize yes
                if pgrep redis-server > /dev/null; then
                    echo "redis started successfully."
                else
                    echo "Failed to start redis. Please check logs for details."
                    exit 1
                fi
            fi
            return
        fi

        if systemctl is-active --quiet "redis"; then
            echo "redis is already running."
        else
            echo "redis is not running. Starting it..."
            sudo systemctl start "redis"
            if systemctl is-active --quiet "redis"; then
                echo "redis started successfully."
            else
                echo "Failed to start redis. Please check logs for details."
                exit 1
            fi
        fi
    elif [[ "${OSNAME}" == "darwin"* ]]; then
        if brew services list | grep -q "^redis.*started"; then
            echo "redis is already running."
        else
            echo "redis is not running. Starting it..."
            brew services start redis
        fi
    elif [[ "${OSNAME}" == "alpine"* ]]; then
        if rc-service "redis" status | grep -q "running"; then
            echo "redis is already running."
        else
            echo "redis is not running. Starting it..."
            rc-service "redis" start
        fi
    elif [[ "${OSNAME}" == "arch"* ]]; then
        if systemctl is-active --quiet "redis"; then
            echo "redis is already running."
        else
            echo "redis is not running. Starting it..."
            sudo systemctl start "redis"
        fi
    else
        echo "Service management for redis is not implemented for platform: $OSNAME"
        exit 1
    fi
}

# Handle remove if requested
if [ "$REMOVE" = true ]; then
    remove_all
    exit 0
fi

# Handle reset if requested
if [ "$RESET" = true ]; then
    remove_all
    echo "Reset complete"
fi

# Create code directory if it doesn't exist
mkdir -p ~/code

# Check if v needs to be installed
if [ "$RESET" = true ] || ! command_exists v; then

    os_update

    sshknownkeysadd

    # Install secp256k1
    install_secp256k1

    # Only clone and install if directory doesn't exist (V is cloned into ~/_code/v)
    if [ ! -d ~/_code/v ]; then
        echo "Installing V..."
        mkdir -p ~/_code
        cd ~/_code
        git clone --depth=1 https://github.com/vlang/v
        cd v
        make
        sudo ./v symlink
    fi

    # Verify v is in path
    if ! command_exists v; then
        echo "Error: V installation failed or not in PATH"
        echo "Please ensure ~/_code/v is in your PATH"
        exit 1
    fi
    echo "V installation successful!"
fi

# Install v-analyzer if requested
if [ "$INSTALL_ANALYZER" = true ]; then
    echo "Installing v-analyzer..."
    v download -RD https://raw.githubusercontent.com/vlang/v-analyzer/main/install.vsh

    # Check if v-analyzer bin directory exists
    if [ ! -d "$HOME/.config/v-analyzer/bin" ]; then
        echo "Error: v-analyzer bin directory not found at $HOME/.config/v-analyzer/bin"
        echo "Please ensure v-analyzer was installed correctly"
        exit 1
    fi

    echo "v-analyzer installation successful!"
fi

# Add v-analyzer to PATH if installed
if [ -d "$HOME/.config/v-analyzer/bin" ]; then
    V_ANALYZER_PATH='export PATH="$PATH:$HOME/.config/v-analyzer/bin"'

    # Function to add path to rc file if not present
    add_to_rc() {
        local RC_FILE="$1"
        if [ -f "$RC_FILE" ]; then
            if ! grep -q "v-analyzer/bin" "$RC_FILE"; then
                echo "" >> "$RC_FILE"
                echo "$V_ANALYZER_PATH" >> "$RC_FILE"
                echo "Added v-analyzer to $RC_FILE"
            else
                echo "v-analyzer path already exists in $RC_FILE"
            fi
        fi
    }

    # Add to both .zshrc and .bashrc if they exist
    add_to_rc ~/.zshrc
    if [ "$(uname)" = "Darwin" ] && [ -f ~/.bashrc ]; then
        add_to_rc ~/.bashrc
    fi
fi

# Final verification
if ! command_exists v; then
    echo "Error: V is not accessible in PATH"
    echo "Please add ~/_code/v to your PATH and try again"
    exit 1
fi

check_and_start_redis

if [ "$HEROLIB" = true ]; then
    hero_lib_get
    ~/code/github/freeflowuniverse/herolib/install_herolib.vsh
fi

# if [ "$INSTALL_ANALYZER" = true ]; then
#     echo "Run 'source ~/.bashrc' or 'source ~/.zshrc' to update PATH for v-analyzer"
# fi

echo "Installation complete!"
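A quick post-install sanity check — a minimal sketch (the hello file path is arbitrary):

```bash
v version
echo 'println("hello from V")' > /tmp/hello.v
v run /tmp/hello.v
```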
98
docker/herolib/scripts/install_vscode.sh
Executable file
@@ -0,0 +1,98 @@
#!/bin/bash -e

# Set version and file variables
OPENVSCODE_SERVER_VERSION="1.97.0"
TMP_DIR="/tmp"
FILENAME="openvscode.tar.gz"
FILE_PATH="$TMP_DIR/$FILENAME"
INSTALL_DIR="/opt/openvscode"
BIN_PATH="/usr/local/bin/openvscode-server"
TMUX_SESSION="openvscode-server"

# Function to detect architecture
get_architecture() {
    ARCH=$(uname -m)
    case "$ARCH" in
    x86_64)
        echo "x64"
        ;;
    aarch64)
        echo "arm64"
        ;;
    *)
        echo "Unsupported architecture: $ARCH" >&2
        exit 1
        ;;
    esac
}

# Check if OpenVSCode Server is already installed
if [ -d "$INSTALL_DIR" ] && [ -x "$BIN_PATH" ]; then
    echo "OpenVSCode Server is already installed at $INSTALL_DIR. Skipping download and installation."
else
    # Determine architecture-specific URL
    ARCH=$(get_architecture)
    if [ "$ARCH" == "x64" ]; then
        DOWNLOAD_URL="https://github.com/gitpod-io/openvscode-server/releases/download/openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}/openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}-linux-x64.tar.gz"
    elif [ "$ARCH" == "arm64" ]; then
        DOWNLOAD_URL="https://github.com/gitpod-io/openvscode-server/releases/download/openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}/openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}-linux-arm64.tar.gz"
    fi

    # Navigate to temporary directory
    cd "$TMP_DIR"

    # Remove existing file if it exists
    if [ -f "$FILE_PATH" ]; then
        rm -f "$FILE_PATH"
    fi

    # Download file using curl
    curl -L "$DOWNLOAD_URL" -o "$FILE_PATH"

    # Verify file size is greater than 40 MB (40 * 1024 * 1024 bytes)
    FILE_SIZE=$(stat -c%s "$FILE_PATH")
    if [ "$FILE_SIZE" -le $((40 * 1024 * 1024)) ]; then
        echo "Error: Downloaded file is 40 MB or smaller; the download likely failed." >&2
        exit 1
    fi

    # Extract the tar.gz file
    EXTRACT_DIR="openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}-linux-${ARCH}"
    tar -xzf "$FILE_PATH"

    # Move the extracted directory to the install location
    if [ -d "$INSTALL_DIR" ]; then
        rm -rf "$INSTALL_DIR"
    fi
    mv "$EXTRACT_DIR" "$INSTALL_DIR"

    # Create a symlink for easy access
    ln -sf "$INSTALL_DIR/bin/openvscode-server" "$BIN_PATH"

    # Verify installation
    if ! command -v openvscode-server >/dev/null 2>&1; then
        echo "Error: Failed to create symlink for openvscode-server." >&2
        exit 1
    fi

    # Install default plugins
    PLUGINS=("ms-python.python" "esbenp.prettier-vscode" "saoudrizwan.claude-dev" "yzhang.markdown-all-in-one" "ms-vscode-remote.remote-ssh" "ms-vscode.remote-explorer" "charliermarsh.ruff" "qwtel.sqlite-viewer" "vosca.vscode-v-analyzer" "tomoki1207.pdf")
    for PLUGIN in "${PLUGINS[@]}"; do
        "$INSTALL_DIR/bin/openvscode-server" --install-extension "$PLUGIN"
    done

    echo "Default plugins installed: ${PLUGINS[*]}"

    # Clean up temporary directory
    if [ -d "$TMP_DIR" ]; then
        find "$TMP_DIR" -maxdepth 1 -type f -name "openvscode*" -exec rm -f {} \;
    fi
fi

# Start OpenVSCode Server in a tmux session
if tmux has-session -t "$TMUX_SESSION" 2>/dev/null; then
    tmux kill-session -t "$TMUX_SESSION"
fi
tmux new-session -d -s "$TMUX_SESSION" "$INSTALL_DIR/bin/openvscode-server"

echo "OpenVSCode Server is running in a tmux session named '$TMUX_SESSION'."
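To verify the server came up — a sketch run inside the container; openvscode-server listens on port 3000 by default, which the compose file maps to 4000 on the host:

```bash
tmux attach -t openvscode-server   # detach again with Ctrl-b d
curl -I http://localhost:3000      # expect an HTTP response if the server is up
```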
13
docker/herolib/scripts/ourinit.sh
Executable file
@@ -0,0 +1,13 @@
#!/bin/bash -e
redis-server --daemonize yes

TMUX_SESSION="vscode"
# Start OpenVSCode Server in a tmux session
if tmux has-session -t "$TMUX_SESSION" 2>/dev/null; then
    tmux kill-session -t "$TMUX_SESSION"
fi
tmux new-session -d -s "$TMUX_SESSION" "/usr/local/bin/openvscode-server --host 0.0.0.0 --without-connection-token"

service ssh start

exec /bin/bash
61
docker/herolib/shell.sh
Executable file
@@ -0,0 +1,61 @@
#!/bin/bash -e

# Get the directory where the script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"

CONTAINER_NAME="herolib"
TARGET_PORT=4000

# Function to check if a container is running
# (docker ps exits 0 even when the list is empty, so test the output instead)
is_container_running() {
    docker ps --filter "name=$CONTAINER_NAME" --filter "status=running" -q | grep -q .
}

# Function to check if a port is accessible
is_port_accessible() {
    nc -zv 127.0.0.1 "$1" &>/dev/null
}

# Check if the container exists and is running
if ! is_container_running; then
    echo "Container $CONTAINER_NAME is not running."

    # Check if the container exists but is stopped
    if docker ps -a --filter "name=$CONTAINER_NAME" -q | grep -q .; then
        echo "Starting existing container $CONTAINER_NAME..."
        docker start "$CONTAINER_NAME"
    else
        echo "Container $CONTAINER_NAME does not exist. Attempting to start with start.sh..."
        if [[ -f "$SCRIPT_DIR/start.sh" ]]; then
            bash "$SCRIPT_DIR/start.sh"
        else
            echo "Error: start.sh not found in $SCRIPT_DIR."
            exit 1
        fi
    fi

    # Wait for the container to be fully up
    sleep 5
fi

# Verify the container is running
if ! is_container_running; then
    echo "Error: Failed to start container $CONTAINER_NAME."
    exit 1
fi
echo "Container $CONTAINER_NAME is running."

# Check if the target port is accessible
if is_port_accessible "$TARGET_PORT"; then
    echo "Port $TARGET_PORT is accessible."
else
    echo "Port $TARGET_PORT is not accessible. Please check the service inside the container."
fi

# Enter the container
echo
echo " ** WE NOW LOGIN TO THE CONTAINER ** "
echo
docker exec -it herolib bash
3
docker/herolib/ssh.sh
Executable file
@@ -0,0 +1,3 @@
#!/bin/bash -e

ssh root@localhost -p 4022
63
docker/herolib/ssh_init.sh
Executable file
@@ -0,0 +1,63 @@
#!/bin/bash -e

# Get the directory where the script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"

# Define variables
CONTAINER_NAME="herolib"
CONTAINER_SSH_DIR="/root/.ssh"
AUTHORIZED_KEYS="authorized_keys"
TEMP_AUTH_KEYS="/tmp/authorized_keys"

# Step 1: Create a temporary file to store public keys
> $TEMP_AUTH_KEYS # Clear the file if it exists

# Step 2: Add public keys from ~/.ssh/ if they exist
if ls ~/.ssh/*.pub 1>/dev/null 2>&1; then
    cat ~/.ssh/*.pub >> $TEMP_AUTH_KEYS
fi

# Step 3: Check if ssh-agent is running and get public keys from it
if pgrep ssh-agent >/dev/null; then
    echo "ssh-agent is running. Fetching keys..."
    ssh-add -L >> $TEMP_AUTH_KEYS 2>/dev/null
else
    echo "ssh-agent is not running or no keys loaded."
fi

# Step 4: Ensure the temporary file is not empty
if [ ! -s $TEMP_AUTH_KEYS ]; then
    echo "No public keys found. Exiting."
    exit 1
fi

# Step 5: Ensure the container's SSH directory exists
docker exec -it $CONTAINER_NAME mkdir -p $CONTAINER_SSH_DIR
docker exec -it $CONTAINER_NAME chmod 700 $CONTAINER_SSH_DIR

# Step 6: Copy the public keys into the container's authorized_keys file
docker cp $TEMP_AUTH_KEYS $CONTAINER_NAME:$CONTAINER_SSH_DIR/$AUTHORIZED_KEYS

# Step 7: Set proper permissions for authorized_keys
docker exec -it $CONTAINER_NAME chmod 600 $CONTAINER_SSH_DIR/$AUTHORIZED_KEYS

# Step 8: Install and start the SSH server inside the container
docker exec -it $CONTAINER_NAME bash -c "
    apt-get update &&
    apt-get install -y openssh-server &&
    mkdir -p /var/run/sshd &&
    echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config &&
    echo 'PasswordAuthentication no' >> /etc/ssh/sshd_config &&
    chown -R root:root /root/.ssh &&
    chmod -R 700 /root/.ssh/ &&
    chmod 600 /root/.ssh/authorized_keys &&
    service ssh start
"

# Step 9: Clean up temporary file on the host
rm $TEMP_AUTH_KEYS

echo "SSH keys added and SSH server configured. You can now SSH into the container."

ssh root@localhost -p 4022
11
docker/herolib/start.sh
Executable file
@@ -0,0 +1,11 @@
#!/bin/bash -e

# Get the directory where the script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"

docker rm herolib --force || true

docker compose up -d

./ssh_init.sh
22
docker/postgresql/docker-compose.yml
Normal file
@@ -0,0 +1,22 @@
version: '3.9'
services:
  db:
    image: 'postgres:17.2-alpine3.21'
    restart: always
    ports:
      - 5432:5432
    environment:
      POSTGRES_PASSWORD: 1234
    networks:
      - my_network

  adminer:
    image: adminer
    restart: always
    ports:
      - 8080:8080
    networks:
      - my_network

networks:
  my_network:
6
docker/postgresql/readme.md
Normal file
@@ -0,0 +1,6 @@
Connection settings for Adminer (or any other client on the compose network):

- Server (Host): db (Docker Compose creates an internal network and uses service names as hostnames)
- Username: postgres (default PostgreSQL username)
- Password: 1234 (as set in the POSTGRES_PASSWORD environment variable)
- Database: leave it empty or enter postgres (the default database)
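From the host the same database is reachable on the published port — a minimal sketch, assuming a local psql client:

```bash
PGPASSWORD=1234 psql -h localhost -p 5432 -U postgres -c '\l'
# Adminer UI: http://localhost:8080 (Server: db, User: postgres, Password: 1234)
```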
13
docker/postgresql/start.sh
Executable file
@@ -0,0 +1,13 @@
#!/bin/bash -e

# Get the directory where the script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"

# Stop any existing containers and remove them
docker compose down

# Start the services in detached mode
docker compose up -d

echo "PostgreSQL is ready"
@@ -34,7 +34,7 @@ The examples directory demonstrates various capabilities of HeroLib:
When creating V scripts (.vsh files), always use the following shebang:

```bash
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
```

This shebang ensures:
@@ -1,18 +1,16 @@
module main

import freeflowuniverse.herolib.osal
import freeflowuniverse.herolib.installers.base
import freeflowuniverse.herolib.core

fn do() ! {
	//base.uninstall_brew()!
	//println("something")
	if osal.is_osx() {
	// base.uninstall_brew()!
	// println("something")
	if core.is_osx()! {
		println('IS OSX')
	}

	// mut job2 := osal.exec(cmd: 'ls /')!
	// println(job2)

}

fn main() {
@@ -1,10 +1,9 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.builder
import freeflowuniverse.herolib.core.pathlib
import os

fn do1() ! {
	mut b := builder.new()!
	mut n := b.node_new(ipaddr: 'root@195.192.213.2')!

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.builder
import freeflowuniverse.herolib.core.pathlib
@@ -10,7 +10,7 @@ mut n := b.node_new(ipaddr: 'root@51.195.61.5')!

println(n)

r:=n.exec(cmd:"ls /")!
r := n.exec(cmd: 'ls /')!
println(r)

// n.upload(source: myexamplepath, dest: '/tmp/myexamplepath2')!

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.builder
import freeflowuniverse.herolib.core.pathlib
@@ -7,7 +7,7 @@ import os
mut b := builder.new()!
mut n := b.node_new(ipaddr: 'root@302:1d81:cef8:3049:ad01:796d:a5da:9c6')!

r:=n.exec(cmd:"ls /")!
r := n.exec(cmd: 'ls /')!
println(r)

// n.upload(source: myexamplepath, dest: '/tmp/myexamplepath2')!
25
examples/clients/mail.vsh
Executable file
@@ -0,0 +1,25 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.mailclient

// remove the previous one, otherwise the env variables are not read
mailclient.config_delete(name: 'test')!

// env variables which need to be set are:
// - MAIL_FROM=...
// - MAIL_PASSWORD=...
// - MAIL_PORT=465
// - MAIL_SERVER=...
// - MAIL_USERNAME=...

mut client := mailclient.get(name: 'test')!

println(client)

client.send(
	subject: 'this is a test'
	to:      'kristof@incubaid.com'
	body:    '
		this is my email content
		'
)!
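The script reads its configuration from the environment, so a run looks like this — a sketch with hypothetical credentials:

```bash
export MAIL_SERVER=smtp.example.com    # hypothetical SMTP host
export MAIL_PORT=465
export MAIL_USERNAME=me@example.com    # hypothetical
export MAIL_PASSWORD='...'
export MAIL_FROM=me@example.com
examples/clients/mail.vsh
```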
43
examples/clients/psql.vsh
Executable file
@@ -0,0 +1,43 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core
import freeflowuniverse.herolib.clients.postgresql_client

// Configure PostgreSQL client
heroscript := "
!!postgresql_client.configure
    name:'test'
    user: 'postgres'
    port: 5432
    host: 'localhost'
    password: '1234'
    dbname: 'postgres'
"

// Process the heroscript configuration
postgresql_client.play(heroscript: heroscript)!

// Get the configured client
mut db_client := postgresql_client.get(name: 'test')!

// Check if test database exists, create if not
if !db_client.db_exists('test')! {
	println('Creating database test...')
	db_client.db_create('test')!
}

// Switch to test database
db_client.dbname = 'test'

// Create table if not exists
create_table_sql := 'CREATE TABLE IF NOT EXISTS users (
	id SERIAL PRIMARY KEY,
	name VARCHAR(100) NOT NULL,
	email VARCHAR(255) UNIQUE NOT NULL,
	created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)'

println('Creating table users if not exists...')
db_client.exec(create_table_sql)!

println('Database and table setup completed successfully!')
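The credentials above match the docker/postgresql compose file, so one way to run the example end to end — a sketch, from the repository root:

```bash
( cd docker/postgresql && ./start.sh )   # postgres on localhost:5432, password 1234
examples/clients/psql.vsh
```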
@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core.base

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.core.base

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.develop.gittools

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import os
import freeflowuniverse.herolib.core.codeparser

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run

import time
import freeflowuniverse.herolib.core.smartid

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.data.dbfs
import time

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core.generator.installer

@@ -1,7 +1,7 @@
module dagu

// import os
import freeflowuniverse.herolib.clients.httpconnection
import freeflowuniverse.herolib.core.httpconnection
import os

struct GiteaClient[T] {

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import os
import json

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core.pathlib
import os

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core.pathlib
import os

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.data.paramsparser

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core.pathlib
import os

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.crypt.secrets
1
examples/data/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
cache
139
examples/data/cache.vsh
Executable file
@@ -0,0 +1,139 @@
#!/usr/bin/env -S v run

// Example struct to cache
import freeflowuniverse.herolib.data.cache
import time

@[heap]
struct User {
	id   u32
	name string
	age  int
}

fn main() {
	// Create a cache with custom configuration
	config := cache.CacheConfig{
		max_entries:    1000 // Maximum number of entries
		max_size_mb:    10.0 // Maximum cache size in MB
		ttl_seconds:    300 // Items expire after 5 minutes
		eviction_ratio: 0.2 // Evict 20% of entries when full
	}

	mut user_cache := cache.new_cache[User](config)

	// Create some example users
	user1 := &User{
		id:   1
		name: 'Alice'
		age:  30
	}

	user2 := &User{
		id:   2
		name: 'Bob'
		age:  25
	}

	// Add users to cache
	println('Adding users to cache...')
	user_cache.set(user1.id, user1)
	user_cache.set(user2.id, user2)

	// Retrieve users from cache
	println('\nRetrieving users from cache:')
	if cached_user1 := user_cache.get(1) {
		println('Found user 1: ${cached_user1.name}, age ${cached_user1.age}')
	}

	if cached_user2 := user_cache.get(2) {
		println('Found user 2: ${cached_user2.name}, age ${cached_user2.age}')
	}

	// Try to get non-existent user
	println('\nTrying to get non-existent user:')
	if user := user_cache.get(999) {
		println('Found user: ${user.name}')
	} else {
		println('User not found in cache')
	}

	// Demonstrate cache stats
	println('\nCache statistics:')
	println('Number of entries: ${user_cache.len()}')

	// Clear the cache
	println('\nClearing cache...')
	user_cache.clear()
	println('Cache entries after clear: ${user_cache.len()}')

	// Demonstrate max entries limit
	println('\nDemonstrating max entries limit (adding 2000 entries):')
	println('Initial cache size: ${user_cache.len()}')

	for i := u32(0); i < 2000; i++ {
		user := &User{
			id:   i
			name: 'User${i}'
			age:  20 + int(i % 50)
		}
		user_cache.set(i, user)

		if i % 200 == 0 {
			println('After adding ${i} entries:')
			println('  Cache size: ${user_cache.len()}')

			// Check some entries to verify LRU behavior
			if i >= 500 {
				old_id := if i < 1000 { u32(0) } else { i - 1000 }
				recent_id := i - 1
				println('  Entry ${old_id} (old): ${if _ := user_cache.get(old_id) {
					'found'
				} else {
					'evicted'
				}}')
				println('  Entry ${recent_id} (recent): ${if _ := user_cache.get(recent_id) {
					'found'
				} else {
					'evicted'
				}}')
			}
			println('')
		}
	}

	println('Final statistics:')
	println('Cache size: ${user_cache.len()} (should be max 1000)')

	// Verify we can only access recent entries
	println('\nVerifying LRU behavior:')
	println('First entry (0): ${if _ := user_cache.get(0) { 'found' } else { 'evicted' }}')
	println('Middle entry (1000): ${if _ := user_cache.get(1000) { 'found' } else { 'evicted' }}')
	println('Recent entry (1900): ${if _ := user_cache.get(1900) { 'found' } else { 'evicted' }}')
	println('Last entry (1999): ${if _ := user_cache.get(1999) { 'found' } else { 'evicted' }}')

	// Demonstrate TTL expiration
	println('\nDemonstrating TTL expiration:')
	quick_config := cache.CacheConfig{
		ttl_seconds: 2 // Set short TTL for demo
	}
	mut quick_cache := cache.new_cache[User](quick_config)

	// Add a user
	quick_cache.set(user1.id, user1)
	println('Added user to cache with 2 second TTL')

	if cached := quick_cache.get(user1.id) {
		println('User found immediately: ${cached.name}')
	}

	// Wait for TTL to expire
	println('Waiting for TTL to expire...')
	time.sleep(3 * time.second)

	if _ := quick_cache.get(user1.id) {
		println('User still in cache')
	} else {
		println('User expired from cache as expected')
	}
}
@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.data.encoder
import crypto.ed25519
@@ -14,7 +14,7 @@ mut:
_, privkey := ed25519.generate_key()!
mut a := AStruct{
	items: ['a', 'b']
	nr: 10
	nr:    10
	// privkey: []u8{len: 5, init: u8(0xf8)}
	privkey: privkey
}
@@ -36,10 +36,9 @@ aa.privkey = d.get_bytes()

assert a == aa

a = AStruct{
	items: ['a', 'b']
	nr: 10
	items:   ['a', 'b']
	nr:      10
	privkey: []u8{len: 5, init: u8(0xf8)}
}

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.crypt.aes_symmetric { decrypt, encrypt }
import freeflowuniverse.herolib.ui.console
175
examples/data/graphdb.vsh
Executable file
175
examples/data/graphdb.vsh
Executable file
@@ -0,0 +1,175 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

// Example demonstrating GraphDB usage in a social network context
import freeflowuniverse.herolib.data.graphdb

fn main() {
	// Initialize a new graph database with default cache settings
	mut gdb := graphdb.new(
		path: '/tmp/social_network_example'
		reset: true // Start fresh each time
	)!

	println('=== Social Network Graph Example ===\n')

	// 1. Creating User Nodes
	println('Creating users...')
	mut alice_id := gdb.create_node({
		'type': 'user'
		'name': 'Alice Chen'
		'age': '28'
		'location': 'San Francisco'
		'occupation': 'Software Engineer'
	})!
	println('Created user: ${gdb.debug_node(alice_id)!}')

	mut bob_id := gdb.create_node({
		'type': 'user'
		'name': 'Bob Smith'
		'age': '32'
		'location': 'New York'
		'occupation': 'Product Manager'
	})!
	println('Created user: ${gdb.debug_node(bob_id)!}')

	mut carol_id := gdb.create_node({
		'type': 'user'
		'name': 'Carol Davis'
		'age': '27'
		'location': 'San Francisco'
		'occupation': 'Data Scientist'
	})!
	println('Created user: ${gdb.debug_node(carol_id)!}')

	// 2. Creating Organization Nodes
	println('\nCreating organizations...')
	mut techcorp_id := gdb.create_node({
		'type': 'organization'
		'name': 'TechCorp'
		'industry': 'Technology'
		'location': 'San Francisco'
		'size': '500+'
	})!
	println('Created organization: ${gdb.debug_node(techcorp_id)!}')

	mut datacorp_id := gdb.create_node({
		'type': 'organization'
		'name': 'DataCorp'
		'industry': 'Data Analytics'
		'location': 'New York'
		'size': '100-500'
	})!
	println('Created organization: ${gdb.debug_node(datacorp_id)!}')

	// 3. Creating Interest Nodes
	println('\nCreating interest groups...')
	mut ai_group_id := gdb.create_node({
		'type': 'group'
		'name': 'AI Enthusiasts'
		'category': 'Technology'
		'members': '0'
	})!
	println('Created group: ${gdb.debug_node(ai_group_id)!}')

	// 4. Establishing Relationships
	println('\nCreating relationships...')

	// Friendship relationships
	gdb.create_edge(alice_id, bob_id, 'FRIENDS', {
		'since': '2022'
		'strength': 'close'
	})!
	gdb.create_edge(alice_id, carol_id, 'FRIENDS', {
		'since': '2023'
		'strength': 'close'
	})!

	// Employment relationships
	gdb.create_edge(alice_id, techcorp_id, 'WORKS_AT', {
		'role': 'Senior Engineer'
		'since': '2021'
		'department': 'Engineering'
	})!
	gdb.create_edge(bob_id, datacorp_id, 'WORKS_AT', {
		'role': 'Product Lead'
		'since': '2020'
		'department': 'Product'
	})!
	gdb.create_edge(carol_id, techcorp_id, 'WORKS_AT', {
		'role': 'Data Scientist'
		'since': '2022'
		'department': 'Analytics'
	})!

	// Group memberships
	gdb.create_edge(alice_id, ai_group_id, 'MEMBER_OF', {
		'joined': '2023'
		'status': 'active'
	})!
	gdb.create_edge(carol_id, ai_group_id, 'MEMBER_OF', {
		'joined': '2023'
		'status': 'active'
	})!

	// 5. Querying the Graph
	println('\nPerforming queries...')

	// Find users in San Francisco
	println('\nUsers in San Francisco:')
	sf_users := gdb.query_nodes_by_property('location', 'San Francisco')!
	for user in sf_users {
		if user.properties['type'] == 'user' {
			println('- ${user.properties['name']} (${user.properties['occupation']})')
		}
	}

	// Find Alice's friends
	println("\nAlice's friends:")
	alice_friends := gdb.get_connected_nodes(alice_id, 'FRIENDS', 'out')!
	for friend in alice_friends {
		println('- ${friend.properties['name']} in ${friend.properties['location']}')
	}

	// Find where Alice works
	println("\nAlice's workplace:")
	alice_workplaces := gdb.get_connected_nodes(alice_id, 'WORKS_AT', 'out')!
	for workplace in alice_workplaces {
		println('- ${workplace.properties['name']} (${workplace.properties['industry']})')
	}

	// Find TechCorp employees
	println('\nTechCorp employees:')
	techcorp_employees := gdb.get_connected_nodes(techcorp_id, 'WORKS_AT', 'in')!
	for employee in techcorp_employees {
		println('- ${employee.properties['name']} as ${employee.properties['occupation']}')
	}

	// Find AI group members
	println('\nAI Enthusiasts group members:')
	ai_members := gdb.get_connected_nodes(ai_group_id, 'MEMBER_OF', 'in')!
	for member in ai_members {
		println('- ${member.properties['name']}')
	}

	// 6. Updating Data
	println('\nUpdating data...')

	// Promote Alice
	println('\nPromoting Alice...')
	mut alice := gdb.get_node(alice_id)!
	alice.properties['occupation'] = 'Lead Software Engineer'
	gdb.update_node(alice_id, alice.properties)!

	// Update Alice's work relationship
	mut edges := gdb.get_edges_between(alice_id, techcorp_id)!
	if edges.len > 0 {
		gdb.update_edge(edges[0].id, {
			'role': 'Engineering Team Lead'
			'since': '2021'
			'department': 'Engineering'
		})!
	}

	println('\nFinal graph structure:')
	gdb.print_graph()!
}
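The query helpers above compose naturally: a two-hop lookup such as friends-of-friends is just two rounds of get_connected_nodes. A hypothetical sketch under the same API; the GraphDB type name, the u32 id type, and the node .id field are assumptions not confirmed by this diff:

// Hypothetical helper: names of a user's friends-of-friends, built only
// from gdb.get_connected_nodes as demonstrated in the example above.
fn friends_of_friends(mut gdb graphdb.GraphDB, start u32) ![]string {
	mut names := []string{}
	for friend in gdb.get_connected_nodes(start, 'FRIENDS', 'out')! {
		for fof in gdb.get_connected_nodes(friend.id, 'FRIENDS', 'out')! {
			names << fof.properties['name']
		}
	}
	return names
}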
@@ -1,31 +1,33 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.data.encoderhero
import freeflowuniverse.herolib.core.base

//this is docu at top
@[name:"teststruct " ; params]
// this is docu at top
@[name: 'teststruct ']
@[params]
pub struct TestStruct {
//this is docu at mid
// this is docu at mid
pub mut:
	id int @[hi]
	descr string
	secret string @[secret]
	number int = 1 @[min:1 ;max:10]
	id int @[hi]
	descr string
	secret string @[secret]
	number int = 1 @[max: 10; min: 1]
	yesno bool
	liststr []string
	listint []int
	ss SubStruct
	ss2 []SubStruct
	ss SubStruct
	ss2 []SubStruct
}

pub struct SubStruct {
pub mut:
	color string
	size int
	size int
}

fn (self TestStruct) heroscript()!string {
	mut out:=""
fn (self TestStruct) heroscript() !string {
	mut out := ''
	mut p := encoderhero.encode[TestStruct](self)!
	// out += "!!hr.teststruct_define " + p.heroscript() + "\n"
	// p = paramsparser.encode[SubStruct](self.ss)!
@@ -39,10 +41,9 @@ fn (self TestStruct) heroscript()!string {
	return p
}


mut t := TestStruct{
	id:100
	descr: '
	id: 100
	descr: '
test
muliline
s
@@ -50,15 +51,24 @@ mut t := TestStruct{
muliline
test
muliline
'
	number: 2
	yesno: true
'
	number: 2
	yesno: true
	liststr: ['one', 'two+two']
	listint: [1, 2]
	ss:SubStruct{color:"red",size:10}
	ss: SubStruct{
		color: 'red'
		size: 10
	}
}
t.ss2 << SubStruct{
	color: 'red1'
	size: 11
}
t.ss2 << SubStruct{
	color: 'red2'
	size: 12
}
t.ss2<< SubStruct{color:"red1",size:11}
t.ss2<< SubStruct{color:"red2",size:12}

println(t.heroscript()!)


29
examples/data/heroencoder_simple.vsh
Executable file
@@ -0,0 +1,29 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.data.encoderhero
import freeflowuniverse.herolib.core.base
import time

struct Person {
mut:
	name string
	age int = 20
	birthday time.Time
}

mut person := Person{
	name: 'Bob'
	birthday: time.now()
}
heroscript := encoderhero.encode[Person](person)!

println(heroscript)

person2 := encoderhero.decode[Person](heroscript)!
println(person2)

// show that the action & method names in the heroscript do not matter for decoding
heroscript2 := "!!a.b name:Bob age:20 birthday:'2025-02-06 09:57:30'"
person3 := encoderhero.decode[Person](heroscript2)!

println(person3)
35
examples/data/jsonexample.vsh
Executable file
@@ -0,0 +1,35 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import json

enum JobTitle {
	manager
	executive
	worker
}

struct Employee {
mut:
	name string
	family string @[json: '-'] // this field will be skipped
	age int
	salary f32
	title JobTitle @[json: 'ETitle'] // the key for this field will be 'ETitle', not 'title'
	notes string @[omitempty] // the JSON property is not created if the string is equal to '' (an empty string).
	// TODO: document @[raw]
}

x := Employee{'Peter', 'Begins', 28, 95000.5, .worker, ''}
println(x)
s := json.encode(x)
println('JSON encoding of employee x: ${s}')
assert s == '{"name":"Peter","age":28,"salary":95000.5,"ETitle":"worker"}'
mut y := json.decode(Employee, s)!
assert y != x
assert y.family == ''
y.family = 'Begins'
assert y == x
println(y)
ss := json.encode(y)
println('JSON encoding of employee y: ${ss}')
assert ss == s
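On the TODO above: in V's json module a string field tagged @[raw] receives the raw JSON text of that property instead of a decoded value, which is handy for deferred or schema-less payloads. A small illustration; the Event struct and its input are invented for the example:

struct Event {
	name    string
	payload string @[raw] // keeps the original JSON snippet as-is
}

event := json.decode(Event, '{"name":"demo","payload":{"a":1}}') or { panic(err) }
println(event.payload) // prints the raw JSON object: {"a":1}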
63
examples/data/location/location_example.vsh
Executable file
@@ -0,0 +1,63 @@
#!/usr/bin/env -S v -n -w -cg -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.postgresql_client
import freeflowuniverse.herolib.data.location

// Configure PostgreSQL client
heroscript := "
!!postgresql_client.configure
	name:'test'
	user: 'postgres'
	port: 5432
	host: 'localhost'
	password: '1234'
	dbname: 'postgres'
"

// Process the heroscript configuration
postgresql_client.play(heroscript: heroscript)!

// Get the configured client
mut db_client := postgresql_client.get(name: 'test')!

// Create a new location instance
mut loc := location.new(mut db_client, false) or { panic(err) }
println('Location database initialized')

// Initialize the database (downloads and imports data)
// This only needs to be done once or when updating data
println('Downloading and importing location data (this may take a few minutes)...')

// the argument controls whether the data is re-downloaded
loc.download_and_import(false) or { panic(err) }
println('Data import complete')

// // Example 1: Search for a city
// println('\nSearching for London...')
// results := loc.search('London', 'GB', 5, true) or { panic(err) }
// for result in results {
// 	println('${result.city.name}, ${result.country.name} (${result.country.iso2})')
// 	println('Coordinates: ${result.city.latitude}, ${result.city.longitude}')
// 	println('Population: ${result.city.population}')
// 	println('Timezone: ${result.city.timezone}')
// 	println('---')
// }

// // Example 2: Search near coordinates (10km radius from London)
// println('\nSearching for cities within 10km of London...')
// nearby := loc.search_near(51.5074, -0.1278, 10.0, 5) or { panic(err) }
// for result in nearby {
// 	println('${result.city.name}, ${result.country.name}')
// 	println('Distance from center: Approx ${result.similarity:.1f}km')
// 	println('---')
// }

// // Example 3: Fuzzy search in a specific country
// println('\nFuzzy searching for "New" in United States...')
// us_cities := loc.search('New', 'US', 5, true) or { panic(err) }
// for result in us_cities {
// 	println('${result.city.name}, ${result.country.name}')
// 	println('State: ${result.city.state_name} (${result.city.state_code})')
// 	println('Population: ${result.city.population}')
// 	println('---')
// }
63
examples/data/location/location_example_tcc.vsh
Executable file
@@ -0,0 +1,63 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.postgresql_client
import freeflowuniverse.herolib.data.location

// Configure PostgreSQL client
heroscript := "
!!postgresql_client.configure
	name:'test'
	user: 'postgres'
	port: 5432
	host: 'localhost'
	password: '1234'
	dbname: 'postgres'
"

// Process the heroscript configuration
postgresql_client.play(heroscript: heroscript)!

// Get the configured client
mut db_client := postgresql_client.get(name: 'test')!

// Create a new location instance
mut loc := location.new(mut db_client, false) or { panic(err) }
println('Location database initialized')

// Initialize the database (downloads and imports data)
// This only needs to be done once or when updating data
println('Downloading and importing location data (this may take a few minutes)...')

// the argument controls whether the data is re-downloaded
loc.download_and_import(false) or { panic(err) }
println('Data import complete')

// // Example 1: Search for a city
// println('\nSearching for London...')
// results := loc.search('London', 'GB', 5, true) or { panic(err) }
// for result in results {
// 	println('${result.city.name}, ${result.country.name} (${result.country.iso2})')
// 	println('Coordinates: ${result.city.latitude}, ${result.city.longitude}')
// 	println('Population: ${result.city.population}')
// 	println('Timezone: ${result.city.timezone}')
// 	println('---')
// }

// // Example 2: Search near coordinates (10km radius from London)
// println('\nSearching for cities within 10km of London...')
// nearby := loc.search_near(51.5074, -0.1278, 10.0, 5) or { panic(err) }
// for result in nearby {
// 	println('${result.city.name}, ${result.country.name}')
// 	println('Distance from center: Approx ${result.similarity:.1f}km')
// 	println('---')
// }

// // Example 3: Fuzzy search in a specific country
// println('\nFuzzy searching for "New" in United States...')
// us_cities := loc.search('New', 'US', 5, true) or { panic(err) }
// for result in us_cities {
// 	println('${result.city.name}, ${result.country.name}')
// 	println('State: ${result.city.state_name} (${result.city.state_code})')
// 	println('Population: ${result.city.population}')
// 	println('---')
// }
40
examples/data/ourdb_example.vsh
Executable file
@@ -0,0 +1,40 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.data.ourdb

const test_dir = '/tmp/ourdb'

mut db := ourdb.new(
	record_nr_max: 16777216 - 1 // maximum number of records
	record_size_max: 1024
	path: test_dir
	reset: true
)!

defer {
	db.destroy() or { panic('failed to destroy db: ${err}') }
}

// Test set and get
test_data := 'Hello, World!'.bytes()
id := db.set(data: test_data)!

retrieved := db.get(id)!
assert retrieved == test_data

assert id == 0

// Test overwrite
new_data := 'Updated data'.bytes()
id2 := db.set(id: 0, data: new_data)!
assert id2 == 0

// // Verify lookup table has the correct location
// location := db.lookup.get(id2)!
// println('Location after update - file_nr: ${location.file_nr}, position: ${location.position}')

// Get and verify the updated data
retrieved2 := db.get(id2)!
println('Retrieved data: ${retrieved2}')
println('Expected data: ${new_data}')
assert retrieved2 == new_data
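As a hypothetical extension of the example (record ids are assumed to auto-increment from 0, as the asserts above suggest; the u32 id type is also an assumption), several records can be appended and read back with the same set/get calls:

// Append a few records and verify them; uses only db.set/db.get from above.
mut ids := []u32{}
for i in 0 .. 5 {
	ids << db.set(data: 'record ${i}'.bytes())!
}
for i, rid in ids {
	assert db.get(rid)!.bytestr() == 'record ${i}'
}
println('stored and verified ${ids.len} records')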
@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.data.paramsparser

@@ -1,7 +1,7 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.data.paramsparser { Params, parse }
import time { Duration, sleep }
import time

totalnr := 1000000


33
examples/data/radixtree.vsh
Executable file
@@ -0,0 +1,33 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.data.radixtree

mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true)!

// Show initial state
println('\nInitial state:')
rt.debug_db()!

// Test insert
println('\nInserting key "test" with value "value1"')
rt.insert('test', 'value1'.bytes())!

// Show state after insert
println('\nState after insert:')
rt.debug_db()!

// Print tree structure
rt.print_tree()!

// Test search
if value := rt.search('test') {
	println('\nFound value: ${value.bytestr()}')
} else {
	println('\nError: ${err}')
}

println('\nInserting key "test2" with value "value2"')
rt.insert('test2', 'value2'.bytes())!

// Print tree structure
rt.print_tree()!
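A radix tree stores keys that share prefixes on a common path, so inserting more keys starting with 'te' should reshape the structure printed above. A hypothetical continuation using only the insert/search/print_tree calls already shown (key names are illustrative):

rt.insert('testing', 'value3'.bytes())!
rt.insert('tea', 'value4'.bytes())!
if v := rt.search('testing') {
	println('found: ${v.bytestr()}')
}
rt.print_tree()! // the shared 'te' prefix should now be visible in the tree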
@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.data.resp
import crypto.ed25519

@@ -1,25 +1,10 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cg -cc tcc -d use_openssl -enable-globals run

// #!/usr/bin/env -S v -n -w -cg -d use_openssl -enable-globals run
//-parallel-cc
import os
import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.develop.performance

mut silent := false
mut gs := gittools.get(reload: true)!

coderoot := if 'CODEROOT' in os.environ() {
	os.environ()['CODEROOT']
} else {os.join_path(os.home_dir(), 'code')}

mut gs := gittools.get()!
if coderoot.len > 0 {
	//is a hack for now
	gs = gittools.new(coderoot: coderoot)!
}

mypath := gs.do(
	recursive: true
	cmd: 'list'
)!

timer := performance.new('gittools')
timer.timeline()
gs.repos_print()!

@@ -1,10 +1,9 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal
import time


mut gs_default := gittools.new()!

println(gs_default)

@@ -1,11 +1,11 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.develop.gittools
import freeflowuniverse.herolib.osal
import time

// Creates a new file in the specified repository path and returns its name.
fn create_new_file(repo_path string, runtime i64)! string {
fn create_new_file(repo_path string, runtime i64) !string {
	coded_now := time.now().unix()
	file_name := 'hello_world_${coded_now}.py'
	println('Creating a new ${file_name} file.')
@@ -29,8 +29,8 @@ mut repo := gs_default.get_repo(name: 'repo3')!
// mut repo := gs_default.get_repo(name: 'repo3' clone: true, url: 'https://github.com/Mahmoud-Emad/repo2.git')!

runtime := time.now().unix()
branch_name := "branch_${runtime}"
tag_name := "tag_${runtime}"
branch_name := 'branch_${runtime}'
tag_name := 'tag_${runtime}'
repo_path := repo.get_path()!
mut file_name := create_new_file(repo_path, runtime)!

@@ -51,36 +51,28 @@ repo.checkout(branch_name: branch_name, pull: false) or {
// Check for changes and stage them if present.
if repo.has_changes()! {
	println('Adding the changes...')
	repo.add_changes() or {
		error('Cannot add the changes due to: ${err}')
	}
	repo.add_changes() or { error('Cannot add the changes due to: ${err}') }
}

// Check if a commit is needed and commit changes if necessary.
if repo.need_commit()! {
	commit_msg := 'feat: Added ${file_name} file.'
	println('Committing the changes, Commit message: ${commit_msg}.')
	repo.commit(msg: commit_msg) or {
		error('Cannot commit the changes due to: ${err}')
	}
	repo.commit(msg: commit_msg) or { error('Cannot commit the changes due to: ${err}') }
}

// Push changes to the remote repository if necessary.
if repo.need_push()! {
	println('Pushing the changes...')
	repo.push() or {
		error('Cannot push the changes due to: ${err}')
	}
	repo.push() or { error('Cannot push the changes due to: ${err}') }
}

if repo.need_pull()! {
	println('Pulling the changes.')
	repo.pull() or {
		error('Cannot pull the changes due to: ${err}')
	}
	repo.pull() or { error('Cannot pull the changes due to: ${err}') }
}

// Checkout to the base branch
// Checkout to the base branch
repo.checkout(checkout_to_base_branch: true, pull: true) or {
	error("Couldn't checkout to branch ${branch_name} due to: ${err}")
}
@@ -93,12 +85,8 @@ repo.create_tag(tag_name: tag_name, checkout: false) or {

// Push the created tag.
println('Pushing the tag...')
repo.push(push_tag: true) or {
	error('Cannot push the tag due to: ${err}')
}
repo.push(push_tag: true) or { error('Cannot push the tag due to: ${err}') }

// Check if the created tag exists.
println('Check if the created tag exists...')
repo.is_tag_exists(tag_name: tag_name) or {
	println("Tag doesn't exist.")
}
repo.is_tag_exists(tag_name: tag_name) or { println("Tag doesn't exist.") }

8
examples/develop/ipapi/example.vsh
Executable file
@@ -0,0 +1,8 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.ipapi
import os

mut ip_api_client := ipapi.get()!
info := ip_api_client.get_ip_info('37.27.132.46')!
println('info: ${info}')
@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import os
import freeflowuniverse.herolib.osal
@@ -8,16 +8,16 @@ import veb

osal.load_env_file('${os.dir(@FILE)}/.env')!

mut j := juggler.configure(
	url: 'https://git.ourworld.tf/projectmycelium/itenv'
	url:      'https://git.ourworld.tf/projectmycelium/itenv'
	username: os.getenv('JUGGLER_USERNAME')
	password: os.getenv('JUGGLER_PASSWORD')
	reset: true
	reset:    true
)!

spawn j.run(8000)
println(j.info())

for{}
for {}

// TODO
// - automate caddy install/start

@@ -1,15 +1,17 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.sysadmin.startupmanager
import os

mut sm := startupmanager.get()!
sm.start(
	name: 'juggler'
	cmd: 'hero juggler -secret planetfirst -u https://git.ourworld.tf/projectmycelium/itenv -reset true'
	env: {'HOME': os.home_dir()}
	name:    'juggler'
	cmd:     'hero juggler -secret planetfirst -u https://git.ourworld.tf/projectmycelium/itenv -reset true'
	env: {
		'HOME': os.home_dir()
	}
	restart: true
) or {panic('failed to start sm ${err}')}
) or { panic('failed to start sm ${err}') }

// TODO
// - automate caddy install/start

@@ -1,28 +1,28 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.develop.luadns

fn main() {
	mut lua_dns := luadns.load('https://github.com/Incubaid/dns') or {
		eprintln('Failed to parse LuaDNS files: $err')
		return
	}
	mut lua_dns := luadns.load('https://github.com/Incubaid/dns') or {
		eprintln('Failed to parse LuaDNS files: ${err}')
		return
	}

	lua_dns.set_domain('test.protocol.me', '65.21.132.119') or {
		eprintln('Failed to set domain: $err')
		return
	}
	lua_dns.set_domain('test.protocol.me', '65.21.132.119') or {
		eprintln('Failed to set domain: ${err}')
		return
	}

	lua_dns.set_domain('example.protocol.me', '65.21.132.119') or {
		eprintln('Failed to set domain: $err')
		return
	}

	for config in lua_dns.configs {
		println(config)
	}

	for config in lua_dns.configs {
		println(config)
	}
	lua_dns.set_domain('example.protocol.me', '65.21.132.119') or {
		eprintln('Failed to set domain: ${err}')
		return
	}

	for config in lua_dns.configs {
		println(config)
	}

	for config in lua_dns.configs {
		println(config)
	}
}

@@ -1,11 +1,11 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.openai as op

mut ai_cli := op.new()!
mut msg := []op.Message{}
msg << op.Message{
	role: op.RoleType.user
	role:    op.RoleType.user
	content: 'Say this is a test!'
}
mut msgs := op.Messages{
@@ -19,26 +19,26 @@ models := ai_cli.list_models()!
model := ai_cli.get_model(models.data[0].id)!
print(model)
images_created := ai_cli.create_image(op.ImageCreateArgs{
	prompt: 'Calm weather'
	prompt:     'Calm weather'
	num_images: 2
	size: op.ImageSize.size_512_512
	format: op.ImageRespType.url
	size:       op.ImageSize.size_512_512
	format:     op.ImageRespType.url
})!
print(images_created)
images_updated := ai_cli.create_edit_image(op.ImageEditArgs{
	image_path: '/path/to/image.png'
	mask_path: '/path/to/mask.png'
	prompt: 'Calm weather'
	mask_path:  '/path/to/mask.png'
	prompt:     'Calm weather'
	num_images: 2
	size: op.ImageSize.size_512_512
	format: op.ImageRespType.url
	size:       op.ImageSize.size_512_512
	format:     op.ImageRespType.url
})!
print(images_updated)
images_variations := ai_cli.create_variation_image(op.ImageVariationArgs{
	image_path: '/path/to/image.png'
	num_images: 2
	size: op.ImageSize.size_512_512
	format: op.ImageRespType.url
	size:       op.ImageSize.size_512_512
	format:     op.ImageRespType.url
})!
print(images_variations)


93
examples/develop/runpod/runpod_example.vsh
Executable file
@@ -0,0 +1,93 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

// import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.clients.runpod
import json
import x.json2

// Create client with direct API key
// This uses RUNPOD_API_KEY from environment
mut rp := runpod.get()!

// Create a new on demand pod
on_demand_pod_response := rp.create_on_demand_pod(
	name: 'RunPod Tensorflow'
	image_name: 'runpod/tensorflow'
	cloud_type: 'ALL'
	gpu_count: 1
	volume_in_gb: 5
	container_disk_in_gb: 5
	min_memory_in_gb: 4
	min_vcpu_count: 1
	gpu_type_id: 'NVIDIA RTX A4000'
	ports: '8888/http'
	volume_mount_path: '/workspace'
	env: [
		runpod.EnvironmentVariableInput{
			key: 'JUPYTER_PASSWORD'
			value: 'rn51hunbpgtltcpac3ol'
		},
	]
)!

println('Created pod with ID: ${on_demand_pod_response.id}')

// create a spot pod
spot_pod_response := rp.create_spot_pod(
	port: 1826
	bid_per_gpu: 0.2
	cloud_type: 'SECURE'
	gpu_count: 1
	volume_in_gb: 5
	container_disk_in_gb: 5
	min_vcpu_count: 1
	min_memory_in_gb: 4
	gpu_type_id: 'NVIDIA RTX A4000'
	name: 'RunPod Pytorch'
	image_name: 'runpod/pytorch'
	docker_args: ''
	ports: '8888/http'
	volume_mount_path: '/workspace'
	env: [
		runpod.EnvironmentVariableInput{
			key: 'JUPYTER_PASSWORD'
			value: 'rn51hunbpgtltcpac3ol'
		},
	]
)!
println('Created spot pod with ID: ${spot_pod_response.id}')

// stop on-demand pod
stop_on_demand_pod := rp.stop_pod(
	pod_id: '${on_demand_pod_response.id}'
)!
println('Stopped on-demand pod with ID: ${stop_on_demand_pod.id}')

// stop spot pod
stop_spot_pod := rp.stop_pod(
	pod_id: '${spot_pod_response.id}'
)!
println('Stopped spot pod with ID: ${stop_spot_pod.id}')

// start on-demand pod
start_on_demand_pod := rp.start_on_demand_pod(pod_id: '${on_demand_pod_response.id}', gpu_count: 1)!
println('Started on demand pod with ID: ${on_demand_pod_response.id}')

// start spot pod
start_spot_pod := rp.start_spot_pod(
	pod_id: '${spot_pod_response.id}'
	gpu_count: 1
	bid_per_gpu: 0.2
)!
println('Started spot pod with ID: ${spot_pod_response.id}')

get_pod := rp.get_pod(
	pod_id: '${spot_pod_response.id}'
)!
println('Get pod result: ${get_pod}')

rp.terminate_pod(pod_id: '${spot_pod_response.id}')!
println('pod with id ${spot_pod_response.id} is terminated')

rp.terminate_pod(pod_id: '${on_demand_pod_response.id}')!
println('pod with id ${on_demand_pod_response.id} is terminated')
66
examples/develop/vastai/vastai_example.vsh
Executable file
@@ -0,0 +1,66 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.vastai
import json
import x.json2

// Create client with direct API key
// This uses VASTAI_API_KEY from environment
mut va := vastai.get()!

offers := va.search_offers()!
println('offers: ${offers}')

top_offers := va.get_top_offers(5)!
println('top offers: ${top_offers}')

create_instance_res := va.create_instance(
	id: top_offers[0].id
	config: vastai.CreateInstanceConfig{
		image: 'pytorch/pytorch:2.5.1-cuda12.4-cudnn9-runtime'
		disk: 10
	}
)!
println('create instance res: ${create_instance_res}')

attach_sshkey_to_instance_res := va.attach_sshkey_to_instance(
	id: 1
	ssh_key: 'ssh-rsa AAAA...'
)!
println('attach sshkey to instance res: ${attach_sshkey_to_instance_res}')

stop_instance_res := va.stop_instance(
	id: 1
	state: 'stopped'
)!
println('stop instance res: ${stop_instance_res}')

destroy_instance_res := va.destroy_instance(
	id: 1
)!
println('destroy instance res: ${destroy_instance_res}')

// For some reason this method returns an error from their server, 500 ERROR
// (request failed with code 500: {"error":"server_error","msg":"Something went wrong on the server"})
launch_instance_res := va.launch_instance(
	// Required
	num_gpus: 1
	gpu_name: 'RTX_3090'
	image: 'vastai/tensorflow'
	disk: 10
	region: 'us-west'

	// Optional
	env: 'user=7amada, home=/home/7amada'
)!
println('launch instance res: ${launch_instance_res}')

start_instances_res := va.start_instances(
	ids: [1, 2, 3]
)!
println('start instances res: ${start_instances_res}')

start_instance_res := va.start_instance(
	id: 1
)!
println('start instance res: ${start_instance_res}')
8
examples/develop/wireguard/wg0.conf
Normal file
@@ -0,0 +1,8 @@
[Interface]
Address = 10.10.3.0/24
PrivateKey = wDewSiri8jlaGnUDN6SwK7QhN082U7gfX27YMGILvVA=
[Peer]
PublicKey = 2JEGJQ8FbajdFk0fFs/881H/D3FRjwlUxvNDZFxDeWQ=
AllowedIPs = 10.10.0.0/16, 100.64.0.0/16
PersistentKeepalive = 25
Endpoint = 185.206.122.31:3241
35
examples/develop/wireguard/wireguard.vsh
Executable file
@@ -0,0 +1,35 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.wireguard
import freeflowuniverse.herolib.installers.net.wireguard as wireguard_installer
import time
import os

mut wg_installer := wireguard_installer.get()!
wg_installer.install()!

// Create Wireguard client
mut wg := wireguard.get()!
config_file_path := '${os.dir(@FILE)}/wg0.conf'

wg.start(config_file_path: config_file_path)!
println('${config_file_path} is started')

time.sleep(time.second * 2)

info := wg.show()!
println('info: ${info}')

config := wg.show_config(interface_name: 'wg0')!
println('config: ${config}')

private_key := wg.generate_private_key()!
println('private_key: ${private_key}')

public_key := wg.get_public_key(private_key: private_key)!
println('public_key: ${public_key}')

wg.down(config_file_path: config_file_path)!
println('${config_file_path} is down')

wg_installer.destroy()!
@@ -1,8 +1,7 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run

#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.hero.bootstrap

mut al:=bootstrap.new_alpine_loader()
mut al := bootstrap.new_alpine_loader()

al.start()!

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.hero.generation


@@ -1,8 +1,8 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.hero.generation

generation.generate_actor(
	name: 'Example'
	name:       'Example'
	interfaces: []
)

@@ -20,17 +20,17 @@ fn new() !ExampleActor {
}

pub fn run() ! {
	mut a_ := new()!
	mut a := IActor(a_)
	a.run()!
	mut a_ := new()!
	mut a := IActor(a_)
	a.run()!
}

pub fn run_server(params RunParams) ! {
	mut a := new()!
	mut server := actor.new_server(
		redis_url: 'localhost:6379'
		redis_queue: a.name
		openapi_spec: openapi_specification
	)!
	server.run(params)
}
	mut a := new()!
	mut server := actor.new_server(
		redis_url:    'localhost:6379'
		redis_queue:  a.name
		openapi_spec: openapi_specification
	)!
	server.run(params)
}

@@ -3,9 +3,7 @@ module example_actor
const test_port = 8101

pub fn test_new() ! {
	new() or {
		return error('Failed to create actor:\n${err}')
	}
	new() or { return error('Failed to create actor:\n${err}') }
}

pub fn test_run() ! {
@@ -14,4 +12,4 @@ pub fn test_run() ! {

pub fn test_run_server() ! {
	spawn run_server(port: test_port)
}
}

@@ -1,5 +1,5 @@
module example_actor

pub fn (mut a ExampleActor) handle(method string, data string) !string {
	return data
}
	return data
}

@@ -1 +1 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

@@ -1 +1 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import example_actor


@@ -1 +1 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

@@ -1,4 +1,4 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -w -n -enable-globals run

import os
import time
@@ -6,66 +6,66 @@ import veb
import json
import x.json2
import net.http
import freeflowuniverse.herolib.web.openapi {Server, Context, Request, Response}
import freeflowuniverse.herolib.hero.processor {Processor, ProcedureCall, ProcedureResponse, ProcessParams}
import freeflowuniverse.herolib.clients.redisclient
import freeflowuniverse.herolib.web.openapi
import freeflowuniverse.herolib.hero.processor
import freeflowuniverse.herolib.core.redisclient

@[heap]
struct Actor {
mut:
	rpc redisclient.RedisRpc
	data_store DataStore
	rpc redisclient.RedisRpc
	data_store DataStore
}

pub struct DataStore {
mut:
	pets map[int]Pet
	orders map[int]Order
	users map[int]User
	pets map[int]Pet
	orders map[int]Order
	users map[int]User
}

struct Pet {
	id int
	name string
	tag string
	id int
	name string
	tag string
}

struct Order {
	id int
	pet_id int
	quantity int
	ship_date string
	status string
	complete bool
	id int
	pet_id int
	quantity int
	ship_date string
	status string
	complete bool
}

struct User {
	id int
	username string
	email string
	phone string
	id int
	username string
	email string
	phone string
}

// Entry point for the actor
fn main() {
	mut redis := redisclient.new('localhost:6379') or {panic(err)}
	mut rpc := redis.rpc_get('procedure_queue')
	mut redis := redisclient.new('localhost:6379') or { panic(err) }
	mut rpc := redis.rpc_get('procedure_queue')

	mut actor := Actor{
		rpc: rpc
		data_store: DataStore{}
	}
	mut actor := Actor{
		rpc: rpc
		data_store: DataStore{}
	}

	actor.listen() or {panic(err)}
	actor.listen() or { panic(err) }
}

// Actor listens to the Redis queue for method invocations
fn (mut actor Actor) listen() ! {
	println('Actor started and listening for tasks...')
	for {
		actor.rpc.process(actor.handle_method)!
		time.sleep(time.millisecond * 100) // Prevent CPU spinning
	}
	println('Actor started and listening for tasks...')
	for {
		actor.rpc.process(actor.handle_method)!
		time.sleep(time.millisecond * 100) // Prevent CPU spinning
	}
}

// Handle method invocations
@@ -142,73 +142,80 @@ fn (mut actor Actor) handle_method(cmd string, data string) !string {

@[params]
pub struct ListPetParams {
	limit u32
	limit u32
}

// DataStore methods for managing data
fn (mut store DataStore) list_pets(params ListPetParams) []Pet {
	if params.limit > 0 {
		if params.limit >= store.pets.values().len {
			return store.pets.values()
		}
		return store.pets.values()[..params.limit]
	}
	return store.pets.values()
	if params.limit > 0 {
		if params.limit >= store.pets.values().len {
			return store.pets.values()
		}
		return store.pets.values()[..params.limit]
	}
	return store.pets.values()
}

fn (mut store DataStore) create_pet(new_pet NewPet) Pet {
	id := store.pets.keys().len + 1
	pet := Pet{id: id, name: new_pet.name, tag: new_pet.tag}
	store.pets[id] = pet
	return pet
	id := store.pets.keys().len + 1
	pet := Pet{
		id: id
		name: new_pet.name
		tag: new_pet.tag
	}
	store.pets[id] = pet
	return pet
}

fn (mut store DataStore) get_pet(id int) !Pet {
	return store.pets[id] or {
		return error('Pet with id ${id} not found.')
	}
	return store.pets[id] or { return error('Pet with id ${id} not found.') }
}

fn (mut store DataStore) delete_pet(id int) ! {
	if id in store.pets {
		store.pets.delete(id)
		return
	}
	return error('Pet not found')
	if id in store.pets {
		store.pets.delete(id)
		return
	}
	return error('Pet not found')
}

fn (mut store DataStore) list_orders() []Order {
	return store.orders.values()
	return store.orders.values()
}

fn (mut store DataStore) get_order(id int) !Order {
	return store.orders[id] or { none }
	return store.orders[id] or { none }
}

fn (mut store DataStore) delete_order(id int) ! {
	if id in store.orders {
		store.orders.delete(id)
		return
	}
	return error('Order not found')
	if id in store.orders {
		store.orders.delete(id)
		return
	}
	return error('Order not found')
}

fn (mut store DataStore) create_user(new_user NewUser) User {
	id := store.users.keys().len + 1
	user := User{id: id, username: new_user.username, email: new_user.email, phone: new_user.phone}
	store.users[id] = user
	return user
	id := store.users.keys().len + 1
	user := User{
		id: id
		username: new_user.username
		email: new_user.email
		phone: new_user.phone
	}
	store.users[id] = user
	return user
}

// NewPet struct for creating a pet
struct NewPet {
	name string
	tag string
	name string
	tag string
}

// NewUser struct for creating a user
struct NewUser {
	username string
	email string
	phone string
	username string
	email string
	phone string
}

@@ -1,141 +1,138 @@
#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
#!/usr/bin/env -S v -w -n -enable-globals run

import os
import time
import veb
import json
import x.json2 {Any}
import x.json2 { Any }
import net.http
import freeflowuniverse.herolib.data.jsonschema {Schema}
import freeflowuniverse.herolib.web.openapi {Server, Context, Request, Response}
import freeflowuniverse.herolib.hero.processor {Processor, ProcedureCall, ProcedureResponse, ProcessParams}
import freeflowuniverse.herolib.clients.redisclient
import freeflowuniverse.herolib.data.jsonschema { Schema }
import freeflowuniverse.herolib.web.openapi { Context, Request, Response, Server }
import freeflowuniverse.herolib.hero.processor { ProcedureCall, ProcessParams, Processor }
import freeflowuniverse.herolib.core.redisclient

const spec_path = '${os.dir(@FILE)}/data/openapi.json'
const spec_json = os.read_file(spec_path) or { panic(err) }

// Main function to start the server
fn main() {
	// Initialize the Redis client and RPC mechanism
	mut redis := redisclient.new('localhost:6379')!
	mut rpc := redis.rpc_get('procedure_queue')
	// Initialize the Redis client and RPC mechanism
	mut redis := redisclient.new('localhost:6379')!
	mut rpc := redis.rpc_get('procedure_queue')

	// Initialize the server
	mut server := &Server{
		specification: openapi.json_decode(spec_json)!
		handler: Handler{
			processor: Processor{
				rpc: rpc
			}
		}
	}
	// Initialize the server
	mut server := &Server{
		specification: openapi.json_decode(spec_json)!
		handler: Handler{
			processor: Processor{
				rpc: rpc
			}
		}
	}

	// Start the server
	veb.run[Server, Context](mut server, 8080)
	// Start the server
	veb.run[Server, Context](mut server, 8080)
}

pub struct Handler {
mut:
	processor Processor
mut:
	processor Processor
}

fn (mut handler Handler) handle(request Request) !Response {
	// Convert incoming OpenAPI request to a procedure call
	mut params := []string{}
	// Convert incoming OpenAPI request to a procedure call
	mut params := []string{}

	if request.arguments.len > 0 {
		params = request.arguments.values().map(it.str()).clone()
	}
	if request.arguments.len > 0 {
		params = request.arguments.values().map(it.str()).clone()
	}

	if request.body != '' {
		params << request.body
	}
	if request.body != '' {
		params << request.body
	}

	if request.parameters.len != 0 {
		mut param_map := map[string]Any{} // Store parameters with correct types
	if request.parameters.len != 0 {
		mut param_map := map[string]Any{} // Store parameters with correct types

		for param_name, param_value in request.parameters {
			operation_param := request.operation.parameters.filter(it.name == param_name)
			if operation_param.len > 0 {
				param_schema := operation_param[0].schema as Schema
				param_type := param_schema.typ
				param_format := param_schema.format
		for param_name, param_value in request.parameters {
			operation_param := request.operation.parameters.filter(it.name == param_name)
			if operation_param.len > 0 {
				param_schema := operation_param[0].schema as Schema
				param_type := param_schema.typ
				param_format := param_schema.format

				// Convert parameter value to corresponding type
				match param_type {
					'integer' {
						match param_format {
							'int32' {
								param_map[param_name] = param_value.int() // Convert to int
							}
							'int64' {
								param_map[param_name] = param_value.i64() // Convert to i64
							}
							else {
								param_map[param_name] = param_value.int() // Default to int
							}
						}
					}
					'string' {
						param_map[param_name] = param_value // Already a string
					}
					'boolean' {
						param_map[param_name] = param_value.bool() // Convert to bool
					}
					'number' {
						match param_format {
							'float' {
								param_map[param_name] = param_value.f32() // Convert to float
							}
							'double' {
								param_map[param_name] = param_value.f64() // Convert to double
							}
							else {
								param_map[param_name] = param_value.f64() // Default to double
							}
						}
					}
					else {
						param_map[param_name] = param_value // Leave as string for unknown types
					}
				}
			} else {
				// If the parameter is not defined in the OpenAPI operation, skip or log it
				println('Unknown parameter: $param_name')
			}
		}
				// Convert parameter value to corresponding type
				match param_type {
					'integer' {
						match param_format {
							'int32' {
								param_map[param_name] = param_value.int() // Convert to int
							}
							'int64' {
								param_map[param_name] = param_value.i64() // Convert to i64
							}
							else {
								param_map[param_name] = param_value.int() // Default to int
							}
						}
					}
					'string' {
						param_map[param_name] = param_value // Already a string
					}
					'boolean' {
						param_map[param_name] = param_value.bool() // Convert to bool
					}
					'number' {
						match param_format {
							'float' {
								param_map[param_name] = param_value.f32() // Convert to float
							}
							'double' {
								param_map[param_name] = param_value.f64() // Convert to double
							}
							else {
								param_map[param_name] = param_value.f64() // Default to double
							}
						}
					}
					else {
						param_map[param_name] = param_value // Leave as string for unknown types
					}
				}
			} else {
				// If the parameter is not defined in the OpenAPI operation, skip or log it
				println('Unknown parameter: ${param_name}')
			}
		}

		// Encode the parameter map to JSON if needed
		params << json.encode(param_map.str())
	}
		// Encode the parameter map to JSON if needed
		params << json.encode(param_map.str())
	}

	call := ProcedureCall{
		method: request.operation.operation_id
		params: "[${params.join(',')}]" // Keep as a string since ProcedureCall expects a string
	}
	call := ProcedureCall{
		method: request.operation.operation_id
		params: '[${params.join(',')}]' // Keep as a string since ProcedureCall expects a string
	}

	// Process the procedure call
	procedure_response := handler.processor.process(
		call,
		ProcessParams{
			timeout: 30 // Set timeout in seconds
		}
	) or {
		// Handle ProcedureError
		if err is processor.ProcedureError {
			return Response{
				status: http.status_from_int(err.code()) // Map ProcedureError reason to HTTP status code
				body: json.encode({
					'error': err.msg()
				})
			}
		}
		return error('Unexpected error: $err')
	}
	// Process the procedure call
	procedure_response := handler.processor.process(call, ProcessParams{
		timeout: 30 // Set timeout in seconds
	}) or {
		// Handle ProcedureError
		if err is processor.ProcedureError {
			return Response{
				status: http.status_from_int(err.code()) // Map ProcedureError reason to HTTP status code
				body: json.encode({
					'error': err.msg()
				})
			}
		}
		return error('Unexpected error: ${err}')
	}

	// Convert returned procedure response to OpenAPI response
	return Response{
		status: http.Status.ok // Assuming success if no error
		body: procedure_response.result
	}
	// Convert returned procedure response to OpenAPI response
	return Response{
		status: http.Status.ok // Assuming success if no error
		body: procedure_response.result
	}
}

|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user