diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml
index 847eca95..6cc013db 100644
--- a/.github/workflows/documentation.yml
+++ b/.github/workflows/documentation.yml
@@ -2,9 +2,9 @@ name: Deploy Documentation to Pages
 on:
   push:
-    branches: ["main"]
+    branches: ["development"]
   workflow_dispatch:
-    branches: ["main"]
+    branches: ["development"]
 
 permissions:
   contents: read
diff --git a/.github/workflows/hero_build_all.yml b/.github/workflows/hero_build_all.yml
new file mode 100644
index 00000000..928a66fb
--- /dev/null
+++ b/.github/workflows/hero_build_all.yml
@@ -0,0 +1,80 @@
+name: Build Hero on Linux & macOS & Run tests
+
+permissions:
+  contents: write
+
+on:
+  push:
+    branches: ["main","development"]
+  workflow_dispatch:
+    branches: ["main","development"]
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        include:
+          - target: x86_64-unknown-linux-musl
+            os: ubuntu-latest
+            short-name: linux-i64
+          - target: aarch64-unknown-linux-musl
+            os: ubuntu-latest
+            short-name: linux-arm64
+          - target: aarch64-apple-darwin
+            os: macos-latest
+            short-name: macos-arm64
+          - target: x86_64-apple-darwin
+            os: macos-13
+            short-name: macos-i64
+    runs-on: ${{ matrix.os }}
+    steps:
+      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
+      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!"
+      - run: echo "🔎 The name of your branch is ${{ github.ref_name }} and your repository is ${{ github.repository }}."
+
+      - name: Check out repository code
+        uses: actions/checkout@v3
+
+      - name: Setup V & Herolib
+        run: ./install_v.sh --herolib
+
+      - name: Do all the basic tests
+        run: ./test_basic.vsh
+
+      - name: Build Hero
+        run: |
+          v -w -d use_openssl -enable-globals cli/hero.v -o cli/hero-${{ matrix.target }}
+
+      - name: Upload
+        uses: actions/upload-artifact@v4
+        with:
+          name: hero-${{ matrix.target }}
+          path: cli/hero-${{ matrix.target }}
+
+  release_hero:
+    needs: build
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+
+      - name: Download Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: cli/bins
+          merge-multiple: true
+
+      - name: Release
+        uses: softprops/action-gh-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ github.ref }}
+          name: Release ${{ github.ref_name }}
+          draft: false
+          fail_on_unmatched_files: true
+          # body: ${{ steps.changelog.outputs.changelog }}
+          files: cli/bins/*
diff --git a/.github/workflows/hero_build_linux.yml b/.github/workflows/hero_build_linux.yml
new file mode 100644
index 00000000..4c8a7b38
--- /dev/null
+++ b/.github/workflows/hero_build_linux.yml
@@ -0,0 +1,32 @@
+name: Run Hero basic tests on Linux
+
+permissions:
+  contents: write
+
+on:
+  push:
+  workflow_dispatch:
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        include:
+          - target: x86_64-unknown-linux-musl
+            os: ubuntu-latest
+            short-name: linux-i64
+    runs-on: ${{ matrix.os }}
+    steps:
+      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
+      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!"
+      - run: echo "🔎 The name of your branch is ${{ github.ref_name }} and your repository is ${{ github.repository }}."
+
+      - name: Check out repository code
+        uses: actions/checkout@v3
+
+      - name: Setup V & Herolib
+        run: ./install_v.sh --herolib
+
+      - name: Do all the basic tests
+        run: ./test_basic.vsh
+
diff --git a/README.md b/README.md
index 47ccd727..364e4d36 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,19 @@
 # herolib
 
-a smaller version of herolib with only the items we need for hero
-> [documentation here](https://freeflowuniverse.github.io/herolib/)
+> [documentation of the library](https://freeflowuniverse.github.io/herolib/)
 
-## automated install
+## hero install for users
+
+```bash
+curl https://raw.githubusercontent.com/freeflowuniverse/herolib/refs/heads/development_kristof10/install_hero.sh > /tmp/install_hero.sh
+bash /tmp/install_hero.sh
+
+```
+
+this tool can be used to work with git, build books, play with hero AI, and more
+
+## automated install for developers
 
 ```bash
 curl 'https://raw.githubusercontent.com/freeflowuniverse/herolib/refs/heads/main/install_v.sh' > /tmp/install_v.sh
 bash /tmp/install_v.sh --analyzer --herolib
@@ -16,7 +25,7 @@ bash /tmp/install_v.sh --analyzer --herolib
 
 
 ```bash
-#~/code/github/freeflowuniverse/herolib/install_v.sh --help
+~/code/github/freeflowuniverse/herolib/install_v.sh --help
 
 V & HeroLib Installer Script
 
diff --git a/aiprompts/starter/0_start_here.md b/aiprompts/starter/0_start_here.md
index d1032ad8..f3c811b1 100644
--- a/aiprompts/starter/0_start_here.md
+++ b/aiprompts/starter/0_start_here.md
@@ -11,7 +11,7 @@ when I generate vlang scripts I will always use .vsh extension and use following
 as first line:
 
 ```
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 ```
 
 - a .vsh is a v shell script and can be executed as is, no need to use v ...
@@ -21,7 +21,7 @@ when I generate vlang scripts I will always use .vsh extension and use following
 ## to do argument parsing use following examples
 
 ```v
-#!/usr/bin/env -S v -n -w -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
 
 import os
 import flag
diff --git a/aiprompts/v_manual_advanced.md b/aiprompts/v_manual_advanced.md
index 84b214a3..f091d19b 100644
--- a/aiprompts/v_manual_advanced.md
+++ b/aiprompts/v_manual_advanced.md
@@ -2238,7 +2238,7 @@ be faster, since there is no need for a re-compilation of a script, that has not
 An example `deploy.vsh`:
 
 ```v oksyntax
-#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run
 
 // Note: The shebang line above, associates the .vsh file to V on Unix-like systems,
 // so it can be run just by specifying the path to the .vsh file, once it's made
@@ -2300,11 +2300,11 @@ Whilst V does normally not allow vsh scripts without the designated file extensi
 to circumvent this rule and have a file with a fully custom name and shebang. Whilst this
 feature exists it is only recommended for specific usecases like scripts that will be put in the
 path and should **not** be used for things like build or deploy scripts. To access this feature start the
-file with `#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+file with `#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run`
 the built executable. This will run in crun mode so it will only rebuild if changes to the script
 were made and keep the binary as `tmp.<scriptfilename>`.
 **Caution**: if this filename already exists the file will be overridden. If you want to rebuild
 each time and not keep this binary
-instead use `#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+instead use `#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run`
 
 # Appendices
diff --git a/aiprompts/vshell example script instructions.md b/aiprompts/vshell example script instructions.md
index cfd2956b..a45e7c41 100644
--- a/aiprompts/vshell example script instructions.md
+++ b/aiprompts/vshell example script instructions.md
@@ -3,7 +3,7 @@
 this is how we want example scripts to be, see the first line
 
 ```vlang
-#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run
 
 import freeflowuniverse.herolib.installers.sysadmintools.daguserver
diff --git a/cli/compile.vsh b/cli/compile.vsh
index 4dedb63e..cf66f026 100755
--- a/cli/compile.vsh
+++ b/cli/compile.vsh
@@ -1,5 +1,5 @@
 #!/usr/bin/env -S v -n -w -parallel-cc -enable-globals run
-// #!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+// #!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 import os
 import flag
@@ -45,7 +45,7 @@ compile_cmd := if os.user_os() == 'macos' {
 	if prod_mode {
 		'v -enable-globals -w -n -prod hero.v'
 	} else {
-		'v -w -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals hero.v'
+		'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals hero.v'
 	}
 } else {
 	if prod_mode {
diff --git a/cli/compile_upload.vsh b/cli/compile_upload.vsh
index 07c0841a..7a9ed988 100755
--- a/cli/compile_upload.vsh
+++ b/cli/compile_upload.vsh
@@ -89,5 +89,9 @@ fn hero_upload() ! {
 }
 
 fn main() {
+	// os.execute_or_panic('${os.home_dir()}/code/github/freeflowuniverse/herolib/cli/compile.vsh -p')
+	println('compiling hero can take 60+ seconds on macOS')
+	os.execute_or_panic('${os.home_dir()}/code/github/freeflowuniverse/herolib/cli/compile.vsh -p')
+	println('upload:')
 	hero_upload() or { eprintln(err) exit(1) }
 }
diff --git a/cli/hero.v b/cli/hero.v
index ed1dd6d9..903add9d 100644
--- a/cli/hero.v
+++ b/cli/hero.v
@@ -31,7 +31,7 @@ fn do() ! {
 	mut cmd := Command{
 		name: 'hero'
 		description: 'Your HERO toolset.'
-		version: '2.0.0'
+		version: '2.0.6'
 	}
 
 	// herocmds.cmd_run_add_flags(mut cmd)
@@ -81,6 +81,7 @@ fn do() !
{ // herocmds.cmd_zola(mut cmd) // herocmds.cmd_juggler(mut cmd) herocmds.cmd_generator(mut cmd) + herocmds.cmd_docusaurus(mut cmd) // herocmds.cmd_docsorter(mut cmd) // cmd.add_command(publishing.cmd_publisher(pre_func)) cmd.setup() diff --git a/doc.vsh b/doc.vsh index 010c9a03..b16ad984 100755 --- a/doc.vsh +++ b/doc.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import os diff --git a/docker/docusaurus/.gitignore b/docker/docusaurus/.gitignore deleted file mode 100644 index bb44cec5..00000000 --- a/docker/docusaurus/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.bash_history -.openvscode-server/ -.cache/ \ No newline at end of file diff --git a/docker/docusaurus/Dockerfile b/docker/docusaurus/Dockerfile deleted file mode 100644 index 2e313515..00000000 --- a/docker/docusaurus/Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -# Use Ubuntu 24.04 as the base image -FROM ubuntu:24.04 - -# Set the working directory -WORKDIR /root - -# Copy local installation scripts into the container -COPY scripts/install_v.sh /tmp/install_v.sh -COPY scripts/install_herolib.vsh /tmp/install_herolib.vsh -COPY scripts/install_vscode.sh /tmp/install_vscode.sh -COPY scripts/ourinit.sh /usr/local/bin/ - -# Make the scripts executable -RUN chmod +x /tmp/install_v.sh /tmp/install_herolib.vsh - -RUN apt-get update && apt-get install -y \ - curl bash sudo mc wget tmux htop openssh-server - -RUN bash /tmp/install_v.sh - -RUN yes y | bash /tmp/install_v.sh --analyzer - -RUN bash /tmp/install_vscode.sh - -RUN /tmp/install_herolib.vsh && \ - mkdir -p /var/run/sshd && \ - echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config && \ - echo 'PasswordAuthentication no' >> /etc/ssh/sshd_config && \ - chown -R root:root /root/.ssh && \ - chmod -R 700 /root/.ssh/ && \ - chmod 600 /root/.ssh/authorized_keys && \ - service ssh start && \ - apt-get clean && \ - echo "PS1='HERO: \w \$ '" >> ~/.bashrc \ - rm -rf /var/lib/apt/lists/* - -#SSH -RUN mkdir -p /var/run/sshd && \ - echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config && \ - echo 'PasswordAuthentication no' >> /etc/ssh/sshd_config && \ - chown -R root:root /root/.ssh && \ - chmod -R 700 /root/.ssh/ && \ - chmod 600 /root/.ssh/authorized_keys && \ - service ssh start - -ENTRYPOINT ["/bin/bash"] -CMD ["/bin/bash"] - diff --git a/docker/docusaurus/build.sh b/docker/docusaurus/build.sh deleted file mode 100755 index 92aec508..00000000 --- a/docker/docusaurus/build.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -e - -# Get the directory where the script is located -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$SCRIPT_DIR" - -# Docker image and container names -DOCKER_IMAGE_NAME="docusaurus" -DEBUG_CONTAINER_NAME="herolib" - -function cleanup { - if docker ps -aq -f name="$DEBUG_CONTAINER_NAME" &>/dev/null; then - echo "Cleaning up leftover debug container..." - docker rm -f "$DEBUG_CONTAINER_NAME" &>/dev/null || true - fi -} -trap cleanup EXIT - -# Attempt to build the Docker image -BUILD_LOG=$(mktemp) -set +e -docker build --name herolib --progress=plain -t "$DOCKER_IMAGE_NAME" . -BUILD_EXIT_CODE=$? 
-set -e - -# Handle build failure -if [ $BUILD_EXIT_CODE -ne 0 ]; then - echo -e "\\n[ERROR] Docker build failed.\n" - echo -e "remove the part which didn't build in the Dockerfile, the run again and to debug do:" - echo docker run --name herolib -it --entrypoint=/bin/bash "herolib" - exit $BUILD_EXIT_CODE -else - echo -e "\\n[INFO] Docker build completed successfully." -fi - - diff --git a/docker/docusaurus/debug.sh b/docker/docusaurus/debug.sh deleted file mode 100755 index 513ab889..00000000 --- a/docker/docusaurus/debug.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -ex - -# Get the directory where the script is located -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -cd "$SCRIPT_DIR" - -# Remove any existing container named 'debug' (ignore errors) -docker rm -f herolib > /dev/null 2>&1 - -docker run --name herolib -it \ - --entrypoint="/usr/local/bin/ourinit.sh" \ - -v "${SCRIPT_DIR}/scripts:/scripts" \ - -v "$HOME/code:/root/code" \ - -p 4100:8100 \ - -p 4101:8101 \ - -p 4102:8102 \ - -p 4379:6379 \ - -p 4022:22 \ - -p 4000:3000 herolib diff --git a/docker/docusaurus/docker-compose.yml b/docker/docusaurus/docker-compose.yml deleted file mode 100644 index 9cc53871..00000000 --- a/docker/docusaurus/docker-compose.yml +++ /dev/null @@ -1,34 +0,0 @@ -services: - postgres: - image: postgres:latest - container_name: postgres_service - environment: - POSTGRES_USER: postgres - POSTGRES_PASSWORD: planetfirst - POSTGRES_DB: mydb - ports: - - "5432:5432" - volumes: - - postgres_data:/var/lib/postgresql/data - herolib: - build: - context: . - dockerfile: Dockerfile - image: herolib:latest - container_name: herolib - volumes: - - ~/code:/root/code - stdin_open: true - tty: true - ports: - - "4100:8100" - - "4101:8101" - - "4102:8102" - - "4379:6379" - - "4000:3000" - - "4022:22" - command: ["/usr/local/bin/ourinit.sh"] -volumes: - postgres_data: - - diff --git a/docker/docusaurus/scripts/install_vscode.sh b/docker/docusaurus/scripts/install_vscode.sh deleted file mode 100755 index 1c521cbe..00000000 --- a/docker/docusaurus/scripts/install_vscode.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/bin/bash -e - -# Set version and file variables -OPENVSCODE_SERVER_VERSION="1.97.0" -TMP_DIR="/tmp" -FILENAME="openvscode.tar.gz" -FILE_PATH="$TMP_DIR/$FILENAME" -INSTALL_DIR="/opt/openvscode" -BIN_PATH="/usr/local/bin/openvscode-server" -TMUX_SESSION="openvscode-server" - -# Function to detect architecture -get_architecture() { - ARCH=$(uname -m) - case "$ARCH" in - x86_64) - echo "x64" - ;; - aarch64) - echo "arm64" - ;; - *) - echo "Unsupported architecture: $ARCH" >&2 - exit 1 - ;; - esac -} - -# Check if OpenVSCode Server is already installed -if [ -d "$INSTALL_DIR" ] && [ -x "$BIN_PATH" ]; then - echo "OpenVSCode Server is already installed at $INSTALL_DIR. Skipping download and installation." 
-else - # Determine architecture-specific URL - ARCH=$(get_architecture) - if [ "$ARCH" == "x64" ]; then - DOWNLOAD_URL="https://github.com/gitpod-io/openvscode-server/releases/download/openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}/openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}-linux-x64.tar.gz" - elif [ "$ARCH" == "arm64" ]; then - DOWNLOAD_URL="https://github.com/gitpod-io/openvscode-server/releases/download/openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}/openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}-linux-arm64.tar.gz" - fi - - # Navigate to temporary directory - cd "$TMP_DIR" - - # Remove existing file if it exists - if [ -f "$FILE_PATH" ]; then - rm -f "$FILE_PATH" - fi - - # Download file using curl - curl -L "$DOWNLOAD_URL" -o "$FILE_PATH" - - # Verify file size is greater than 40 MB (40 * 1024 * 1024 bytes) - FILE_SIZE=$(stat -c%s "$FILE_PATH") - if [ "$FILE_SIZE" -le $((40 * 1024 * 1024)) ]; then - echo "Error: Downloaded file size is less than 40 MB." >&2 - exit 1 - fi - - # Extract the tar.gz file - EXTRACT_DIR="openvscode-server-insiders-v${OPENVSCODE_SERVER_VERSION}-linux-${ARCH}" - tar -xzf "$FILE_PATH" - - # Move the extracted directory to the install location - if [ -d "$INSTALL_DIR" ]; then - rm -rf "$INSTALL_DIR" - fi - mv "$EXTRACT_DIR" "$INSTALL_DIR" - - # Create a symlink for easy access - ln -sf "$INSTALL_DIR/bin/openvscode-server" "$BIN_PATH" - - # Verify installation - if ! command -v openvscode-server >/dev/null 2>&1; then - echo "Error: Failed to create symlink for openvscode-server." >&2 - exit 1 - fi - - # Install default plugins - PLUGINS=("ms-python.python" "esbenp.prettier-vscode" "saoudrizwan.claude-dev" "yzhang.markdown-all-in-one" "ms-vscode-remote.remote-ssh" "ms-vscode.remote-explorer" "charliermarsh.ruff" "qwtel.sqlite-viewer" "vosca.vscode-v-analyzer" "tomoki1207.pdf") - for PLUGIN in "${PLUGINS[@]}"; do - "$INSTALL_DIR/bin/openvscode-server" --install-extension "$PLUGIN" - done - - echo "Default plugins installed: ${PLUGINS[*]}" - - # Clean up temporary directory - if [ -d "$TMP_DIR" ]; then - find "$TMP_DIR" -maxdepth 1 -type f -name "openvscode*" -exec rm -f {} \; - fi -fi - -# Start OpenVSCode Server in a tmux session -if tmux has-session -t "$TMUX_SESSION" 2>/dev/null; then - tmux kill-session -t "$TMUX_SESSION" -fi -tmux new-session -d -s "$TMUX_SESSION" "$INSTALL_DIR/bin/openvscode-server" - -echo "OpenVSCode Server is running in a tmux session named '$TMUX_SESSION'." 
diff --git a/docker/docusaurus/scripts/ourinit.sh b/docker/docusaurus/scripts/ourinit.sh deleted file mode 100755 index 14094c65..00000000 --- a/docker/docusaurus/scripts/ourinit.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -e - -# redis-server --daemonize yes - -# TMUX_SESSION="vscode" -# # Start OpenVSCode Server in a tmux session -# if tmux has-session -t "$TMUX_SESSION" 2>/dev/null; then -# tmux kill-session -t "$TMUX_SESSION" -# fi -# tmux new-session -d -s "$TMUX_SESSION" "/usr/local/bin/openvscode-server --host 0.0.0.0 --without-connection-token" - -# service ssh start - -exec /bin/bash diff --git a/docker/docusaurus/shell.sh b/docker/docusaurus/shell.sh deleted file mode 100755 index 0d04b270..00000000 --- a/docker/docusaurus/shell.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -e - -# Get the directory where the script is located -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$SCRIPT_DIR" - -CONTAINER_NAME="herolib" -TARGET_PORT=4000 - -# Function to check if a container is running -is_container_running() { - docker ps --filter "name=$CONTAINER_NAME" --filter "status=running" -q -} - -# Function to check if a port is accessible -is_port_accessible() { - nc -zv 127.0.0.1 "$1" &>/dev/null -} - -# Check if the container exists and is running -if ! is_container_running; then - echo "Container $CONTAINER_NAME is not running." - - # Check if the container exists but is stopped - if docker ps -a --filter "name=$CONTAINER_NAME" -q | grep -q .; then - echo "Starting existing container $CONTAINER_NAME..." - docker start "$CONTAINER_NAME" - else - echo "Container $CONTAINER_NAME does not exist. Attempting to start with start.sh..." - if [[ -f "$SCRIPT_DIR/start.sh" ]]; then - bash "$SCRIPT_DIR/start.sh" - else - echo "Error: start.sh not found in $SCRIPT_DIR." - exit 1 - fi - fi - - # Wait for the container to be fully up - sleep 5 -fi - -# Verify the container is running -if ! is_container_running; then - echo "Error: Failed to start container $CONTAINER_NAME." - exit 1 -fi -echo "Container $CONTAINER_NAME is running." - -# Check if the target port is accessible -if is_port_accessible "$TARGET_PORT"; then - echo "Port $TARGET_PORT is accessible." -else - echo "Port $TARGET_PORT is not accessible. Please check the service inside the container." -fi - -# Enter the container -echo -echo " ** WE NOW LOGIN TO THE CONTAINER ** " -echo -docker exec -it herolib bash - diff --git a/docker/docusaurus/ssh.sh b/docker/docusaurus/ssh.sh deleted file mode 100755 index 776695cf..00000000 --- a/docker/docusaurus/ssh.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -e - -ssh root@localhost -p 4022 diff --git a/docker/docusaurus/ssh_init.sh b/docker/docusaurus/ssh_init.sh deleted file mode 100755 index 40bee15a..00000000 --- a/docker/docusaurus/ssh_init.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -e - -# Get the directory where the script is located -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$SCRIPT_DIR" - -# Define variables -CONTAINER_NAME="herolib" -CONTAINER_SSH_DIR="/root/.ssh" -AUTHORIZED_KEYS="authorized_keys" -TEMP_AUTH_KEYS="/tmp/authorized_keys" - -# Step 1: Create a temporary file to store public keys -> $TEMP_AUTH_KEYS # Clear the file if it exists - -# Step 2: Add public keys from ~/.ssh/ if they exist -if ls ~/.ssh/*.pub 1>/dev/null 2>&1; then - cat ~/.ssh/*.pub >> $TEMP_AUTH_KEYS -fi - -# Step 3: Check if ssh-agent is running and get public keys from it -if pgrep ssh-agent >/dev/null; then - echo "ssh-agent is running. Fetching keys..." 
- ssh-add -L >> $TEMP_AUTH_KEYS 2>/dev/null -else - echo "ssh-agent is not running or no keys loaded." -fi - -# Step 4: Ensure the temporary file is not empty -if [ ! -s $TEMP_AUTH_KEYS ]; then - echo "No public keys found. Exiting." - exit 1 -fi - -# Step 5: Ensure the container's SSH directory exists -docker exec -it $CONTAINER_NAME mkdir -p $CONTAINER_SSH_DIR -docker exec -it $CONTAINER_NAME chmod 700 $CONTAINER_SSH_DIR - -# Step 6: Copy the public keys into the container's authorized_keys file -docker cp $TEMP_AUTH_KEYS $CONTAINER_NAME:$CONTAINER_SSH_DIR/$AUTHORIZED_KEYS - -# Step 7: Set proper permissions for authorized_keys -docker exec -it $CONTAINER_NAME chmod 600 $CONTAINER_SSH_DIR/$AUTHORIZED_KEYS - -# Step 8: Install and start the SSH server inside the container -docker exec -it $CONTAINER_NAME bash -c " - apt-get update && - apt-get install -y openssh-server && - mkdir -p /var/run/sshd && - echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config && - echo 'PasswordAuthentication no' >> /etc/ssh/sshd_config && - chown -R root:root /root/.ssh && - chmod -R 700 /root/.ssh/ && - chmod 600 /root/.ssh/authorized_keys && - service ssh start -" - -# Step 9: Clean up temporary file on the host -rm $TEMP_AUTH_KEYS - -echo "SSH keys added and SSH server configured. You can now SSH into the container." - -ssh root@localhost -p 4022 diff --git a/docker/docusaurus/start.sh b/docker/docusaurus/start.sh deleted file mode 100644 index a584aad7..00000000 --- a/docker/docusaurus/start.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -e - -# Get the directory where the script is located -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$SCRIPT_DIR" - - - diff --git a/docker/herolib/scripts/install_herolib.vsh b/docker/herolib/scripts/install_herolib.vsh index d6cbbf3d..53524905 100755 --- a/docker/herolib/scripts/install_herolib.vsh +++ b/docker/herolib/scripts/install_herolib.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import os import flag @@ -64,7 +64,7 @@ os.symlink('${abs_dir_of_script}/lib', '${os.home_dir()}/.vmodules/freeflowunive println('Herolib installation completed successfully!') // Add vtest alias -addtoscript('alias vtest=', 'alias vtest=\'v -stats -enable-globals -n -w -cg -gc none -no-retry-compilation -cc tcc test\' ') or { +addtoscript('alias vtest=', 'alias vtest=\'v -stats -enable-globals -n -w -cg -gc none -cc tcc test\' ') or { eprintln('Failed to add vtest alias: ${err}') } diff --git a/docker/postgresql/docker-compose.yml b/docker/postgresql/docker-compose.yml new file mode 100644 index 00000000..f24127b3 --- /dev/null +++ b/docker/postgresql/docker-compose.yml @@ -0,0 +1,22 @@ +version: '3.9' +services: + db: + image: 'postgres:17.2-alpine3.21' + restart: always + ports: + - 5432:5432 + environment: + POSTGRES_PASSWORD: 1234 + networks: + - my_network + + adminer: + image: adminer + restart: always + ports: + - 8080:8080 + networks: + - my_network + +networks: + my_network: \ No newline at end of file diff --git a/docker/postgresql/readme.md b/docker/postgresql/readme.md new file mode 100644 index 00000000..cc3a2f33 --- /dev/null +++ b/docker/postgresql/readme.md @@ -0,0 +1,6 @@ + + +Server (Host): db (because Docker Compose creates an internal network and uses service names as hostnames) +Username: postgres (default PostgreSQL username) +Password: 1234 (as set in your POSTGRES_PASSWORD environment 
variable)
+Database: Leave it empty or enter postgres (default database)
diff --git a/docker/postgresql/start.sh b/docker/postgresql/start.sh
new file mode 100755
index 00000000..9cd3f695
--- /dev/null
+++ b/docker/postgresql/start.sh
@@ -0,0 +1,13 @@
+#!/bin/bash -e
+
+# Get the directory where the script is located
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd "$SCRIPT_DIR"
+
+# Stop any existing containers and remove them
+docker compose down
+
+# Start the services in detached mode
+docker compose up -d
+
+echo "PostgreSQL is ready"
diff --git a/examples/README.md b/examples/README.md
index 0411a911..08842e67 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -34,7 +34,7 @@ The examples directory demonstrates various capabilities of HeroLib:
 When creating V scripts (.vsh files), always use the following shebang:
 
 ```bash
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 ```
 
 This shebang ensures:
diff --git a/examples/builder/simple.vsh b/examples/builder/simple.vsh
index 72c6eb92..fe78455a 100755
--- a/examples/builder/simple.vsh
+++ b/examples/builder/simple.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 import freeflowuniverse.herolib.builder
 import freeflowuniverse.herolib.core.pathlib
diff --git a/examples/builder/simple_ip4.vsh b/examples/builder/simple_ip4.vsh
index f4da05d8..a3a22627 100755
--- a/examples/builder/simple_ip4.vsh
+++ b/examples/builder/simple_ip4.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 import freeflowuniverse.herolib.builder
 import freeflowuniverse.herolib.core.pathlib
diff --git a/examples/builder/simple_ip6.vsh b/examples/builder/simple_ip6.vsh
index 10d38683..ee8c118a 100755
--- a/examples/builder/simple_ip6.vsh
+++ b/examples/builder/simple_ip6.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 import freeflowuniverse.herolib.builder
 import freeflowuniverse.herolib.core.pathlib
diff --git a/examples/clients/mail.vsh b/examples/clients/mail.vsh
new file mode 100755
index 00000000..a61074d5
--- /dev/null
+++ b/examples/clients/mail.vsh
@@ -0,0 +1,24 @@
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
+
+
+import freeflowuniverse.herolib.clients.mailclient
+
+
+// remove the previous config, otherwise the env variables are not read
+mailclient.config_delete(name: 'test')!
+
+// env variables which need to be set are:
+// - MAIL_FROM=...
+// - MAIL_PASSWORD=...
+// - MAIL_PORT=465
+// - MAIL_SERVER=...
+// - MAIL_USERNAME=...
+
+
+mut client := mailclient.get(name: 'test')!
+
+println(client)
+
+client.send(subject: 'this is a test', to: 'kristof@incubaid.com', body: '
+	this is my email content
+	')!
\ No newline at end of file diff --git a/examples/clients/psql.vsh b/examples/clients/psql.vsh new file mode 100755 index 00000000..7b64441b --- /dev/null +++ b/examples/clients/psql.vsh @@ -0,0 +1,45 @@ +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.core +import freeflowuniverse.herolib.clients.postgresql_client + + +// Configure PostgreSQL client +heroscript := " +!!postgresql_client.configure + name:'test' + user: 'postgres' + port: 5432 + host: 'localhost' + password: '1234' + dbname: 'postgres' +" + +// Process the heroscript configuration +postgresql_client.play(heroscript: heroscript)! + +// Get the configured client +mut db_client := postgresql_client.get(name: "test")! + +// Check if test database exists, create if not +if !db_client.db_exists('test')! { + println('Creating database test...') + db_client.db_create('test')! +} + +// Switch to test database +db_client.dbname = 'test' + +// Create table if not exists +create_table_sql := "CREATE TABLE IF NOT EXISTS users ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL, + email VARCHAR(255) UNIQUE NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +)" + +println('Creating table users if not exists...') +db_client.exec(create_table_sql)! + +println('Database and table setup completed successfully!') + diff --git a/examples/core/base/config_basic.vsh b/examples/core/base/config_basic.vsh index 0400babe..e19cd557 100755 --- a/examples/core/base/config_basic.vsh +++ b/examples/core/base/config_basic.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.base diff --git a/examples/core/base/config_with_template_replacement.vsh b/examples/core/base/config_with_template_replacement.vsh index 0a7de62d..bdae2f47 100755 --- a/examples/core/base/config_with_template_replacement.vsh +++ b/examples/core/base/config_with_template_replacement.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.pathlib import freeflowuniverse.herolib.core.base diff --git a/examples/core/base/core_1.vsh b/examples/core/base/core_1.vsh index fec88efc..b39face2 100755 --- a/examples/core/base/core_1.vsh +++ b/examples/core/base/core_1.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.base import freeflowuniverse.herolib.develop.gittools diff --git a/examples/core/codeparser/parse_embedded/example.vsh b/examples/core/codeparser/parse_embedded/example.vsh index 2d0a48a4..35ba80e7 100755 --- a/examples/core/codeparser/parse_embedded/example.vsh +++ b/examples/core/codeparser/parse_embedded/example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import os import freeflowuniverse.herolib.core.codeparser diff --git a/examples/core/db/db_do.v b/examples/core/db/db_do.v index 4088cd57..01caf274 100755 --- a/examples/core/db/db_do.v +++ b/examples/core/db/db_do.v @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -gc 
none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run import time import freeflowuniverse.herolib.core.smartid diff --git a/examples/core/dbfs/dbfs1.vsh b/examples/core/dbfs/dbfs1.vsh index 74bdf542..045d7feb 100755 --- a/examples/core/dbfs/dbfs1.vsh +++ b/examples/core/dbfs/dbfs1.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.data.dbfs import time diff --git a/examples/core/generate.vsh b/examples/core/generate.vsh index dddb7e04..17b7a11d 100755 --- a/examples/core/generate.vsh +++ b/examples/core/generate.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.generator.installer diff --git a/examples/core/openapi/gitea/gitea_openapi.vsh b/examples/core/openapi/gitea/gitea_openapi.vsh index 0526e556..abb99d9e 100644 --- a/examples/core/openapi/gitea/gitea_openapi.vsh +++ b/examples/core/openapi/gitea/gitea_openapi.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import os import json diff --git a/examples/core/pathlib/examples/list/path_list.vsh b/examples/core/pathlib/examples/list/path_list.vsh index 7e635a9d..17f8b09f 100755 --- a/examples/core/pathlib/examples/list/path_list.vsh +++ b/examples/core/pathlib/examples/list/path_list.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.pathlib import os diff --git a/examples/core/pathlib/examples/md5/paths_md5.vsh b/examples/core/pathlib/examples/md5/paths_md5.vsh index 34433802..b6c59ca7 100755 --- a/examples/core/pathlib/examples/md5/paths_md5.vsh +++ b/examples/core/pathlib/examples/md5/paths_md5.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.pathlib import os diff --git a/examples/core/pathlib/examples/scanner/path_scanner.vsh b/examples/core/pathlib/examples/scanner/path_scanner.vsh index 4e6a31b9..26fcdb10 100755 --- a/examples/core/pathlib/examples/scanner/path_scanner.vsh +++ b/examples/core/pathlib/examples/scanner/path_scanner.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.pathlib import freeflowuniverse.herolib.data.paramsparser diff --git a/examples/core/pathlib/examples/sha256/paths_sha256.vsh b/examples/core/pathlib/examples/sha256/paths_sha256.vsh index 07e8de5f..ef9a1726 100755 --- a/examples/core/pathlib/examples/sha256/paths_sha256.vsh +++ b/examples/core/pathlib/examples/sha256/paths_sha256.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl 
-enable-globals run import freeflowuniverse.herolib.core.pathlib import os diff --git a/examples/core/secrets_example.vsh b/examples/core/secrets_example.vsh index 0d0e8ff1..1521bd3c 100755 --- a/examples/core/secrets_example.vsh +++ b/examples/core/secrets_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.crypt.secrets diff --git a/examples/data/.gitignore b/examples/data/.gitignore new file mode 100644 index 00000000..06cf6539 --- /dev/null +++ b/examples/data/.gitignore @@ -0,0 +1 @@ +cache diff --git a/examples/data/cache.vsh b/examples/data/cache.vsh new file mode 100755 index 00000000..2e81cacc --- /dev/null +++ b/examples/data/cache.vsh @@ -0,0 +1,139 @@ +#!/usr/bin/env -S v run + +// Example struct to cache +import freeflowuniverse.herolib.data.cache +import time + +@[heap] +struct User { + id u32 + name string + age int +} + +fn main() { + // Create a cache with custom configuration + config := cache.CacheConfig{ + max_entries: 1000 // Maximum number of entries + max_size_mb: 10.0 // Maximum cache size in MB + ttl_seconds: 300 // Items expire after 5 minutes + eviction_ratio: 0.2 // Evict 20% of entries when full + } + + mut user_cache := cache.new_cache[User](config) + + // Create some example users + user1 := &User{ + id: 1 + name: 'Alice' + age: 30 + } + + user2 := &User{ + id: 2 + name: 'Bob' + age: 25 + } + + // Add users to cache + println('Adding users to cache...') + user_cache.set(user1.id, user1) + user_cache.set(user2.id, user2) + + // Retrieve users from cache + println('\nRetrieving users from cache:') + if cached_user1 := user_cache.get(1) { + println('Found user 1: ${cached_user1.name}, age ${cached_user1.age}') + } + + if cached_user2 := user_cache.get(2) { + println('Found user 2: ${cached_user2.name}, age ${cached_user2.age}') + } + + // Try to get non-existent user + println('\nTrying to get non-existent user:') + if user := user_cache.get(999) { + println('Found user: ${user.name}') + } else { + println('User not found in cache') + } + + // Demonstrate cache stats + println('\nCache statistics:') + println('Number of entries: ${user_cache.len()}') + + // Clear the cache + println('\nClearing cache...') + user_cache.clear() + println('Cache entries after clear: ${user_cache.len()}') + + // Demonstrate max entries limit + println('\nDemonstrating max entries limit (adding 2000 entries):') + println('Initial cache size: ${user_cache.len()}') + + for i := u32(0); i < 2000; i++ { + user := &User{ + id: i + name: 'User${i}' + age: 20 + int(i % 50) + } + user_cache.set(i, user) + + if i % 200 == 0 { + println('After adding ${i} entries:') + println(' Cache size: ${user_cache.len()}') + + // Check some entries to verify LRU behavior + if i >= 500 { + old_id := if i < 1000 { u32(0) } else { i - 1000 } + recent_id := i - 1 + println(' Entry ${old_id} (old): ${if _ := user_cache.get(old_id) { + 'found' + } else { + 'evicted' + }}') + println(' Entry ${recent_id} (recent): ${if _ := user_cache.get(recent_id) { + 'found' + } else { + 'evicted' + }}') + } + println('') + } + } + + println('Final statistics:') + println('Cache size: ${user_cache.len()} (should be max 1000)') + + // Verify we can only access recent entries + println('\nVerifying LRU behavior:') + println('First entry (0): ${if _ := user_cache.get(0) { 'found' } else { 'evicted' }}') + println('Middle entry (1000): ${if _ := 
user_cache.get(1000) { 'found' } else { 'evicted' }}') + println('Recent entry (1900): ${if _ := user_cache.get(1900) { 'found' } else { 'evicted' }}') + println('Last entry (1999): ${if _ := user_cache.get(1999) { 'found' } else { 'evicted' }}') + + // Demonstrate TTL expiration + println('\nDemonstrating TTL expiration:') + quick_config := cache.CacheConfig{ + ttl_seconds: 2 // Set short TTL for demo + } + mut quick_cache := cache.new_cache[User](quick_config) + + // Add a user + quick_cache.set(user1.id, user1) + println('Added user to cache with 2 second TTL') + + if cached := quick_cache.get(user1.id) { + println('User found immediately: ${cached.name}') + } + + // Wait for TTL to expire + println('Waiting for TTL to expire...') + time.sleep(3 * time.second) + + if _ := quick_cache.get(user1.id) { + println('User still in cache') + } else { + println('User expired from cache as expected') + } +} diff --git a/examples/data/encoder.vsh b/examples/data/encoder.vsh index 4ebb9771..88db1ea7 100755 --- a/examples/data/encoder.vsh +++ b/examples/data/encoder.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.data.encoder import crypto.ed25519 diff --git a/examples/data/encrypt_decrypt.vsh b/examples/data/encrypt_decrypt.vsh index 742858a8..74a38407 100755 --- a/examples/data/encrypt_decrypt.vsh +++ b/examples/data/encrypt_decrypt.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.crypt.aes_symmetric { decrypt, encrypt } import freeflowuniverse.herolib.ui.console diff --git a/examples/data/graphdb.vsh b/examples/data/graphdb.vsh new file mode 100755 index 00000000..4813de52 --- /dev/null +++ b/examples/data/graphdb.vsh @@ -0,0 +1,175 @@ +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run + +// Example demonstrating GraphDB usage in a social network context +import freeflowuniverse.herolib.data.graphdb + +fn main() { + // Initialize a new graph database with default cache settings + mut gdb := graphdb.new( + path: '/tmp/social_network_example' + reset: true // Start fresh each time + )! + + println('=== Social Network Graph Example ===\n') + + // 1. Creating User Nodes + println('Creating users...') + mut alice_id := gdb.create_node({ + 'type': 'user' + 'name': 'Alice Chen' + 'age': '28' + 'location': 'San Francisco' + 'occupation': 'Software Engineer' + })! + println('Created user: ${gdb.debug_node(alice_id)!}') + + mut bob_id := gdb.create_node({ + 'type': 'user' + 'name': 'Bob Smith' + 'age': '32' + 'location': 'New York' + 'occupation': 'Product Manager' + })! + println('Created user: ${gdb.debug_node(bob_id)!}') + + mut carol_id := gdb.create_node({ + 'type': 'user' + 'name': 'Carol Davis' + 'age': '27' + 'location': 'San Francisco' + 'occupation': 'Data Scientist' + })! + println('Created user: ${gdb.debug_node(carol_id)!}') + + // 2. Creating Organization Nodes + println('\nCreating organizations...') + mut techcorp_id := gdb.create_node({ + 'type': 'organization' + 'name': 'TechCorp' + 'industry': 'Technology' + 'location': 'San Francisco' + 'size': '500+' + })! 
+ println('Created organization: ${gdb.debug_node(techcorp_id)!}') + + mut datacorp_id := gdb.create_node({ + 'type': 'organization' + 'name': 'DataCorp' + 'industry': 'Data Analytics' + 'location': 'New York' + 'size': '100-500' + })! + println('Created organization: ${gdb.debug_node(datacorp_id)!}') + + // 3. Creating Interest Nodes + println('\nCreating interest groups...') + mut ai_group_id := gdb.create_node({ + 'type': 'group' + 'name': 'AI Enthusiasts' + 'category': 'Technology' + 'members': '0' + })! + println('Created group: ${gdb.debug_node(ai_group_id)!}') + + // 4. Establishing Relationships + println('\nCreating relationships...') + + // Friendship relationships + gdb.create_edge(alice_id, bob_id, 'FRIENDS', { + 'since': '2022' + 'strength': 'close' + })! + gdb.create_edge(alice_id, carol_id, 'FRIENDS', { + 'since': '2023' + 'strength': 'close' + })! + + // Employment relationships + gdb.create_edge(alice_id, techcorp_id, 'WORKS_AT', { + 'role': 'Senior Engineer' + 'since': '2021' + 'department': 'Engineering' + })! + gdb.create_edge(bob_id, datacorp_id, 'WORKS_AT', { + 'role': 'Product Lead' + 'since': '2020' + 'department': 'Product' + })! + gdb.create_edge(carol_id, techcorp_id, 'WORKS_AT', { + 'role': 'Data Scientist' + 'since': '2022' + 'department': 'Analytics' + })! + + // Group memberships + gdb.create_edge(alice_id, ai_group_id, 'MEMBER_OF', { + 'joined': '2023' + 'status': 'active' + })! + gdb.create_edge(carol_id, ai_group_id, 'MEMBER_OF', { + 'joined': '2023' + 'status': 'active' + })! + + // 5. Querying the Graph + println('\nPerforming queries...') + + // Find users in San Francisco + println('\nUsers in San Francisco:') + sf_users := gdb.query_nodes_by_property('location', 'San Francisco')! + for user in sf_users { + if user.properties['type'] == 'user' { + println('- ${user.properties['name']} (${user.properties['occupation']})') + } + } + + // Find Alice's friends + println("\nAlice's friends:") + alice_friends := gdb.get_connected_nodes(alice_id, 'FRIENDS', 'out')! + for friend in alice_friends { + println('- ${friend.properties['name']} in ${friend.properties['location']}') + } + + // Find where Alice works + println("\nAlice's workplace:") + alice_workplaces := gdb.get_connected_nodes(alice_id, 'WORKS_AT', 'out')! + for workplace in alice_workplaces { + println('- ${workplace.properties['name']} (${workplace.properties['industry']})') + } + + // Find TechCorp employees + println('\nTechCorp employees:') + techcorp_employees := gdb.get_connected_nodes(techcorp_id, 'WORKS_AT', 'in')! + for employee in techcorp_employees { + println('- ${employee.properties['name']} as ${employee.properties['occupation']}') + } + + // Find AI group members + println('\nAI Enthusiasts group members:') + ai_members := gdb.get_connected_nodes(ai_group_id, 'MEMBER_OF', 'in')! + for member in ai_members { + println('- ${member.properties['name']}') + } + + // 6. Updating Data + println('\nUpdating data...') + + // Promote Alice + println('\nPromoting Alice...') + mut alice := gdb.get_node(alice_id)! + alice.properties['occupation'] = 'Lead Software Engineer' + gdb.update_node(alice_id, alice.properties)! + + // Update Alice's work relationship + mut edges := gdb.get_edges_between(alice_id, techcorp_id)! + if edges.len > 0 { + gdb.update_edge(edges[0].id, { + 'role': 'Engineering Team Lead' + 'since': '2021' + 'department': 'Engineering' + })! + } + + println('\nFinal graph structure:') + gdb.print_graph()! 
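+
+	// Recap of the graphdb API used above: create_node, create_edge,
+	// query_nodes_by_property, get_connected_nodes, get_node, update_node,
+	// get_edges_between and update_edge; deletion is not demonstrated here.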
+}
diff --git a/examples/data/heroencoder_example.vsh b/examples/data/heroencoder_example.vsh
index 7628722c..e25766f2 100755
--- a/examples/data/heroencoder_example.vsh
+++ b/examples/data/heroencoder_example.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 import freeflowuniverse.herolib.data.encoderhero
 import freeflowuniverse.herolib.core.base
diff --git a/examples/data/heroencoder_simple.vsh b/examples/data/heroencoder_simple.vsh
new file mode 100755
index 00000000..c0db981a
--- /dev/null
+++ b/examples/data/heroencoder_simple.vsh
@@ -0,0 +1,30 @@
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
+
+import freeflowuniverse.herolib.data.encoderhero
+import freeflowuniverse.herolib.core.base
+import time
+
+struct Person {
+mut:
+	name     string
+	age      int = 20
+	birthday time.Time
+}
+
+mut person := Person{
+	name:     'Bob'
+	birthday: time.now()
+}
+heroscript := encoderhero.encode[Person](person)!
+
+println(heroscript)
+
+person2 := encoderhero.decode[Person](heroscript)!
+println(person2)
+
+// show that it doesn't matter which actor & action name is used
+heroscript2 := "!!a.b name:Bob age:20 birthday:'2025-02-06 09:57:30'"
+person3 := encoderhero.decode[Person](heroscript2)!
+
+println(person3)
+
diff --git a/examples/data/jsonexample.vsh b/examples/data/jsonexample.vsh
new file mode 100755
index 00000000..a50936e3
--- /dev/null
+++ b/examples/data/jsonexample.vsh
@@ -0,0 +1,37 @@
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
+
+
+import json
+
+enum JobTitle {
+	manager
+	executive
+	worker
+}
+
+struct Employee {
+mut:
+	name   string
+	family string @[json: '-'] // this field will be skipped
+	age    int
+	salary f32
+	title  JobTitle @[json: 'ETitle'] // the key for this field will be 'ETitle', not 'title'
+	notes  string   @[omitempty] // the JSON property is not created if the string is equal to '' (an empty string).
+	// TODO: document @[raw]
+}
+
+x := Employee{'Peter', 'Begins', 28, 95000.5, .worker, ''}
+println(x)
+s := json.encode(x)
+println('JSON encoding of employee x: ${s}')
+assert s == '{"name":"Peter","age":28,"salary":95000.5,"ETitle":"worker"}'
+mut y := json.decode(Employee, s)!
+assert y != x
+assert y.family == ''
+y.family = 'Begins'
+assert y == x
+println(y)
+ss := json.encode(y)
+println('JSON encoding of employee y: ${ss}')
+assert ss == s
+
diff --git a/examples/data/location/location_example.vsh b/examples/data/location/location_example.vsh
new file mode 100755
index 00000000..94c3f1ba
--- /dev/null
+++ b/examples/data/location/location_example.vsh
@@ -0,0 +1,64 @@
+#!/usr/bin/env -S v -n -w -cg -d use_openssl -enable-globals run
+
+import freeflowuniverse.herolib.clients.postgresql_client
+import freeflowuniverse.herolib.data.location
+
+// Configure PostgreSQL client
+heroscript := "
+!!postgresql_client.configure
+	name:'test'
+	user: 'postgres'
+	port: 5432
+	host: 'localhost'
+	password: '1234'
+	dbname: 'postgres'
+"
+
+// Process the heroscript configuration
+postgresql_client.play(heroscript: heroscript)!
+
+// Get the configured client
+mut db_client := postgresql_client.get(name: "test")!
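+// NOTE: assumes the PostgreSQL stack from docker/postgresql in this repo is
+// running locally (docker compose up -d), matching the password '1234' above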
+ + +// Create a new location instance +mut loc := location.new(mut db_client, false) or { panic(err) } +println('Location database initialized') + +// Initialize the database (downloads and imports data) +// This only needs to be done once or when updating data +println('Downloading and importing location data (this may take a few minutes)...') + +// the arg is if we redownload +loc.download_and_import(false) or { panic(err) } +println('Data import complete') + +// // Example 1: Search for a city +// println('\nSearching for London...') +// results := loc.search('London', 'GB', 5, true) or { panic(err) } +// for result in results { +// println('${result.city.name}, ${result.country.name} (${result.country.iso2})') +// println('Coordinates: ${result.city.latitude}, ${result.city.longitude}') +// println('Population: ${result.city.population}') +// println('Timezone: ${result.city.timezone}') +// println('---') +// } + +// // Example 2: Search near coordinates (10km radius from London) +// println('\nSearching for cities within 10km of London...') +// nearby := loc.search_near(51.5074, -0.1278, 10.0, 5) or { panic(err) } +// for result in nearby { +// println('${result.city.name}, ${result.country.name}') +// println('Distance from center: Approx ${result.similarity:.1f}km') +// println('---') +// } + +// // Example 3: Fuzzy search in a specific country +// println('\nFuzzy searching for "New" in United States...') +// us_cities := loc.search('New', 'US', 5, true) or { panic(err) } +// for result in us_cities { +// println('${result.city.name}, ${result.country.name}') +// println('State: ${result.city.state_name} (${result.city.state_code})') +// println('Population: ${result.city.population}') +// println('---') +// } diff --git a/examples/data/location/location_example_tcc.vsh b/examples/data/location/location_example_tcc.vsh new file mode 100755 index 00000000..ab1a7efe --- /dev/null +++ b/examples/data/location/location_example_tcc.vsh @@ -0,0 +1,64 @@ +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.clients.postgresql_client +import freeflowuniverse.herolib.data.location + +// Configure PostgreSQL client +heroscript := " +!!postgresql_client.configure + name:'test' + user: 'postgres' + port: 5432 + host: 'localhost' + password: '1234' + dbname: 'postgres' +" + +// Process the heroscript configuration +postgresql_client.play(heroscript: heroscript)! + +// Get the configured client +mut db_client := postgresql_client.get(name: "test")! 
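+// NOTE: identical to location_example.vsh above; only the shebang differs,
+// compiling with tcc (-cc tcc) for faster iteration during development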
+
+
+// Create a new location instance
+mut loc := location.new(mut db_client, false) or { panic(err) }
+println('Location database initialized')
+
+// Initialize the database (downloads and imports data)
+// This only needs to be done once or when updating data
+println('Downloading and importing location data (this may take a few minutes)...')
+
+// the arg is if we redownload
+loc.download_and_import(false) or { panic(err) }
+println('Data import complete')
+
+// // Example 1: Search for a city
+// println('\nSearching for London...')
+// results := loc.search('London', 'GB', 5, true) or { panic(err) }
+// for result in results {
+// println('${result.city.name}, ${result.country.name} (${result.country.iso2})')
+// println('Coordinates: ${result.city.latitude}, ${result.city.longitude}')
+// println('Population: ${result.city.population}')
+// println('Timezone: ${result.city.timezone}')
+// println('---')
+// }
+
+// // Example 2: Search near coordinates (10km radius from London)
+// println('\nSearching for cities within 10km of London...')
+// nearby := loc.search_near(51.5074, -0.1278, 10.0, 5) or { panic(err) }
+// for result in nearby {
+// println('${result.city.name}, ${result.country.name}')
+// println('Distance from center: Approx ${result.similarity:.1f}km')
+// println('---')
+// }
+
+// // Example 3: Fuzzy search in a specific country
+// println('\nFuzzy searching for "New" in United States...')
+// us_cities := loc.search('New', 'US', 5, true) or { panic(err) }
+// for result in us_cities {
+// println('${result.city.name}, ${result.country.name}')
+// println('State: ${result.city.state_name} (${result.city.state_code})')
+// println('Population: ${result.city.population}')
+// println('---')
+// }
diff --git a/examples/data/ourdb_example.vsh b/examples/data/ourdb_example.vsh
new file mode 100755
index 00000000..d274c6a8
--- /dev/null
+++ b/examples/data/ourdb_example.vsh
@@ -0,0 +1,40 @@
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
+
+import freeflowuniverse.herolib.data.ourdb
+
+const test_dir = '/tmp/ourdb'
+
+mut db := ourdb.new(
+	record_nr_max:   16777216 - 1 // max number of records
+	record_size_max: 1024
+	path:            test_dir
+	reset:           true
+)!
+
+defer {
+	db.destroy() or { panic('failed to destroy db: ${err}') }
+}
+
+// Test set and get
+test_data := 'Hello, World!'.bytes()
+id := db.set(data: test_data)!
+
+retrieved := db.get(id)!
+assert retrieved == test_data
+
+assert id == 0
+
+// Test overwrite
+new_data := 'Updated data'.bytes()
+id2 := db.set(id: 0, data: new_data)!
+assert id2 == 0
+
+// // Verify lookup table has the correct location
+// location := db.lookup.get(id2)!
+// println('Location after update - file_nr: ${location.file_nr}, position: ${location.position}')
+
+// Get and verify the updated data
+retrieved2 := db.get(id2)!
+println('Retrieved data: ${retrieved2}') +println('Expected data: ${new_data}') +assert retrieved2 == new_data diff --git a/examples/data/params/args/args_example.vsh b/examples/data/params/args/args_example.vsh index 45aca2da..b5962bfd 100755 --- a/examples/data/params/args/args_example.vsh +++ b/examples/data/params/args/args_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.playbook import freeflowuniverse.herolib.data.paramsparser diff --git a/examples/data/params/paramsfilter/paramsfilter.vsh b/examples/data/params/paramsfilter/paramsfilter.vsh index 9136ff58..5b7c858c 100755 --- a/examples/data/params/paramsfilter/paramsfilter.vsh +++ b/examples/data/params/paramsfilter/paramsfilter.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.data.paramsparser { Params, parse } import time diff --git a/examples/data/radixtree.vsh b/examples/data/radixtree.vsh new file mode 100755 index 00000000..b439386a --- /dev/null +++ b/examples/data/radixtree.vsh @@ -0,0 +1,33 @@ +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.data.radixtree + +mut rt := radixtree.new(path: '/tmp/radixtree_test', reset: true)! + +// Show initial state +println('\nInitial state:') +rt.debug_db()! + +// Test insert +println('\nInserting key "test" with value "value1"') +rt.insert('test', 'value1'.bytes())! + +// Show state after insert +println('\nState after insert:') +rt.debug_db()! + +// Print tree structure +rt.print_tree()! + +// Test search +if value := rt.search('test') { + println('\nFound value: ${value.bytestr()}') +} else { + println('\nError: ${err}') +} + +println('\nInserting key "test2" with value "value2"') +rt.insert('test2', 'value2'.bytes())! + +// Print tree structure +rt.print_tree()! diff --git a/examples/data/resp/resp_example.vsh b/examples/data/resp/resp_example.vsh index 2ead50a3..6a9a24e8 100755 --- a/examples/data/resp/resp_example.vsh +++ b/examples/data/resp/resp_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.data.resp import crypto.ed25519 diff --git a/examples/develop/gittools/example3.vsh b/examples/develop/gittools/example3.vsh index 5744b7f4..3befba73 100755 --- a/examples/develop/gittools/example3.vsh +++ b/examples/develop/gittools/example3.vsh @@ -1,9 +1,10 @@ -#!/usr/bin/env -S v -n -w -gc none -cg -no-retry-compilation -cc tcc -d use_openssl -enable-globals run -// #!/usr/bin/env -S v -n -w -cg -no-retry-compilation -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cg -cc tcc -d use_openssl -enable-globals run + +// #!/usr/bin/env -S v -n -w -cg -d use_openssl -enable-globals run //-parallel-cc import os import freeflowuniverse.herolib.develop.gittools -mut gs := gittools.get(reload:true)! +mut gs := gittools.get(reload: true)! -gs.repos_print()! \ No newline at end of file +gs.repos_print()! 
diff --git a/examples/develop/gittools/gittools_example.vsh b/examples/develop/gittools/gittools_example.vsh index d9e39f27..f5817995 100755 --- a/examples/develop/gittools/gittools_example.vsh +++ b/examples/develop/gittools/gittools_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.develop.gittools import freeflowuniverse.herolib.osal diff --git a/examples/develop/gittools/gittools_example2.vsh b/examples/develop/gittools/gittools_example2.vsh index ece58866..c768b256 100755 --- a/examples/develop/gittools/gittools_example2.vsh +++ b/examples/develop/gittools/gittools_example2.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.develop.gittools import freeflowuniverse.herolib.osal diff --git a/examples/develop/juggler/v_example.vsh b/examples/develop/juggler/v_example.vsh index 23a6106a..b81ffd52 100755 --- a/examples/develop/juggler/v_example.vsh +++ b/examples/develop/juggler/v_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import os import freeflowuniverse.herolib.osal diff --git a/examples/develop/juggler/v_example2.vsh b/examples/develop/juggler/v_example2.vsh index 120868b4..54fe1acc 100755 --- a/examples/develop/juggler/v_example2.vsh +++ b/examples/develop/juggler/v_example2.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.sysadmin.startupmanager import os diff --git a/examples/develop/luadns/example.vsh b/examples/develop/luadns/example.vsh index 582225c7..cebd9bdf 100644 --- a/examples/develop/luadns/example.vsh +++ b/examples/develop/luadns/example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.develop.luadns diff --git a/examples/develop/openai/openai_example.vsh b/examples/develop/openai/openai_example.vsh index c48599c3..b65e00bf 100644 --- a/examples/develop/openai/openai_example.vsh +++ b/examples/develop/openai/openai_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.clients.openai as op diff --git a/examples/develop/runpod/runpod_example.vsh b/examples/develop/runpod/runpod_example.vsh index 30ddfc62..d0029a3f 100755 --- a/examples/develop/runpod/runpod_example.vsh +++ b/examples/develop/runpod/runpod_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run // import freeflowuniverse.herolib.core.base import freeflowuniverse.herolib.clients.runpod diff --git a/examples/develop/vastai/vastai_example.vsh b/examples/develop/vastai/vastai_example.vsh index 
a73255f7..5e1eaaf9 100755 --- a/examples/develop/vastai/vastai_example.vsh +++ b/examples/develop/vastai/vastai_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.clients.vastai import json @@ -24,19 +24,19 @@ create_instance_res := va.create_instance( println('create instance res: ${create_instance_res}') attach_sshkey_to_instance_res := va.attach_sshkey_to_instance( - id: 1 - ssh_key: "ssh-rsa AAAA..." + id: 1 + ssh_key: 'ssh-rsa AAAA...' )! println('attach sshkey to instance res: ${attach_sshkey_to_instance_res}') stop_instance_res := va.stop_instance( - id: 1 - state: "stopped" + id: 1 + state: 'stopped' )! println('stop instance res: ${stop_instance_res}') destroy_instance_res := va.destroy_instance( - id: 1 + id: 1 )! println('destroy instance res: ${destroy_instance_res}') @@ -44,23 +44,23 @@ println('destroy instance res: ${destroy_instance_res}') // (request failed with code 500: {"error":"server_error","msg":"Something went wrong on the server"}) launch_instance_res := va.launch_instance( // Required - num_gpus: 1, - gpu_name: "RTX_3090", - image: 'vastai/tensorflow', - disk: 10, - region: "us-west", + num_gpus: 1 + gpu_name: 'RTX_3090' + image: 'vastai/tensorflow' + disk: 10 + region: 'us-west' // Optional - env: "user=7amada, home=/home/7amada", + env: 'user=7amada, home=/home/7amada' )! println('destroy instance res: ${launch_instance_res}') start_instances_res := va.start_instances( - ids: [1, 2, 3] + ids: [1, 2, 3] )! println('start instances res: ${start_instances_res}') start_instance_res := va.start_instance( - id: 1 + id: 1 )! println('start instance res: ${start_instance_res}') diff --git a/examples/hero/alpine_example.vsh b/examples/hero/alpine_example.vsh index 994a1a7b..9be0f17d 100755 --- a/examples/hero/alpine_example.vsh +++ b/examples/hero/alpine_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.hero.bootstrap diff --git a/examples/hero/generation/blank_generation/example_1.vsh b/examples/hero/generation/blank_generation/example_1.vsh index 428b21c4..451a73e0 100644 --- a/examples/hero/generation/blank_generation/example_1.vsh +++ b/examples/hero/generation/blank_generation/example_1.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.hero.generation diff --git a/examples/hero/generation/blank_generation/example_2.vsh b/examples/hero/generation/blank_generation/example_2.vsh index dbdbed32..bef29f7a 100644 --- a/examples/hero/generation/blank_generation/example_2.vsh +++ b/examples/hero/generation/blank_generation/example_2.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.hero.generation diff --git a/examples/hero/generation/openapi_generation/generate_actor.vsh b/examples/hero/generation/openapi_generation/generate_actor.vsh index 158bb5c7..135e08cd 100644 --- a/examples/hero/generation/openapi_generation/generate_actor.vsh +++ 
b/examples/hero/generation/openapi_generation/generate_actor.vsh @@ -1 +1 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run diff --git a/examples/hero/generation/openapi_generation/run_actor.vsh b/examples/hero/generation/openapi_generation/run_actor.vsh index 158bb5c7..53056db8 100644 --- a/examples/hero/generation/openapi_generation/run_actor.vsh +++ b/examples/hero/generation/openapi_generation/run_actor.vsh @@ -1 +1 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run diff --git a/examples/hero/generation/openapi_generation/run_interface_procedure.vsh b/examples/hero/generation/openapi_generation/run_interface_procedure.vsh index a09bffc0..5ee4e5c3 100755 --- a/examples/hero/generation/openapi_generation/run_interface_procedure.vsh +++ b/examples/hero/generation/openapi_generation/run_interface_procedure.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import example_actor diff --git a/examples/hero/generation/openapi_generation/run_server.vsh b/examples/hero/generation/openapi_generation/run_server.vsh index 158bb5c7..135e08cd 100644 --- a/examples/hero/generation/openapi_generation/run_server.vsh +++ b/examples/hero/generation/openapi_generation/run_server.vsh @@ -1 +1 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run diff --git a/examples/installers/actrunner.vsh b/examples/installers/actrunner.vsh index 0b718ca8..449736b1 100755 --- a/examples/installers/actrunner.vsh +++ b/examples/installers/actrunner.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.sysadmintools.actrunner import freeflowuniverse.herolib.installers.virt.herocontainers diff --git a/examples/installers/conduit.vsh b/examples/installers/conduit.vsh index 0605eb36..d4e1a69d 100755 --- a/examples/installers/conduit.vsh +++ b/examples/installers/conduit.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.fediverse.conduit diff --git a/examples/installers/coredns.vsh b/examples/installers/coredns.vsh index 7ead086d..dda45b90 100755 --- a/examples/installers/coredns.vsh +++ b/examples/installers/coredns.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.infra.coredns as coredns_installer diff --git a/examples/installers/dagu.vsh b/examples/installers/dagu.vsh index d586c380..39704612 100755 --- a/examples/installers/dagu.vsh +++ b/examples/installers/dagu.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc 
-d use_openssl -enable-globals run

import freeflowuniverse.herolib.installers.sysadmintools.daguserver
import freeflowuniverse.herolib.installers.infra.zinit
diff --git a/examples/installers/dagu_server.vsh b/examples/installers/dagu_server.vsh
index f9604090..00a0156a 100755
--- a/examples/installers/dagu_server.vsh
+++ b/examples/installers/dagu_server.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.installers.sysadmintools.daguserver
diff --git a/examples/installers/gitea.vsh b/examples/installers/gitea.vsh
index c4afe912..d8df7013 100755
--- a/examples/installers/gitea.vsh
+++ b/examples/installers/gitea.vsh
@@ -1,34 +1,15 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.installers.infra.gitea as gitea_installer

-// First of all, we need to set the gitea configuration
-// heroscript := "
-// !!gitea.configure
-// name:'default'
-// version:'1.22.6'
-// path: '/var/lib/git'
-// passwd: '12345678'
-// postgresql_name: 'default'
-// mail_from: 'git@meet.tf'
-// smtp_addr: 'smtp-relay.brevo.com'
-// smtp_login: 'admin'
-// smtp_port: 587
-// smtp_passwd: '12345678'
-// domain: 'meet.tf'
-// jwt_secret: ''
-// lfs_jwt_secret: ''
-// internal_token: ''
-// secret_key: ''
-// "
-// gitea_installer.play(
-// name: 'default'
-// heroscript: heroscript
-// )!
+mut installer := gitea_installer.get(name: 'test')!

-// Then we need to get an instace of the installer and call the install
-mut gitea := gitea_installer.get()!
-// println('gitea configs: ${gitea}')
-gitea.install()!
-gitea.start()!
+// if you want to configure using heroscript
+gitea_installer.play(heroscript: "
+	!!gitea.configure name:test
+		passwd:'something'
+		domain: 'docs.info.com'
+	")!
+
+installer.start()!
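The compact version above only sets two options. For a fuller configuration, a sketch is shown below — it reuses the option names documented in the comment block this diff removes (`version`, `path`, `smtp_*`, `domain`, ...); all values are placeholders, not working credentials:

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.installers.infra.gitea as gitea_installer

// option names taken from the removed comment block; values are placeholders
gitea_installer.play(heroscript: "
	!!gitea.configure name:'default'
		version:'1.22.6'
		path:'/var/lib/git'
		passwd:'12345678'
		domain:'meet.tf'
		mail_from:'git@meet.tf'
		smtp_addr:'smtp-relay.brevo.com'
		smtp_login:'admin'
		smtp_port:587
		smtp_passwd:'12345678'
	")!

mut installer := gitea_installer.get(name: 'default')!
installer.start()!
```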
diff --git a/examples/installers/griddriver.vsh b/examples/installers/griddriver.vsh index 6535ec13..a001aa60 100755 --- a/examples/installers/griddriver.vsh +++ b/examples/installers/griddriver.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.threefold.griddriver diff --git a/examples/installers/hero_install.vsh b/examples/installers/hero_install.vsh index a2a483ed..e2d5365c 100755 --- a/examples/installers/hero_install.vsh +++ b/examples/installers/hero_install.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.lang.vlang import freeflowuniverse.herolib.installers.sysadmintools.daguserver diff --git a/examples/installers/herocontainers.vsh b/examples/installers/herocontainers.vsh index 531d1623..a42e0a6a 100755 --- a/examples/installers/herocontainers.vsh +++ b/examples/installers/herocontainers.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.osal import freeflowuniverse.herolib.installers.lang.golang diff --git a/examples/installers/installers.vsh b/examples/installers/installers.vsh index 0c901be9..d61cf087 100755 --- a/examples/installers/installers.vsh +++ b/examples/installers/installers.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.lang.rust import freeflowuniverse.herolib.installers.lang.python diff --git a/examples/installers/mycelium.vsh b/examples/installers/mycelium.vsh index 50791b0e..13cc0021 100755 --- a/examples/installers/mycelium.vsh +++ b/examples/installers/mycelium.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.net.mycelium as mycelium_installer diff --git a/examples/installers/podman.vsh b/examples/installers/podman.vsh index b512abc8..71f771df 100755 --- a/examples/installers/podman.vsh +++ b/examples/installers/podman.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.virt.podman as podman_installer diff --git a/examples/installers/postgresql.vsh b/examples/installers/postgresql.vsh index adabfdec..b826d972 100755 --- a/examples/installers/postgresql.vsh +++ b/examples/installers/postgresql.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import time import freeflowuniverse.herolib.installers.db.postgresql diff --git a/examples/installers/youki.vsh b/examples/installers/youki.vsh index d23fa428..0e00e757 100755 --- a/examples/installers/youki.vsh +++ 
b/examples/installers/youki.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.virt.youki diff --git a/examples/lang/python/pythonexample.vsh b/examples/lang/python/pythonexample.vsh index 40ee55ed..13015b52 100755 --- a/examples/lang/python/pythonexample.vsh +++ b/examples/lang/python/pythonexample.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.lang.python import json diff --git a/examples/osal/download/download_example.vsh b/examples/osal/download/download_example.vsh index 0ae2d2e1..f88006cf 100755 --- a/examples/osal/download/download_example.vsh +++ b/examples/osal/download/download_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.osal { download } diff --git a/examples/osal/notifier.vsh b/examples/osal/notifier.vsh new file mode 100755 index 00000000..0e7f07a4 --- /dev/null +++ b/examples/osal/notifier.vsh @@ -0,0 +1,48 @@ +#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.osal.notifier +import os +import time + +fn on_file_change(event notifier.NotifyEvent, path string, args map[string]string) { + match event { + .create { println('File created: ${path}') } + .modify { println('File modified: ${path}') } + .delete { println('File deleted: ${path}') } + .rename { println('File renamed: ${path}') } + } +} + +fn main() { + // Create test directory and files + test_dir := '/tmp/notifytest' + if !os.exists(test_dir) { + os.mkdir_all(test_dir)! + os.write_file('${test_dir}/test.txt', 'initial content')! + os.mkdir('${test_dir}/subdir')! + os.write_file('${test_dir}/subdir/test2.txt', 'test content')! + } + + // Create a new notifier + mut n := notifier.new('test_watcher')! + + // Add files to watch + n.add_watch('${test_dir}', on_file_change)! + + // Start watching + n.start()! + + println('Watching files in ${test_dir} for 60 seconds...') + println('Try these operations to test the notifier:') + println('1. Modify a file: echo "new content" > ${test_dir}/test.txt') + println('2. Create a file: touch ${test_dir}/newfile.txt') + println('3. Delete a file: rm ${test_dir}/test.txt') + println('4. 
Rename a file: mv ${test_dir}/test.txt ${test_dir}/renamed.txt') + + // Keep the program running for 60 seconds + time.sleep(60 * time.second) + + // Clean up + n.stop() + println('\nWatch period ended.') +} diff --git a/examples/osal/ping/ping_example.vsh b/examples/osal/ping/ping_example.vsh index c5ac1b77..cd6a01c6 100755 --- a/examples/osal/ping/ping_example.vsh +++ b/examples/osal/ping/ping_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.osal { ping } diff --git a/examples/osal/ping/portforward.vsh b/examples/osal/ping/portforward.vsh index 9fb73241..83c3f953 100755 --- a/examples/osal/ping/portforward.vsh +++ b/examples/osal/ping/portforward.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.builder diff --git a/examples/osal/startup_manager.vsh b/examples/osal/startup_manager.vsh index 1d068238..0e5dca92 100755 --- a/examples/osal/startup_manager.vsh +++ b/examples/osal/startup_manager.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.infra.zinit as zinitinstaller import freeflowuniverse.herolib.sysadmin.startupmanager diff --git a/examples/osal/systemd.vsh b/examples/osal/systemd.vsh index becb9d1f..c7778014 100755 --- a/examples/osal/systemd.vsh +++ b/examples/osal/systemd.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.osal.systemd diff --git a/examples/osal/ufw.vsh b/examples/osal/ufw.vsh index ad602a78..592cf4dc 100755 --- a/examples/osal/ufw.vsh +++ b/examples/osal/ufw.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.osal.ufw diff --git a/examples/osal/ufw_play.vsh b/examples/osal/ufw_play.vsh index ebdc5822..02dda0ed 100755 --- a/examples/osal/ufw_play.vsh +++ b/examples/osal/ufw_play.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.osal.ufw import freeflowuniverse.herolib.core.playbook diff --git a/examples/osal/zinit/simple/zinit.vsh b/examples/osal/zinit/simple/zinit.vsh index 68abaa41..cb0d6c3a 100644 --- a/examples/osal/zinit/simple/zinit.vsh +++ b/examples/osal/zinit/simple/zinit.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import os import time diff --git a/examples/threefold/grid/README.md b/examples/threefold/grid/README.md index a61e5025..33f353a8 100644 --- a/examples/threefold/grid/README.md +++ b/examples/threefold/grid/README.md @@ -7,7 +7,7 @@ To be able to run examples you need to install updated version of 
`griddriver`. Create some `griddriver_install.vsh` file containing following code: ```vlang -#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.tfgrid.griddriver as griddriverinstaller diff --git a/examples/threefold/grid/deploy/create_update_deployments.vsh b/examples/threefold/grid/deploy/create_update_deployments.vsh index cc1d41a8..d35693a0 100755 --- a/examples/threefold/grid/deploy/create_update_deployments.vsh +++ b/examples/threefold/grid/deploy/create_update_deployments.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid.models import freeflowuniverse.herolib.threefold.grid as tfgrid diff --git a/examples/threefold/grid/deploy/deploy_gw_fqdn.vsh b/examples/threefold/grid/deploy/deploy_gw_fqdn.vsh index d6b12e46..1c0994a8 100755 --- a/examples/threefold/grid/deploy/deploy_gw_fqdn.vsh +++ b/examples/threefold/grid/deploy/deploy_gw_fqdn.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid as tfgrid import freeflowuniverse.herolib.threefold.grid.models diff --git a/examples/threefold/grid/deploy/deploy_gw_name.vsh b/examples/threefold/grid/deploy/deploy_gw_name.vsh index 86923384..64675c82 100755 --- a/examples/threefold/grid/deploy/deploy_gw_name.vsh +++ b/examples/threefold/grid/deploy/deploy_gw_name.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid as tfgrid import freeflowuniverse.herolib.threefold.grid.models diff --git a/examples/threefold/grid/deploy/deploy_vm.vsh b/examples/threefold/grid/deploy/deploy_vm.vsh index f6aa4e5e..93b37c0f 100755 --- a/examples/threefold/grid/deploy/deploy_vm.vsh +++ b/examples/threefold/grid/deploy/deploy_vm.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid.models import freeflowuniverse.herolib.threefold.grid as tfgrid diff --git a/examples/threefold/grid/deploy/deploy_vm_high_level.vsh b/examples/threefold/grid/deploy/deploy_vm_high_level.vsh index 1bdfb99d..df37cadf 100755 --- a/examples/threefold/grid/deploy/deploy_vm_high_level.vsh +++ b/examples/threefold/grid/deploy/deploy_vm_high_level.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid.models import freeflowuniverse.herolib.threefold.grid as tfgrid diff --git a/examples/threefold/grid/deploy/deploy_zdb.vsh b/examples/threefold/grid/deploy/deploy_zdb.vsh index c6c8d180..1a408407 100755 --- a/examples/threefold/grid/deploy/deploy_zdb.vsh +++ b/examples/threefold/grid/deploy/deploy_zdb.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d 
use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid.models import freeflowuniverse.herolib.threefold.grid as tfgrid diff --git a/examples/threefold/grid/deploy/holochain_vm.vsh b/examples/threefold/grid/deploy/holochain_vm.vsh index 7288285a..b7472f61 100755 --- a/examples/threefold/grid/deploy/holochain_vm.vsh +++ b/examples/threefold/grid/deploy/holochain_vm.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid.models import freeflowuniverse.herolib.threefold.grid as tfgrid diff --git a/examples/threefold/grid/deploy/vm_with_gw_name.vsh b/examples/threefold/grid/deploy/vm_with_gw_name.vsh index a606479d..8fc2b7ed 100755 --- a/examples/threefold/grid/deploy/vm_with_gw_name.vsh +++ b/examples/threefold/grid/deploy/vm_with_gw_name.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid.models import freeflowuniverse.herolib.threefold.grid as tfgrid diff --git a/examples/threefold/grid/deployment_state.vsh b/examples/threefold/grid/deployment_state.vsh index dd44cc19..48cc93f1 100644 --- a/examples/threefold/grid/deployment_state.vsh +++ b/examples/threefold/grid/deployment_state.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run struct DeploymentStateDB { secret string // to encrypt symmetric diff --git a/examples/threefold/grid/utils/cancel_contract.vsh b/examples/threefold/grid/utils/cancel_contract.vsh index a4056524..5cdb450c 100755 --- a/examples/threefold/grid/utils/cancel_contract.vsh +++ b/examples/threefold/grid/utils/cancel_contract.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import log diff --git a/examples/threefold/grid/utils/cancel_contracts.vsh b/examples/threefold/grid/utils/cancel_contracts.vsh index bb506fb9..15a7aa84 100755 --- a/examples/threefold/grid/utils/cancel_contracts.vsh +++ b/examples/threefold/grid/utils/cancel_contracts.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid as tfgrid import log diff --git a/examples/threefold/grid/utils/tfgrid_config.vsh b/examples/threefold/grid/utils/tfgrid_config.vsh index 867f35fd..df8c7c13 100755 --- a/examples/threefold/grid/utils/tfgrid_config.vsh +++ b/examples/threefold/grid/utils/tfgrid_config.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid as tfgrid diff --git a/examples/threefold/grid/utils/zos_version.vsh b/examples/threefold/grid/utils/zos_version.vsh index eadce80b..d4cbe23c 100755 --- a/examples/threefold/grid/utils/zos_version.vsh +++ 
b/examples/threefold/grid/utils/zos_version.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid as tfgrid import freeflowuniverse.herolib.threefold.griddriver diff --git a/examples/threefold/grid/vm_example.vsh b/examples/threefold/grid/vm_example.vsh index 2f158341..ae59f638 100644 --- a/examples/threefold/grid/vm_example.vsh +++ b/examples/threefold/grid/vm_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run struct VMSpecs { deployment_name string diff --git a/examples/threefold/grid/vm_query_example.vsh b/examples/threefold/grid/vm_query_example.vsh index 402330c4..5dd4fb6c 100644 --- a/examples/threefold/grid/vm_query_example.vsh +++ b/examples/threefold/grid/vm_query_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run struct NodeQuery { location string // how to define location diff --git a/examples/threefold/grid/webgw_example.vsh b/examples/threefold/grid/webgw_example.vsh index b2cfa376..42586015 100644 --- a/examples/threefold/grid/webgw_example.vsh +++ b/examples/threefold/grid/webgw_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run struct WebGWArgs { deployment_name string diff --git a/examples/threefold/grid/zdb_example.vsh b/examples/threefold/grid/zdb_example.vsh index 21b5df3d..ab1f2cb7 100644 --- a/examples/threefold/grid/zdb_example.vsh +++ b/examples/threefold/grid/zdb_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.redisclient { RedisClient } diff --git a/examples/threefold/gridproxy/bill.vsh b/examples/threefold/gridproxy/bill.vsh index 99de7f33..0fe36b9c 100755 --- a/examples/threefold/gridproxy/bill.vsh +++ b/examples/threefold/gridproxy/bill.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.ui.console diff --git a/examples/threefold/gridproxy/contract.vsh b/examples/threefold/gridproxy/contract.vsh index 67b97da9..e4c15571 100755 --- a/examples/threefold/gridproxy/contract.vsh +++ b/examples/threefold/gridproxy/contract.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.grid as tfgrid import freeflowuniverse.herolib.threefold.gridproxy diff --git a/examples/threefold/gridproxy/farm.vsh b/examples/threefold/gridproxy/farm.vsh index a7b68d6b..1b96e0f8 100755 --- a/examples/threefold/gridproxy/farm.vsh +++ b/examples/threefold/gridproxy/farm.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none 
-no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.ui.console diff --git a/examples/threefold/gridproxy/gateway.vsh b/examples/threefold/gridproxy/gateway.vsh index d7968147..485d5cde 100755 --- a/examples/threefold/gridproxy/gateway.vsh +++ b/examples/threefold/gridproxy/gateway.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.ui.console diff --git a/examples/threefold/gridproxy/grid.vsh b/examples/threefold/gridproxy/grid.vsh index 01c26f95..383cd103 100755 --- a/examples/threefold/gridproxy/grid.vsh +++ b/examples/threefold/gridproxy/grid.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.ui.console diff --git a/examples/threefold/gridproxy/node.vsh b/examples/threefold/gridproxy/node.vsh index 4107464d..caa2ab3a 100755 --- a/examples/threefold/gridproxy/node.vsh +++ b/examples/threefold/gridproxy/node.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.ui.console diff --git a/examples/threefold/gridproxy/stats.vsh b/examples/threefold/gridproxy/stats.vsh index f8b164d6..18432edd 100755 --- a/examples/threefold/gridproxy/stats.vsh +++ b/examples/threefold/gridproxy/stats.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.threefold.gridproxy.model { NodeStatus } diff --git a/examples/threefold/gridproxy/twin.vsh b/examples/threefold/gridproxy/twin.vsh index 4e74d5b4..2300da82 100755 --- a/examples/threefold/gridproxy/twin.vsh +++ b/examples/threefold/gridproxy/twin.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.ui.console diff --git a/examples/threefold/holochain/holochain_deployer.vsh b/examples/threefold/holochain/holochain_deployer.vsh index d3e6ebb8..f3e3ae1e 100755 --- a/examples/threefold/holochain/holochain_deployer.vsh +++ b/examples/threefold/holochain/holochain_deployer.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.tfrobot import freeflowuniverse.herolib.ui.console diff --git a/examples/threefold/holochain/holochain_vms.vsh b/examples/threefold/holochain/holochain_vms.vsh index 2fa435e9..bbcdd594 100755 --- a/examples/threefold/holochain/holochain_vms.vsh +++ 
b/examples/threefold/holochain/holochain_vms.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.threefold.tfrobot
import freeflowuniverse.herolib.ui.console
diff --git a/examples/threefold/holochain/tasker_example.vsh b/examples/threefold/holochain/tasker_example.vsh
index e342b290..1ea20b37 100755
--- a/examples/threefold/holochain/tasker_example.vsh
+++ b/examples/threefold/holochain/tasker_example.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.threefold.tfrobot
import freeflowuniverse.herolib.ui.console
diff --git a/examples/threefold/holochain/tasker_example2.vsh b/examples/threefold/holochain/tasker_example2.vsh
index 9777bd0f..ab1c1bb5 100755
--- a/examples/threefold/holochain/tasker_example2.vsh
+++ b/examples/threefold/holochain/tasker_example2.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.threefold.tfrobot
import freeflowuniverse.herolib.ui.console
diff --git a/examples/threefold/solana/seahorse_vm.vsh b/examples/threefold/solana/seahorse_vm.vsh
index 6d1f0866..7efcc25e 100755
--- a/examples/threefold/solana/seahorse_vm.vsh
+++ b/examples/threefold/solana/seahorse_vm.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.threefold.grid.models
import freeflowuniverse.herolib.threefold.grid as tfgrid
diff --git a/examples/threefold/tfgrid3deployer/hetzner/hetzner.vsh b/examples/threefold/tfgrid3deployer/hetzner/hetzner.vsh
new file mode 100644
index 00000000..b931274f
--- /dev/null
+++ b/examples/threefold/tfgrid3deployer/hetzner/hetzner.vsh
@@ -0,0 +1,37 @@
+#!/usr/bin/env -S v -gc none -d use_openssl -enable-globals -cg run
+
+//#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals -cg run
+import freeflowuniverse.herolib.threefold.gridproxy
+import freeflowuniverse.herolib.threefold.tfgrid3deployer
+import freeflowuniverse.herolib.installers.threefold.griddriver
+import os
+import time
+
+griddriver.install()!
+
+v := tfgrid3deployer.get()!
+println('cred: ${v}')
+deployment_name := 'hetzner_dep'
+mut deployment := tfgrid3deployer.new_deployment(deployment_name)!
+
+// TODO: find a way to filter hetzner nodes
+deployment.add_machine(
+	name:       'hetzner_vm'
+	cpu:        1
+	memory:     2
+	planetary:  false
+	public_ip4: true
+	size:       10 // 10 GB disk
+	mycelium:   tfgrid3deployer.Mycelium{}
+)
+deployment.deploy()!
+
+vm1 := deployment.vm_get('hetzner_vm')!
+println('vm1 info: ${vm1}')
+
+vm1_public_ip4 := vm1.public_ip4.all_before('/')
+
+deployment.add_webname(name: 'gwtohetzner', backend: 'http://${vm1_public_ip4}:80')
+deployment.deploy()!
+gw1 := deployment.webname_get('gwtohetzner')!
+println('gw info: ${gw1}')
diff --git a/examples/threefold/tfgrid3deployer/open_webui_gw/open_webui_gw.vsh b/examples/threefold/tfgrid3deployer/open_webui_gw/open_webui_gw.vsh
new file mode 100755
index 00000000..08814a67
--- /dev/null
+++ b/examples/threefold/tfgrid3deployer/open_webui_gw/open_webui_gw.vsh
@@ -0,0 +1,41 @@
+#!/usr/bin/env -S v -gc none -d use_openssl -enable-globals -cg run
+
+//#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals -cg run
+import freeflowuniverse.herolib.threefold.gridproxy
+import freeflowuniverse.herolib.threefold.tfgrid3deployer
+import freeflowuniverse.herolib.installers.threefold.griddriver
+import os
+import time
+
+griddriver.install()!
+
+v := tfgrid3deployer.get()!
+println('cred: ${v}')
+
+deployment_name := 'openwebui_example'
+mut deployment := tfgrid3deployer.new_deployment(deployment_name)!
+
+deployment.add_machine(
+	name:      'vm1'
+	cpu:       1
+	memory:    16
+	planetary: true
+	size:      100 // 100 GB disk
+	flist:     'https://hub.grid.tf/mariobassem1.3bot/docker.io-threefolddev-open_webui-latest.flist'
+)
+deployment.deploy()!
+
+vm1 := deployment.vm_get('vm1')!
+println('vm1 info: ${vm1}')
+
+deployment.add_webname(
+	name:                  'openwebui'
+	backend:               'http://${vm1.wireguard_ip}:8080'
+	use_wireguard_network: true
+)
+deployment.deploy()!
+
+gw1 := deployment.webname_get('openwebui')!
+println('gw info: ${gw1}')
+
+// tfgrid3deployer.delete_deployment(deployment_name)!
diff --git a/examples/threefold/tfgrid3deployer/open_webui_gw/readme.md b/examples/threefold/tfgrid3deployer/open_webui_gw/readme.md
new file mode 100644
index 00000000..11ab11ea
--- /dev/null
+++ b/examples/threefold/tfgrid3deployer/open_webui_gw/readme.md
@@ -0,0 +1,53 @@
+# OpenWebUI Deployment on ThreeFold Grid
+
+## Overview
+This script automates the deployment of an OpenWebUI instance on the ThreeFold Grid using the `tfgrid3deployer` module. It sets up a virtual machine (VM), configures networking, and assigns a webname for easy access.
+
+## Requirements
+- V compiler installed
+- OpenSSL support enabled
+- herolib dependencies:
+  - `freeflowuniverse.herolib.threefold.gridproxy`
+  - `freeflowuniverse.herolib.threefold.tfgrid3deployer`
+  - `freeflowuniverse.herolib.installers.threefold.griddriver`
+
+## Installation
+Ensure you have the required dependencies installed. The script will automatically install the `griddriver` before proceeding.
+
+## Usage
+Run the script using the following command:
+
+```sh
+./open_webui_gw.vsh
+```
+
+### Script Execution Steps
+1. Installs the necessary ThreeFold Grid driver.
+2. Retrieves credentials for deployment.
+3. Creates a new deployment named `openwebui_example`.
+4. Adds a VM with the following specifications:
+   - 1 CPU
+   - 16GB RAM
+   - 100GB storage
+   - Uses planetary networking
+   - Deploys OpenWebUI from the ThreeFold Hub.
+5. Deploys the VM.
+6. Retrieves VM information.
+7. Configures a webname (`openwebui`) pointing to the VM's backend.
+8. Deploys the webname for public access.
+9. Retrieves and displays webname gateway details.
+
+## Cleanup
+To delete the deployment, uncomment and run the following line at the end of the script:
+
+```v
+tfgrid3deployer.delete_deployment(deployment_name)!
+```
+
+## Gateway Information
+The gateway points to the WireGuard IP of the VM on port 8080, which is the port the OpenWebUI server listens on.
+
+## Notes
+- Ensure you have a valid ThreeFold Grid account and the necessary permissions to deploy resources.
+- Adjust VM specifications based on your requirements. diff --git a/examples/threefold/tfgrid3deployer/tfgrid3deployer_example.vsh b/examples/threefold/tfgrid3deployer/tfgrid3deployer_example.vsh index 9bd099bc..6aaa9956 100755 --- a/examples/threefold/tfgrid3deployer/tfgrid3deployer_example.vsh +++ b/examples/threefold/tfgrid3deployer/tfgrid3deployer_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.threefold.tfgrid3deployer diff --git a/examples/threefold/tfgrid3deployer/vm_gw_caddy/delete.vsh b/examples/threefold/tfgrid3deployer/vm_gw_caddy/delete.vsh index 24d6e759..a39fb870 100755 --- a/examples/threefold/tfgrid3deployer/vm_gw_caddy/delete.vsh +++ b/examples/threefold/tfgrid3deployer/vm_gw_caddy/delete.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals -cg run +#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals -cg run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.threefold.tfgrid3deployer diff --git a/examples/threefold/tfgrid3deployer/vm_gw_caddy/vm_gw_caddy.vsh b/examples/threefold/tfgrid3deployer/vm_gw_caddy/vm_gw_caddy.vsh index b499d1dc..05e5ebca 100755 --- a/examples/threefold/tfgrid3deployer/vm_gw_caddy/vm_gw_caddy.vsh +++ b/examples/threefold/tfgrid3deployer/vm_gw_caddy/vm_gw_caddy.vsh @@ -1,6 +1,6 @@ -#!/usr/bin/env -S v -gc none -no-retry-compilation -d use_openssl -enable-globals -cg run +#!/usr/bin/env -S v -gc none -d use_openssl -enable-globals -cg run -//#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals -cg run +//#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals -cg run import freeflowuniverse.herolib.threefold.gridproxy import freeflowuniverse.herolib.threefold.tfgrid3deployer import freeflowuniverse.herolib.installers.threefold.griddriver diff --git a/examples/ui/silence.vsh b/examples/ui/silence.vsh index c58733f2..84627974 100755 --- a/examples/ui/silence.vsh +++ b/examples/ui/silence.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.osal import freeflowuniverse.herolib.ui.console diff --git a/examples/virt/docker/.gitignore b/examples/virt/docker/.gitignore index 5ae2bf86..8655bb24 100644 --- a/examples/virt/docker/.gitignore +++ b/examples/virt/docker/.gitignore @@ -5,3 +5,5 @@ docker_init docker_registry presearch_docker tf_dashboard +docker_dev +docker_ubuntu diff --git a/examples/virt/docker/ai_web_ui/ai_web_ui.vsh b/examples/virt/docker/ai_web_ui/ai_web_ui.vsh new file mode 100644 index 00000000..ffe84d2f --- /dev/null +++ b/examples/virt/docker/ai_web_ui/ai_web_ui.vsh @@ -0,0 +1,30 @@ +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.virt.docker + +mut engine := docker.new(prefix: '', localonly: true)! + +mut r := engine.recipe_new(name: 'dev_ubuntu', platform: .ubuntu) + +r.add_from(image: 'ubuntu', tag: 'latest')! +r.add_package(name: 'git,curl')! + +r.add_zinit()! +r.add_sshserver()! + +r.add_run(cmd: 'curl -LsSf https://astral.sh/uv/install.sh | sh')! +r.add_env('PATH', '/root/.local/bin:\$PATH')! 
+r.add_run(cmd: 'uv python install 3.12')! +r.add_run(cmd: 'uv venv /opt/venv')! +r.add_env('VIRTUAL_ENV', '/opt/venv')! +r.add_env('PATH', '/opt/venv/bin:\$PATH')! +r.add_run(cmd: 'uv pip install open-webui')! +r.add_zinit_cmd( + exec: "bash -c 'VIRTUAL_ENV=/opt/venv DATA_DIR=~/.open-webui /root/.local/bin/uvx --python 3.12 open-webui serve'" + name: 'open-webui' +)! + +r.add_run(cmd: 'apt-get clean')! +r.add_run(cmd: 'rm -rf /var/lib/apt/lists/*')! + +r.build(true)! diff --git a/examples/virt/docker/ai_web_ui/readme.md b/examples/virt/docker/ai_web_ui/readme.md new file mode 100644 index 00000000..0cbf6217 --- /dev/null +++ b/examples/virt/docker/ai_web_ui/readme.md @@ -0,0 +1,13 @@ +# AI Web UI + +- make docker build (see docker_ubuntu example) + - start from docker_ubuntu + - for build use our vlang approach (example see docker_ubuntu, make sure we have our zinit & ssh working) + - install the web UI: openwebui (not by docker but use uv to install this software) + - use https://github.com/astral-sh/uv for the python part + - as last step, clean it all up (remove apt cache, ...) +- push to threefold docker hub + - convert in TF Hub from the docker +- have .vsh script which deploys this solution on TFGrid behind webgw and get people to login + - use wireguard to access the machine (as part of .vsh script) +- make tutorial, so everyone can do it, we will use this to show community how to do something with AI on our grid diff --git a/examples/virt/docker/docker_dev.vsh b/examples/virt/docker/docker_dev.vsh new file mode 100755 index 00000000..ddf54d08 --- /dev/null +++ b/examples/virt/docker/docker_dev.vsh @@ -0,0 +1,58 @@ +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.virt.docker + +fn build() ! { + mut engine := docker.new(prefix: '', localonly: true)! + + mut r := engine.recipe_new(name: 'dev_tools', platform: .alpine) + + r.add_from(image: 'alpine', tag: 'latest')! + + r.add_package(name: 'git,mc,htop')! + + r.add_zinit()! + + r.add_sshserver()! + + r.build(true)! +} + +// build()! + +mut engine := docker.new(prefix: '', localonly: true)! + +// Check if dev_tools image exists +if !engine.image_exists(repo: 'dev_tools')! { + eprintln("image dev_tools doesn't exist, build it") + build()! +} + +engine.container_delete(name: 'dev2') or {} + +// Check if container exists and get its status +mut container := engine.container_get( + name: 'dev2' +) or { + // Container doesn't exist, create it + println('Creating dev2 container...') + engine.container_create( + name: 'dev2' + image_repo: 'dev_tools' + remove_when_done: false + forwarded_ports: ['8022:22/tcp'] // this forward 8022 on host to 22 on container + env: { + 'SSH_KEY': 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIahWiRRm9cWAKktH9dndn3R45grKqzPC3mKX8IjGgH6 kristof@incubaid.com' + } + )! +} + +// Start container if not running +if container.status != .up { + println('Starting dev2 container...') + container.start()! +} + +// Open shell to container +println('Opening shell to dev2 container...') +container.shell()! diff --git a/examples/virt/docker/docker_dev_tools.vsh b/examples/virt/docker/docker_dev_tools.vsh deleted file mode 100755 index ef309129..00000000 --- a/examples/virt/docker/docker_dev_tools.vsh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run - -import freeflowuniverse.herolib.virt.docker - -mut engine := docker.new(prefix: '', localonly: true)! 
-
-mut r := engine.recipe_new(name: 'dev_tools', platform: .alpine)
-
-r.add_from(image: 'alpine', tag: 'latest')!
-
-r.add_package(name: 'git,vim')!
-
-r.add_zinit()!
-
-r.add_sshserver()!
-
-r.build(true)!
diff --git a/examples/virt/docker/docker_init.vsh b/examples/virt/docker/docker_init.vsh
index 62d7b78f..a0b906a3 100755
--- a/examples/virt/docker/docker_init.vsh
+++ b/examples/virt/docker/docker_init.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.virt.docker
diff --git a/examples/virt/docker/docker_registry.vsh b/examples/virt/docker/docker_registry.vsh
index 7317a39e..5e3c7ee4 100755
--- a/examples/virt/docker/docker_registry.vsh
+++ b/examples/virt/docker/docker_registry.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.virt.docker
diff --git a/examples/virt/docker/docker_ubuntu.vsh b/examples/virt/docker/docker_ubuntu.vsh
new file mode 100755
index 00000000..4fc4cc3e
--- /dev/null
+++ b/examples/virt/docker/docker_ubuntu.vsh
@@ -0,0 +1,58 @@
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
+
+import freeflowuniverse.herolib.virt.docker
+
+fn build() ! {
+	mut engine := docker.new(prefix: '', localonly: true)!
+
+	mut r := engine.recipe_new(name: 'dev_ubuntu', platform: .ubuntu)
+
+	r.add_from(image: 'ubuntu', tag: '24.10')!
+
+	r.add_package(name: 'git,mc,htop')!
+
+	r.add_zinit()!
+
+	r.add_sshserver()!
+
+	r.build(true)!
+}
+
+// build()!
+
+mut engine := docker.new(prefix: '', localonly: true)!
+
+// Check if dev_ubuntu image exists
+if !engine.image_exists(repo: 'dev_ubuntu')! {
+	eprintln("image dev_ubuntu doesn't exist, build it")
+	build()!
+}
+
+engine.container_delete(name: 'dev3') or {}
+
+// Check if container exists and get its status
+mut container := engine.container_get(
+	name: 'dev3'
+) or {
+	// Container doesn't exist, create it
+	println('Creating dev3 container...')
+	engine.container_create(
+		name:             'dev3'
+		image_repo:       'dev_ubuntu'
+		remove_when_done: false
+		forwarded_ports:  ['8023:22/tcp'] // this forwards port 8023 on the host to 22 in the container
+		env:              {
+			'SSH_KEY': 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIahWiRRm9cWAKktH9dndn3R45grKqzPC3mKX8IjGgH6 kristof@incubaid.com'
+		}
+	)!
+}
+
+// Start container if not running
+if container.status != .up {
+	println('Starting dev3 container...')
+	container.start()!
+}
+
+// Open shell to container
+println('Opening shell to dev3 container...')
+container.shell()!
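Once `dev3` is running, the forwarded port makes the container's zinit-managed SSH server reachable from the host. A minimal usage sketch — it assumes the private key matching the `SSH_KEY` value injected above is available locally and that the image's SSH server accepts root logins:

```bash
# host port 8023 is mapped to port 22 inside the dev3 container
ssh -p 8023 root@localhost
```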
diff --git a/examples/virt/docker/presearch_docker.vsh b/examples/virt/docker/presearch_docker.vsh index e56c2388..a5f57024 100755 --- a/examples/virt/docker/presearch_docker.vsh +++ b/examples/virt/docker/presearch_docker.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.virt.docker import os diff --git a/examples/virt/docker/tf_dashboard.vsh b/examples/virt/docker/tf_dashboard.vsh index 82d60d39..01e947f4 100755 --- a/examples/virt/docker/tf_dashboard.vsh +++ b/examples/virt/docker/tf_dashboard.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.virt.docker diff --git a/examples/virt/hetzner/hetzner_example.vsh b/examples/virt/hetzner/hetzner_example.vsh index adef3363..5035425c 100755 --- a/examples/virt/hetzner/hetzner_example.vsh +++ b/examples/virt/hetzner/hetzner_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.virt.hetzner import freeflowuniverse.herolib.ui.console diff --git a/examples/virt/lima/lima_example.vsh b/examples/virt/lima/lima_example.vsh index ac2203d4..1214f0c9 100755 --- a/examples/virt/lima/lima_example.vsh +++ b/examples/virt/lima/lima_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.virt.lima import freeflowuniverse.herolib.core.texttools diff --git a/examples/virt/podman_buildah/buildah_example.vsh b/examples/virt/podman_buildah/buildah_example.vsh index 3f6823aa..cfa37739 100755 --- a/examples/virt/podman_buildah/buildah_example.vsh +++ b/examples/virt/podman_buildah/buildah_example.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.virt.herocontainers import freeflowuniverse.herolib.ui.console diff --git a/examples/virt/podman_buildah/buildah_run.vsh b/examples/virt/podman_buildah/buildah_run.vsh index c3c7f2f9..504b21f5 100755 --- a/examples/virt/podman_buildah/buildah_run.vsh +++ b/examples/virt/podman_buildah/buildah_run.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.virt.herocontainers import freeflowuniverse.herolib.ui.console diff --git a/examples/virt/podman_buildah/buildah_run_clean.vsh b/examples/virt/podman_buildah/buildah_run_clean.vsh index 3eada9d3..e538ed05 100755 --- a/examples/virt/podman_buildah/buildah_run_clean.vsh +++ b/examples/virt/podman_buildah/buildah_run_clean.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.virt.herocontainers import 
freeflowuniverse.herolib.ui.console diff --git a/examples/virt/podman_buildah/buildah_run_mdbook.vsh b/examples/virt/podman_buildah/buildah_run_mdbook.vsh index 23ffca50..1cb931d1 100755 --- a/examples/virt/podman_buildah/buildah_run_mdbook.vsh +++ b/examples/virt/podman_buildah/buildah_run_mdbook.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import os import flag diff --git a/examples/virt/windows/cloudhypervisor.vsh b/examples/virt/windows/cloudhypervisor.vsh index ce457359..b9dd9574 100755 --- a/examples/virt/windows/cloudhypervisor.vsh +++ b/examples/virt/windows/cloudhypervisor.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.installers.virt.cloudhypervisor as cloudhypervisor_installer import freeflowuniverse.herolib.virt.cloudhypervisor diff --git a/examples/webdav/webdav.vsh b/examples/webdav/webdav.vsh index 6ca1acb2..28471daf 100755 --- a/examples/webdav/webdav.vsh +++ b/examples/webdav/webdav.vsh @@ -1,4 +1,4 @@ -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.webdav import freeflowuniverse.herolib.core.pathlib diff --git a/examples/webtools/docusaurus/docusaurus_example.vsh b/examples/webtools/docusaurus/docusaurus_example.vsh new file mode 100755 index 00000000..1efb70aa --- /dev/null +++ b/examples/webtools/docusaurus/docusaurus_example.vsh @@ -0,0 +1,43 @@ +#!/usr/bin/env -S v -n -w -gc none -cg -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.web.docusaurus +// import freeflowuniverse.herolib.data.doctree + +// Create a new docusaurus factory +mut docs := docusaurus.new( + // build_path: '/tmp/docusaurus_build' +)! + +// Create a new docusaurus site +mut site := docs.dev( + url:'https://git.ourworld.tf/despiegk/docs_kristof' +)! + + +//FOR FUTURE TO ADD CONTENT FROM DOCTREE + +// Create a doctree for content +// mut tree := doctree.new(name: 'content')! + +// // Add some content from a git repository +// tree.scan( +// git_url: 'https://github.com/yourusername/your-docs-repo' +// git_pull: true +// )! + +// // Export the content to the docusaurus site +// tree.export( +// destination: '${site.path_build.path}/docs' +// reset: true +// keep_structure: true +// exclude_errors: false +// )! + +// Build the docusaurus site +//site.build()! + +// Generate the static site +//site.generate()! + +// Optionally open the site in a browser +// site.open()! 
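Like the other `.vsh` examples, the script above is marked executable (mode 100755) and runs directly through its shebang, so trying it out should be a one-liner:

```bash
./examples/webtools/docusaurus/docusaurus_example.vsh
```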
diff --git a/examples/webtools/mdbook_markdown/doctree_export.vsh b/examples/webtools/mdbook_markdown/doctree_export.vsh
index fc9a9bea..7d161f2f 100755
--- a/examples/webtools/mdbook_markdown/doctree_export.vsh
+++ b/examples/webtools/mdbook_markdown/doctree_export.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 import freeflowuniverse.herolib.data.doctree
 
diff --git a/examples/webtools/mdbook_markdown/markdown_example.vsh b/examples/webtools/mdbook_markdown/markdown_example.vsh
index 2b34ece7..dbfd3675 100755
--- a/examples/webtools/mdbook_markdown/markdown_example.vsh
+++ b/examples/webtools/mdbook_markdown/markdown_example.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 // import freeflowuniverse.herolib.core.texttools
 import freeflowuniverse.herolib.ui.console
 
diff --git a/generate.vsh b/generate.vsh
index b6df27e0..b2766679 100755
--- a/generate.vsh
+++ b/generate.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -n -w -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
 
 import os
 import flag
 
diff --git a/install_hero.sh b/install_hero.sh
new file mode 100755
index 00000000..614434be
--- /dev/null
+++ b/install_hero.sh
@@ -0,0 +1,125 @@
+#!/bin/bash -e
+
+set -e
+
+os_name="$(uname -s)"
+arch_name="$(uname -m)"
+
+# Select the URL based on the platform
+if [[ "$os_name" == "Linux" && "$arch_name" == "x86_64" ]]; then
+    url="https://f003.backblazeb2.com/file/threefold/linux-i64/hero"
+elif [[ "$os_name" == "Darwin" && "$arch_name" == "arm64" ]]; then
+    url="https://f003.backblazeb2.com/file/threefold/macos-arm64/hero"
+# elif [[ "$os_name" == "Darwin" && "$arch_name" == "x86_64" ]]; then
+#     url="https://f003.backblazeb2.com/file/threefold/macos-i64/hero"
+else
+    echo "Unsupported platform."
+    exit 1
+fi
+
+# Check for existing hero installations
+existing_hero=$(which hero 2>/dev/null || true)
+if [ ! -z "$existing_hero" ]; then
+    echo "Found existing hero installation at: $existing_hero"
+    if [ -w "$(dirname "$existing_hero")" ]; then
+        echo "Removing existing hero installation..."
+        rm "$existing_hero" || { echo "Error: Failed to remove existing hero binary at $existing_hero"; exit 1; }
+    else
+        echo "Error: Cannot remove existing hero installation at $existing_hero (permission denied)"
+        echo "Please remove it manually with sudo and run this script again"
+        exit 1
+    fi
+fi
+
+# uname -s reports "Darwin" on macOS; $os_name is set above
+if [[ "$os_name" == "Darwin" ]]; then
+    # Check if /usr/local/bin/hero exists and remove it
+    if [ -f /usr/local/bin/hero ]; then
+        rm /usr/local/bin/hero || { echo "Error: Failed to remove existing hero binary"; exit 1; }
+    fi
+
+    # Check if brew is installed
+    if ! command -v brew &> /dev/null; then
+        echo "Homebrew is required but not installed."
+        read -p "Would you like to install Homebrew? (y/n) " -n 1 -r
+        echo
+        if [[ $REPLY =~ ^[Yy]$ ]]; then
+            echo "Installing Homebrew..."
+            /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" || {
+                echo "Error: Failed to install Homebrew"
+                exit 1
+            }
+        else
+            echo "Homebrew is required to continue. Installation aborted."
+            exit 1
+        fi
+    fi
+
+    # Update Homebrew
+    echo "Updating Homebrew..."
+    if ! brew update; then
+        echo "Error: Failed to update Homebrew. Please check your internet connection and try again."
+        exit 1
+    fi
+
+    # Upgrade Homebrew packages
+    echo "Upgrading Homebrew packages..."
+    if ! brew upgrade; then
+        echo "Error: Failed to upgrade Homebrew packages. Please check your internet connection and try again."
+        exit 1
+    fi
+fi
+
+if [ -z "$url" ]; then
+    echo "Could not find url to download."
+    exit 1
+fi
+
+zprofile="${HOME}/.zprofile"
+hero_bin_path="${HOME}/hero/bin"
+temp_file="$(mktemp)"
+# Ensure the temporary file is removed if the script is interrupted before the mv below
+trap 'rm -f "$temp_file"' EXIT
+
+# Check if ~/.zprofile exists
+if [ -f "$zprofile" ]; then
+    # Read each line and exclude any that modify the PATH with ~/hero/bin
+    while IFS= read -r line; do
+        if [[ ! "$line" =~ $hero_bin_path ]]; then
+            echo "$line" >> "$temp_file"
+        fi
+    done < "$zprofile"
+else
+    touch "$zprofile"
+fi
+# Add ~/hero/bin to the PATH statement
+echo "export PATH=\$PATH:$hero_bin_path" >> "$temp_file"
+# Replace the original .zprofile with the modified version
+mv "$temp_file" "$zprofile"
+
+# Output the selected URL
+echo "Download URL for your platform: $url"
+
+# Download the file
+curl -o /tmp/downloaded_file -L "$url"
+
+# Sanity check: the downloaded binary should be at least 2 MB
+file_size=$(du -m /tmp/downloaded_file | cut -f1)
+if [ "$file_size" -ge 2 ]; then
+    # Create the target directory if it doesn't exist
+    mkdir -p ~/hero/bin
+    if [[ "$OSTYPE" == "darwin"* ]]; then
+        # Move and rename the file
+        mv /tmp/downloaded_file ~/hero/bin/hero
+        chmod +x ~/hero/bin/hero
+    else
+        mv /tmp/downloaded_file /usr/local/bin/hero
+        chmod +x /usr/local/bin/hero
+    fi
+
+    echo "Hero installed properly"
+    export PATH=$PATH:$hero_bin_path
+    hero -version
+else
+    echo "Downloaded file is smaller than 2 MB. Process aborted."
+    exit 1
+fi
diff --git a/install_herolib.vsh b/install_herolib.vsh
index 49da3cf2..9097331c 100755
--- a/install_herolib.vsh
+++ b/install_herolib.vsh
@@ -1,48 +1,49 @@
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 import os
 import flag
 
 fn addtoscript(tofind string, toadd string) ! {
-	home_dir := os.home_dir()
-	mut rc_file := '${home_dir}/.zshrc'
-	if !os.exists(rc_file) {
-		rc_file = '${home_dir}/.bashrc'
-		if !os.exists(rc_file) {
-			return error('No .zshrc or .bashrc found in home directory')
-		}
-	}
+    home_dir := os.home_dir()
+    mut rc_file := '${home_dir}/.zshrc'
+    if !os.exists(rc_file) {
+        rc_file = '${home_dir}/.bashrc'
+        if !os.exists(rc_file) {
+            return error('No .zshrc or .bashrc found in home directory')
+        }
+    }
 
-	// Read current content
-	mut content := os.read_file(rc_file)!
+    // Read current content
+    mut content := os.read_file(rc_file)!
+ + // Remove existing alias if present + lines := content.split('\n') + mut new_lines := []string{} + mut prev_is_emtpy := false + for line in lines { + if prev_is_emtpy { + if line.trim_space() == ""{ + continue + }else{ + prev_is_emtpy = false + } + } + if line.trim_space() == ""{ + prev_is_emtpy = true + } - // Remove existing alias if present - lines := content.split('\n') - mut new_lines := []string{} - mut prev_is_emtpy := false - for line in lines { - if prev_is_emtpy { - if line.trim_space() == '' { - continue - } else { - prev_is_emtpy = false - } - } - if line.trim_space() == '' { - prev_is_emtpy = true - } - - if !line.contains(tofind) { - new_lines << line - } - } - new_lines << toadd - new_lines << '' - // Write back to file - new_content := new_lines.join('\n') - os.write_file(rc_file, new_content)! + if !line.contains(tofind) { + new_lines << line + } + } + new_lines << toadd + new_lines << "" + // Write back to file + new_content := new_lines.join('\n') + os.write_file(rc_file, new_content)! } + vroot := @VROOT abs_dir_of_script := dir(@FILE) @@ -51,24 +52,24 @@ println('Resetting all symlinks...') os.rm('${os.home_dir()}/.vmodules/freeflowuniverse/herolib') or {} // Create necessary directories -os.mkdir_all('${os.home_dir()}/.vmodules/freeflowuniverse') or { - panic('Failed to create directory ~/.vmodules/freeflowuniverse: ${err}') +os.mkdir_all('${os.home_dir()}/.vmodules/freeflowuniverse') or { + panic('Failed to create directory ~/.vmodules/freeflowuniverse: ${err}') } // Create new symlinks os.symlink('${abs_dir_of_script}/lib', '${os.home_dir()}/.vmodules/freeflowuniverse/herolib') or { - panic('Failed to create herolib symlink: ${err}') + panic('Failed to create herolib symlink: ${err}') } println('Herolib installation completed successfully!') // Add vtest alias -addtoscript('alias vtest=', "alias vtest='v -stats -enable-globals -n -w -cg -gc none -no-retry-compilation -cc tcc test' ") or { - eprintln('Failed to add vtest alias: ${err}') +addtoscript('alias vtest=', 'alias vtest=\'v -stats -enable-globals -n -w -cg -gc none -cc tcc test\' ') or { + eprintln('Failed to add vtest alias: ${err}') } addtoscript('HOME/hero/bin', 'export PATH="\$PATH:\$HOME/hero/bin"') or { - eprintln('Failed to add path to hero, ${err}') + eprintln('Failed to add path to hero, ${err}') } // ulimit -n 32000 diff --git a/install_v.sh b/install_v.sh index c43737ad..32c28f4f 100755 --- a/install_v.sh +++ b/install_v.sh @@ -1,4 +1,3 @@ - #!/bin/bash -e # Help function @@ -8,12 +7,11 @@ print_help() { echo "Usage: $0 [options]" echo echo "Options:" - echo " -h, --help Show this help message" - echo " --reset Force reinstallation of V" - echo " --remove Remove V installation and exit" - echo " --analyzer Install/update v-analyzer" - echo " --herolib Install our herolib" - echo " --github-actions Install for github actions" + echo " -h, --help Show this help message" + echo " --reset Force reinstallation of V" + echo " --remove Remove V installation and exit" + echo " --analyzer Install/update v-analyzer" + echo " --herolib Install our herolib" echo echo "Examples:" echo " $0" @@ -22,7 +20,6 @@ print_help() { echo " $0 --analyzer " echo " $0 --herolib " echo " $0 --reset --analyzer # Fresh install of both" - echo " $0 --github-actions" echo } @@ -31,7 +28,6 @@ RESET=false REMOVE=false INSTALL_ANALYZER=false HEROLIB=false -IS_GITHUB_ACTIONS=false for arg in "$@"; do case $arg in @@ -51,9 +47,6 @@ for arg in "$@"; do --analyzer) INSTALL_ANALYZER=true ;; - --github-actions) - 
IS_GITHUB_ACTIONS=true - ;; *) echo "Unknown option: $arg" echo "Use -h or --help to see available options" @@ -99,19 +92,35 @@ function package_check_install { function package_install { local command_name="$1" if [[ "${OSNAME}" == "ubuntu" ]]; then - sudo apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential + if is_github_actions; then + sudo apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential + else + apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential + fi + elif [[ "${OSNAME}" == "darwin"* ]]; then brew install $command_name elif [[ "${OSNAME}" == "alpine"* ]]; then - sudo apk add $command_name + apk add $command_name elif [[ "${OSNAME}" == "arch"* ]]; then - sudo pacman --noconfirm -Su $command_name + pacman --noconfirm -Su $command_name else echo "platform : ${OSNAME} not supported" exit 1 fi } +is_github_actions() { + # echo "Checking GitHub Actions environment..." + # echo "GITHUB_ACTIONS=${GITHUB_ACTIONS:-not set}" + if [ -n "$GITHUB_ACTIONS" ] && [ "$GITHUB_ACTIONS" = "true" ]; then + echo "Running in GitHub Actions: true" + return 0 + else + echo "Running in GitHub Actions: false" + return 1 + fi +} function myplatform { @@ -149,26 +158,26 @@ myplatform function os_update { echo ' - os update' if [[ "${OSNAME}" == "ubuntu" ]]; then - if [ "$IS_GITHUB_ACTIONS" = true ]; then + if is_github_actions; then echo "github actions" else - sudo rm -f /var/lib/apt/lists/lock - sudo rm -f /var/cache/apt/archives/lock - sudo rm -f /var/lib/dpkg/lock* + rm -f /var/lib/apt/lists/lock + rm -f /var/cache/apt/archives/lock + rm -f /var/lib/dpkg/lock* fi export TERM=xterm export DEBIAN_FRONTEND=noninteractive - dpkg --configure -a + sudo dpkg --configure -a sudo apt update -y - if [ "$IS_GITHUB_ACTIONS" = true ]; then + if is_github_actions; then echo "** IN GITHUB ACTIONS, DON'T DO UPDATE" - else - set +e + else + set +e echo "** UPDATE" - sudo apt-mark hold grub-efi-amd64-signed + apt-mark hold grub-efi-amd64-signed set -e - sudo apt upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes - sudo apt autoremove -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes + apt upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes + apt autoremove -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes fi #apt install apt-transport-https ca-certificates curl software-properties-common -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes package_install "apt-transport-https ca-certificates curl wget software-properties-common tmux" @@ -183,17 +192,16 @@ function os_update { unset NONINTERACTIVE fi set +e - brew update brew install mc redis curl tmux screen htop wget rclone tcc set -e elif [[ "${OSNAME}" == "alpine"* ]]; then - sudo apk update screen git htop tmux - sudo apk add mc curl rsync htop redis bash bash-completion screen git rclone + apk update screen git htop tmux + apk add mc curl rsync htop redis bash bash-completion screen git rclone sed -i 's#/bin/ash#/bin/bash#g' /etc/passwd elif [[ "${OSNAME}" == "arch"* ]]; then - sudo pacman -Syy --noconfirm - sudo pacman -Syu --noconfirm - sudo pacman -Su --noconfirm arch-install-scripts gcc mc git tmux curl 
htop redis wget screen net-tools git sudo htop ca-certificates lsb-release screen rclone
+    pacman -Syy --noconfirm
+    pacman -Syu --noconfirm
+    pacman -Su --noconfirm arch-install-scripts gcc mc git tmux curl htop redis wget screen net-tools git sudo htop ca-certificates lsb-release screen rclone
 
     # Check if builduser exists, create if not
     if ! id -u builduser > /dev/null 2>&1; then
@@ -202,9 +210,9 @@ function os_update {
     echo 'builduser ALL=(ALL) NOPASSWD: ALL' | tee /etc/sudoers.d/builduser
     fi
 
-    if [[ -n "${DEBUG}" ]]; then
-        execute_with_marker "paru_install" paru_install
-    fi
+    # if [[ -n "${DEBUG}" ]]; then
+    #     execute_with_marker "paru_install" paru_install
+    # fi
     fi
     echo ' - os update done'
 }
@@ -239,7 +247,7 @@ function install_secp256k1 {
         brew install secp256k1
     elif [[ "${OSNAME}" == "ubuntu" ]]; then
         # Install build dependencies
-        sudo apt-get install -y build-essential wget autoconf libtool
+        package_install "build-essential wget autoconf libtool"
 
         # Download and extract secp256k1
         cd "${DIR_BUILD}"
@@ -251,8 +259,12 @@
         ./autogen.sh
         ./configure
         make -j 5
-        sudo make install
-
+        if is_github_actions; then
+            sudo make install
+        else
+            make install
+        fi
+
         # Cleanup
         cd ..
         rm -rf secp256k1-0.3.2 v0.3.2.tar.gz
@@ -304,10 +316,33 @@ remove_all() {
 
 # Function to check if a service is running and start it if needed
 check_and_start_redis() {
+
+    # Normal service management for non-container environments
     if [[ "${OSNAME}" == "ubuntu" ]] || [[ "${OSNAME}" == "debian" ]]; then
+        # Handle Redis installation for GitHub Actions environment
+        if is_github_actions; then
+
+            # Import Redis GPG key
+            curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
+            # Add Redis repository
+            echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list
+            # Install Redis
+            sudo apt-get update
+            sudo apt-get install -y redis
+
+            # Start Redis
+            redis-server --daemonize yes
+
+            # Print versions
+            redis-cli --version
+            redis-server --version
+
+            return
+        fi
+
         # Check if running inside a container
         if grep -q "/docker/" /proc/1/cgroup || [ ! -d "/run/systemd/system" ]; then
             echo "Running inside a container. Starting redis directly."
@@ -351,7 +386,7 @@ check_and_start_redis() {
             echo "redis is already running."
         else
             echo "redis is not running. Starting it..."
-            sudo rc-service "redis" start
+            rc-service "redis" start
         fi
     elif [[ "${OSNAME}" == "arch"* ]]; then
         if systemctl is-active --quiet "redis"; then
@@ -366,6 +401,74 @@ check_and_start_redis() {
     fi
 }
 
+v-install() {
+
+    # Only clone and install if no V checkout exists yet (the clone goes into ~/_code/v)
+    if [ ! -d ~/code/v ] && [ ! -d ~/_code/v ]; then
+        echo "Installing V..."
+        mkdir -p ~/_code
+        cd ~/_code
+        git clone --depth=1 https://github.com/vlang/v
+        cd v
+        make
+        sudo ./v symlink
+    fi
+
+    # Verify v is in path
+    if ! command_exists v; then
+        echo "Error: V installation failed or not in PATH"
+        echo "Please ensure ~/_code/v is in your PATH"
+        exit 1
+    fi
+
+    echo "V installation successful!"
+
+}
+
+
+v-analyzer() {
+
+    # Install v-analyzer if requested
+    if [ "$INSTALL_ANALYZER" = true ]; then
+        echo "Installing v-analyzer..."
+        v download -RD https://raw.githubusercontent.com/vlang/v-analyzer/main/install.vsh
+
+        # Check if v-analyzer bin directory exists
+        if [ !
-d "$HOME/.config/v-analyzer/bin" ]; then + echo "Error: v-analyzer bin directory not found at $HOME/.config/v-analyzer/bin" + echo "Please ensure v-analyzer was installed correctly" + exit 1 + fi + + echo "v-analyzer installation successful!" + fi + + # Add v-analyzer to PATH if installed + if [ -d "$HOME/.config/v-analyzer/bin" ]; then + V_ANALYZER_PATH='export PATH="$PATH:$HOME/.config/v-analyzer/bin"' + + # Function to add path to rc file if not present + add_to_rc() { + local RC_FILE="$1" + if [ -f "$RC_FILE" ]; then + if ! grep -q "v-analyzer/bin" "$RC_FILE"; then + echo "" >> "$RC_FILE" + echo "$V_ANALYZER_PATH" >> "$RC_FILE" + echo "Added v-analyzer to $RC_FILE" + else + echo "v-analyzer path already exists in $RC_FILE" + fi + fi + } + + # Add to both .zshrc and .bashrc if they exist + add_to_rc ~/.zshrc + if [ "$(uname)" = "Darwin" ] && [ -f ~/.bashrc ]; then + add_to_rc ~/.bashrc + fi + fi +} + # Handle remove if requested @@ -394,72 +497,15 @@ if [ "$RESET" = true ] || ! command_exists v; then # Install secp256k1 install_secp256k1 - # Only clone and install if directory doesn't exist - if [ ! -d ~/code/v ]; then - echo "Installing V..." - mkdir -p ~/_code - cd ~/_code - git clone --depth=1 https://github.com/vlang/v - cd v - make - sudo ./v symlink + v-install + + # Only install v-analyzer if not in GitHub Actions environment + if ! is_github_actions; then + v-analyzer fi - # Verify v is in path - if ! command_exists v; then - echo "Error: V installation failed or not in PATH" - echo "Please ensure ~/code/v is in your PATH" - exit 1 - fi - echo "V installation successful!" fi -# Install v-analyzer if requested -if [ "$INSTALL_ANALYZER" = true ]; then - echo "Installing v-analyzer..." - v download -RD https://raw.githubusercontent.com/vlang/v-analyzer/main/install.vsh - - # Check if v-analyzer bin directory exists - if [ ! -d "$HOME/.config/v-analyzer/bin" ]; then - echo "Error: v-analyzer bin directory not found at $HOME/.config/v-analyzer/bin" - echo "Please ensure v-analyzer was installed correctly" - exit 1 - fi - - echo "v-analyzer installation successful!" -fi - -# Add v-analyzer to PATH if installed -if [ -d "$HOME/.config/v-analyzer/bin" ]; then - V_ANALYZER_PATH='export PATH="$PATH:$HOME/.config/v-analyzer/bin"' - - # Function to add path to rc file if not present - add_to_rc() { - local RC_FILE="$1" - if [ -f "$RC_FILE" ]; then - if ! grep -q "v-analyzer/bin" "$RC_FILE"; then - echo "" >> "$RC_FILE" - echo "$V_ANALYZER_PATH" >> "$RC_FILE" - echo "Added v-analyzer to $RC_FILE" - else - echo "v-analyzer path already exists in $RC_FILE" - fi - fi - } - - # Add to both .zshrc and .bashrc if they exist - add_to_rc ~/.zshrc - if [ "$(uname)" = "Darwin" ] && [ -f ~/.bashrc ]; then - add_to_rc ~/.bashrc - fi -fi - -# Final verification -if ! command_exists v; then - echo "Error: V is not accessible in PATH" - echo "Please add ~/code/v to your PATH and try again" - exit 1 -fi check_and_start_redis @@ -469,9 +515,9 @@ if [ "$HEROLIB" = true ]; then fi -# if [ "$INSTALL_ANALYZER" = true ]; then -# echo "Run 'source ~/.bashrc' or 'source ~/.zshrc' to update PATH for v-analyzer" -# fi +if [ "$INSTALL_ANALYZER" = true ]; then + echo "Run 'source ~/.bashrc' or 'source ~/.zshrc' to update PATH for v-analyzer" +fi echo "Installation complete!" 
diff --git a/lib/builder/builder_factory.v b/lib/builder/builder_factory.v index bd2d2a57..02599ee5 100644 --- a/lib/builder/builder_factory.v +++ b/lib/builder/builder_factory.v @@ -7,7 +7,7 @@ pub struct BuilderFactory { } pub fn new() !BuilderFactory { - mut c := base.context()! + _ := base.context()! mut bf := BuilderFactory{} return bf } diff --git a/lib/clients/livekit/client.v b/lib/clients/livekit/client.v index 44e16c98..79158ae7 100644 --- a/lib/clients/livekit/client.v +++ b/lib/clients/livekit/client.v @@ -3,7 +3,7 @@ module livekit // App struct with `livekit.Client`, API keys, and other shared data pub struct Client { pub: - url string @[required] - api_key string @[required] - api_secret string @[required] + url string @[required] + api_key string @[required] + api_secret string @[required] } diff --git a/lib/clients/livekit/factory.v b/lib/clients/livekit/factory.v index 033eb2f9..31f6efe6 100644 --- a/lib/clients/livekit/factory.v +++ b/lib/clients/livekit/factory.v @@ -1,6 +1,7 @@ - module livekit pub fn new(client Client) Client { - return Client{...client} -} \ No newline at end of file + return Client{ + ...client + } +} diff --git a/lib/clients/livekit/room.v b/lib/clients/livekit/room.v index cce17e32..5687ed3d 100644 --- a/lib/clients/livekit/room.v +++ b/lib/clients/livekit/room.v @@ -5,47 +5,46 @@ import json @[params] pub struct ListRoomsParams { - names []string + names []string } pub struct ListRoomsResponse { pub: - rooms []Room + rooms []Room } pub fn (c Client) list_rooms(params ListRoomsParams) !ListRoomsResponse { - // Prepare request body - request := params - request_json := json.encode(request) + // Prepare request body + request := params + request_json := json.encode(request) - - // create token and give grant to list rooms + // create token and give grant to list rooms mut token := c.new_access_token()! - token.grants.video.room_list = true + token.grants.video.room_list = true - // make POST request - url := '${c.url}/twirp/livekit.RoomService/ListRooms' - // Configure HTTP request - mut headers := http.new_header_from_map({ - http.CommonHeader.authorization: 'Bearer ${token.to_jwt()!}', - http.CommonHeader.content_type: 'application/json' - }) + // make POST request + url := '${c.url}/twirp/livekit.RoomService/ListRooms' + // Configure HTTP request + mut headers := http.new_header_from_map({ + http.CommonHeader.authorization: 'Bearer ${token.to_jwt()!}' + http.CommonHeader.content_type: 'application/json' + }) - response := http.fetch(http.FetchConfig{ - url: url - method: .post - header: headers - data: request_json - })! + response := http.fetch(http.FetchConfig{ + url: url + method: .post + header: headers + data: request_json + })! 
- if response.status_code != 200 { - return error('Failed to list rooms: $response.status_code') - } + if response.status_code != 200 { + return error('Failed to list rooms: ${response.status_code}') + } - // Parse response - rooms_response := json.decode(ListRoomsResponse, response.body) or { - return error('Failed to parse response: $err') - } - - return rooms_response + // Parse response + rooms_response := json.decode(ListRoomsResponse, response.body) or { + return error('Failed to parse response: ${err}') + } + + return rooms_response } diff --git a/lib/clients/livekit/room_model.v b/lib/clients/livekit/room_model.v index 119c9a67..4586b545 100644 --- a/lib/clients/livekit/room_model.v +++ b/lib/clients/livekit/room_model.v @@ -5,29 +5,29 @@ import json pub struct Codec { pub: - fmtp_line string - mime string + fmtp_line string + mime string } pub struct Version { pub: - ticks u64 - unix_micro string + ticks u64 + unix_micro string } pub struct Room { pub: - active_recording bool - creation_time string - departure_timeout int - empty_timeout int - enabled_codecs []Codec - max_participants int - metadata string - name string - num_participants int - num_publishers int - sid string - turn_password string - version Version -} \ No newline at end of file + active_recording bool + creation_time string + departure_timeout int + empty_timeout int + enabled_codecs []Codec + max_participants int + metadata string + name string + num_participants int + num_publishers int + sid string + turn_password string + version Version +} diff --git a/lib/clients/livekit/room_test.v b/lib/clients/livekit/room_test.v index 2bd9d3fe..de9d8e4d 100644 --- a/lib/clients/livekit/room_test.v +++ b/lib/clients/livekit/room_test.v @@ -6,20 +6,20 @@ import freeflowuniverse.herolib.osal const env_file = '${os.dir(@FILE)}/.env' fn testsuite_begin() ! { - if os.exists(env_file) { - osal.load_env_file(env_file)! - } + if os.exists(env_file) { + osal.load_env_file(env_file)! + } } fn new_test_client() Client { - return new( - url: os.getenv('LIVEKIT_URL') - api_key: os.getenv('LIVEKIT_API_KEY') - api_secret: os.getenv('LIVEKIT_API_SECRET') - ) + return new( + url: os.getenv('LIVEKIT_URL') + api_key: os.getenv('LIVEKIT_API_KEY') + api_secret: os.getenv('LIVEKIT_API_SECRET') + ) } fn test_client_list_rooms() ! { - client := new_test_client() - rooms := client.list_rooms()! + client := new_test_client() + rooms := client.list_rooms()! 
} diff --git a/lib/clients/livekit/token.v b/lib/clients/livekit/token.v index a1cdd881..ea30877c 100644 --- a/lib/clients/livekit/token.v +++ b/lib/clients/livekit/token.v @@ -10,25 +10,25 @@ import json // Define AccessTokenOptions struct @[params] pub struct AccessTokenOptions { - pub mut: - ttl int = 21600// TTL in seconds - name string // Display name for the participant - identity string // Identity of the user - metadata string // Custom metadata to be passed to participants +pub mut: + ttl int = 21600 // TTL in seconds + name string // Display name for the participant + identity string // Identity of the user + metadata string // Custom metadata to be passed to participants } // Constructor for AccessToken pub fn (client Client) new_access_token(options AccessTokenOptions) !AccessToken { return AccessToken{ - api_key: client.api_key + api_key: client.api_key api_secret: client.api_secret - identity: options.identity - ttl: options.ttl - grants: ClaimGrants{ - exp: time.now().unix()+ options.ttl - iss: client.api_key - sub: options.name + identity: options.identity + ttl: options.ttl + grants: ClaimGrants{ + exp: time.now().unix() + options.ttl + iss: client.api_key + sub: options.name name: options.name } } -} \ No newline at end of file +} diff --git a/lib/clients/livekit/token_model.v b/lib/clients/livekit/token_model.v index 84a6b352..4cb33d67 100644 --- a/lib/clients/livekit/token_model.v +++ b/lib/clients/livekit/token_model.v @@ -10,23 +10,23 @@ import json // Struct representing grants pub struct ClaimGrants { pub mut: - video VideoGrant - iss string - exp i64 - nbf int - sub string - name string + video VideoGrant + iss string + exp i64 + nbf int + sub string + name string } // VideoGrant struct placeholder pub struct VideoGrant { pub mut: - room string - room_join bool @[json: 'roomJoin'] - room_list bool @[json: 'roomList'] - can_publish bool @[json: 'canPublish'] - can_publish_data bool @[json: 'canPublishData'] - can_subscribe bool @[json: 'canSubscribe'] + room string + room_join bool @[json: 'roomJoin'] + room_list bool @[json: 'roomList'] + can_publish bool @[json: 'canPublish'] + can_publish_data bool @[json: 'canPublishData'] + can_subscribe bool @[json: 'canSubscribe'] } // SIPGrant struct placeholder @@ -34,12 +34,12 @@ struct SIPGrant {} // AccessToken class pub struct AccessToken { - mut: - api_key string - api_secret string - grants ClaimGrants - identity string - ttl int +mut: + api_key string + api_secret string + grants ClaimGrants + identity string + ttl int } // Method to add a video grant to the token @@ -65,7 +65,8 @@ pub fn (token AccessToken) to_jwt() !string { unsigned_token := '${header_encoded}.${payload_encoded}' // Create the HMAC-SHA256 signature - signature := hmac.new(token.api_secret.bytes(), unsigned_token.bytes(), sha256.sum, sha256.block_size) + signature := hmac.new(token.api_secret.bytes(), unsigned_token.bytes(), sha256.sum, + sha256.block_size) // Encode the signature in base64 signature_encoded := base64.url_encode(signature) @@ -73,4 +74,4 @@ pub fn (token AccessToken) to_jwt() !string { // Create the final JWT jwt := '${unsigned_token}.${signature_encoded}' return jwt -} \ No newline at end of file +} diff --git a/lib/clients/mailclient/mailclient_factory.v b/lib/clients/mailclient/mailclient_factory.v deleted file mode 100644 index 258b0c58..00000000 --- a/lib/clients/mailclient/mailclient_factory.v +++ /dev/null @@ -1,107 +0,0 @@ -module mailclient - -import freeflowuniverse.herolib.core.base -// import 
freeflowuniverse.herolib.core.playbook - -// __global ( -// mailclient_global map[string]&MailClient -// mailclient_default string -// ) - -// /////////FACTORY - -// @[params] -// pub struct ArgsGet { -// pub mut: -// name string = 'default' -// } - -// fn args_get(args_ ArgsGet) ArgsGet { -// mut args := args_ -// if args.name == '' { -// args.name = mailclient_default -// } -// if args.name == '' { -// args.name = 'default' -// } -// return args -// } - -// pub fn get(args_ ArgsGet) !&MailClient { -// mut args := args_get(args_) -// if args.name !in mailclient_global { -// if !config_exists() { -// if default { -// config_save()! -// } -// } -// config_load()! -// } -// return mailclient_global[args.name] or { panic('bug') } -// } - -// // switch instance to be used for mailclient -// pub fn switch(name string) { -// mailclient_default = name -// } - -fn config_exists(args_ ArgsGet) bool { - mut args := args_get(args_) - mut context := base.context() or { panic('bug') } - return context.hero_config_exists('mailclient', args.name) -} - -// fn config_load(args_ ArgsGet) ! { -// mut args := args_get(args_) -// mut context := base.context()! -// mut heroscript := context.hero_config_get('mailclient', args.name)! -// play(heroscript: heroscript)! -// } - -// fn config_save(args_ ArgsGet) ! { -// mut args := args_get(args_) -// mut context := base.context()! -// context.hero_config_set('mailclient', args.name, heroscript_default())! -// } - -// fn set(o MailClient) ! { -// mut o2 := obj_init(o)! -// mailclient_global['default'] = &o2 -// } - -// @[params] -// pub struct InstallPlayArgs { -// pub mut: -// name string = 'default' -// heroscript string // if filled in then plbook will be made out of it -// plbook ?playbook.PlayBook -// reset bool -// start bool -// stop bool -// restart bool -// delete bool -// configure bool // make sure there is at least one installed -// } - -// pub fn play(args_ InstallPlayArgs) ! { -// mut args := args_ -// println('debguzo1') -// mut plbook := args.plbook or { -// println('debguzo2') -// heroscript := if args.heroscript == '' { -// heroscript_default() -// } else { -// args.heroscript -// } -// playbook.new(text: heroscript)! -// } - -// mut install_actions := plbook.find(filter: 'mailclient.configure')! -// println('debguzo3 ${install_actions}') -// if install_actions.len > 0 { -// for install_action in install_actions { -// mut p := install_action.params -// cfg_play(p)! 
-// } -// } -// } diff --git a/lib/clients/mailclient/mailclient_factory_.v b/lib/clients/mailclient/mailclient_factory_.v index 489a45fd..c100f937 100644 --- a/lib/clients/mailclient/mailclient_factory_.v +++ b/lib/clients/mailclient/mailclient_factory_.v @@ -1,103 +1,127 @@ + module mailclient import freeflowuniverse.herolib.core.base import freeflowuniverse.herolib.core.playbook import freeflowuniverse.herolib.ui.console -import freeflowuniverse.herolib.data.encoderhero + __global ( - mailclient_global map[string]&MailClient - mailclient_default string + mailclient_global map[string]&MailClient + mailclient_default string ) /////////FACTORY @[params] -pub struct ArgsGet { +pub struct ArgsGet{ pub mut: - name string + name string } -fn args_get(args_ ArgsGet) ArgsGet { - mut model := args_ - if model.name == '' { - model.name = mailclient_default - } - if model.name == '' { - model.name = 'default' - } - return model +fn args_get (args_ ArgsGet) ArgsGet { + mut args:=args_ + if args.name == ""{ + args.name = mailclient_default + } + if args.name == ""{ + args.name = "default" + } + return args } -pub fn get(args_ ArgsGet) !&MailClient { - mut args := args_get(args_) - if args.name !in mailclient_global { - if args.name == 'default' { - if !config_exists(args) { - if default { - mut context := base.context() or { panic('bug') } - context.hero_config_set('mailclient', args.name, heroscript_default())! - } - } - load(args)! - } - } - return mailclient_global[args.name] or { - println(mailclient_global) - panic('could not get config for ${args.name} with name:${args.name}') - } +pub fn get(args_ ArgsGet) !&MailClient { + mut args := args_get(args_) + if !(args.name in mailclient_global) { + if ! config_exists(args){ + config_save(args)! + } + config_load(args)! + } + return mailclient_global[args.name] or { + println(mailclient_global) + //bug if we get here because should be in globals + panic("could not get config for mailclient with name, is bug:${args.name}") + } } -// set the model in mem and the config on the filesystem -pub fn set(o MailClient) ! { - mut o2 := obj_init(o)! - mailclient_global[o.name] = &o2 - mailclient_default = o.name + + +pub fn config_exists(args_ ArgsGet) bool { + mut args := args_get(args_) + mut context:=base.context() or { panic("bug") } + return context.hero_config_exists("mailclient",args.name) } -// check we find the config on the filesystem -pub fn exists(args_ ArgsGet) bool { - mut model := args_get(args_) - mut context := base.context() or { panic('bug') } - return context.hero_config_exists('mailclient', model.name) +pub fn config_load(args_ ArgsGet) ! { + mut args := args_get(args_) + mut context:=base.context()! + mut heroscript := context.hero_config_get("mailclient",args.name)! + play(heroscript:heroscript)! } -// load the config error if it doesn't exist -pub fn load(args_ ArgsGet) ! { - mut model := args_get(args_) - mut context := base.context()! - mut heroscript := context.hero_config_get('mailclient', model.name)! - play(heroscript: heroscript)! +pub fn config_save(args_ ArgsGet) ! { + mut args := args_get(args_) + mut context:=base.context()! + context.hero_config_set("mailclient",args.name,heroscript_default(instance:args.name)!)! +} + + +pub fn config_delete(args_ ArgsGet) ! { + mut args := args_get(args_) + mut context:=base.context()! + context.hero_config_delete("mailclient",args.name)! +} + +fn set(o MailClient)! { + mut o2:=obj_init(o)! 
+ mailclient_global[o.name] = &o2 + mailclient_default = o.name } -// // save the config to the filesystem in the context -// pub fn save(o MailClient) ! { -// mut context := base.context()! -// heroscript := encoderhero.encode[MailClient](o)! -// context.hero_config_set('mailclient', model.name, heroscript)! -// } @[params] pub struct PlayArgs { pub mut: - heroscript string // if filled in then plbook will be made out of it - plbook ?playbook.PlayBook - reset bool + heroscript string //if filled in then plbook will be made out of it + plbook ?playbook.PlayBook + reset bool } pub fn play(args_ PlayArgs) ! { - mut model := args_ + + mut args:=args_ + + if args.heroscript == "" { + args.heroscript = heroscript_default()! + } + mut plbook := args.plbook or { + playbook.new(text: args.heroscript)! + } + + mut install_actions := plbook.find(filter: 'mailclient.configure')! + if install_actions.len > 0 { + for install_action in install_actions { + mut p := install_action.params + cfg_play(p)! + } + } - if model.heroscript == '' { - model.heroscript = heroscript_default() - } - mut plbook := model.plbook or { playbook.new(text: model.heroscript)! } - mut configure_actions := plbook.find(filter: 'mailclient.configure')! - if configure_actions.len > 0 { - for config_action in configure_actions { - mut p := config_action.params - cfg_play(p)! - } - } +} + + + + +//switch instance to be used for mailclient +pub fn switch(name string) { + mailclient_default = name +} + + +//helpers + +@[params] +pub struct DefaultConfigArgs{ + instance string = 'default' } diff --git a/lib/clients/mailclient/mailclient_model.v b/lib/clients/mailclient/mailclient_model.v index 79b4690a..85cde253 100644 --- a/lib/clients/mailclient/mailclient_model.v +++ b/lib/clients/mailclient/mailclient_model.v @@ -1,21 +1,21 @@ module mailclient - import freeflowuniverse.herolib.data.paramsparser import os -pub const version = '1.0.0' +pub const version = '0.0.0' const singleton = false const default = true -pub fn heroscript_default() string { + +pub fn heroscript_default(args DefaultConfigArgs) !string { mail_from := os.getenv_opt('MAIL_FROM') or { 'info@example.com' } mail_password := os.getenv_opt('MAIL_PASSWORD') or { 'secretpassword' } mail_port := (os.getenv_opt('MAIL_PORT') or { '465' }).int() mail_server := os.getenv_opt('MAIL_SERVER') or { 'smtp-relay.brevo.com' } - mail_username := os.getenv_opt('MAIL_USERNAME') or { 'kristof@incubaid.com' } + mail_username := os.getenv_opt('MAIL_USERNAME') or { 'mail@incubaid.com' } heroscript := " -!!mailclient.configure name:'default' +!!mailclient.configure name:'${args.instance}' mail_from: '${mail_from}' mail_password: '${mail_password}' mail_port: ${mail_port} @@ -23,9 +23,10 @@ pub fn heroscript_default() string { mail_username: '${mail_username}' " - return heroscript + return heroscript } +@[heap] pub struct MailClient { pub mut: name string = 'default' @@ -39,31 +40,22 @@ pub mut: } fn cfg_play(p paramsparser.Params) ! { - mut mycfg := MailClient{ + mut mycfg := MailClient{ name: p.get_default('name', 'default')! mail_from: p.get('mail_from')! mail_password: p.get('mail_password')! mail_port: p.get_int_default('mail_port', 465)! mail_server: p.get('mail_server')! mail_username: p.get('mail_username')! - } - set(mycfg)! + } + set(mycfg)! 
+} + + +fn obj_init(obj_ MailClient)!MailClient{ + mut obj:=obj_ + return obj } -fn obj_init(obj_ MailClient) !MailClient { - // never call get here, only thing we can do here is work on object itself - mut obj := obj_ - return obj -} -// user needs to us switch to make sure we get the right object -pub fn configure(config MailClient) !MailClient { - client := MailClient{ - ...config - } - set(client)! - return client - // THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED - // implement if steps need to be done for configuration -} diff --git a/lib/clients/mailclient/readme.md b/lib/clients/mailclient/readme.md index e98a40f2..b74b212d 100644 --- a/lib/clients/mailclient/readme.md +++ b/lib/clients/mailclient/readme.md @@ -1,16 +1,29 @@ # mailclient - To get started ```vlang -import freeflowuniverse.herolib.clients. mailclient +import freeflowuniverse.herolib.clients.mailclient -mut client:= mailclient.get()! -client.send(subject:'this is a test',to:'kds@something.com,kds2@else.com',body:' +//remove the previous one, otherwise the env variables are not read +mailclient.config_delete(name:"test")! + +// env variables which need to be set are: +// - MAIL_FROM=... +// - MAIL_PASSWORD=... +// - MAIL_PORT=465 +// - MAIL_SERVER=... +// - MAIL_USERNAME=... + + +mut client:= mailclient.get(name:"test")! + +println(client) + +client.send(subject:'this is a test',to:'kristof@incubaid.com',body:' this is my email content ')! diff --git a/lib/clients/mycelium/readme.md b/lib/clients/mycelium/readme.md index 598f250c..562e19a5 100644 --- a/lib/clients/mycelium/readme.md +++ b/lib/clients/mycelium/readme.md @@ -33,7 +33,7 @@ Note: Configuration is not needed if using a locally running Mycelium server wit Save as `mycelium_example.vsh`: ```v -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.clients.mycelium diff --git a/lib/clients/postgresql_client/client.v b/lib/clients/postgresql_client/client.v deleted file mode 100644 index 7027b20c..00000000 --- a/lib/clients/postgresql_client/client.v +++ /dev/null @@ -1,56 +0,0 @@ -module postgresql_client - -import freeflowuniverse.herolib.core.base -import db.pg -import freeflowuniverse.herolib.core.texttools -import freeflowuniverse.herolib.ui.console - -// pub struct PostgresClient { -// base.BaseConfig -// pub mut: -// config Config -// db pg.DB -// } - -// @[params] -// pub struct ClientArgs { -// pub mut: -// instance string @[required] -// // playargs ?play.PlayArgs -// } - -// pub fn get(clientargs ClientArgs) !PostgresClient { -// // mut plargs := clientargs.playargs or { -// // // play.PlayArgs -// // // { -// // // } -// // } - -// // mut cfg := configurator(clientargs.instance, plargs)! -// // mut args := cfg.get()! - -// args.instance = texttools.name_fix(args.instance) -// if args.instance == '' { -// args.instance = 'default' -// } -// // console.print_debug(args) -// mut db := pg.connect( -// host: args.host -// user: args.user -// port: args.port -// password: args.password -// dbname: args.dbname -// )! 
-// // console.print_debug(postgres_client) -// return PostgresClient{ -// instance: args.instance -// db: db -// config: args -// } -// } - -// struct LocalConfig { -// name string -// path string -// passwd string -// } diff --git a/lib/clients/postgresql_client/postgresql_client_model.v b/lib/clients/postgresql_client/postgresql_client_model.v index d645fdeb..67a06032 100644 --- a/lib/clients/postgresql_client/postgresql_client_model.v +++ b/lib/clients/postgresql_client/postgresql_client_model.v @@ -55,7 +55,7 @@ fn obj_init(obj_ PostgresClient) !PostgresClient { return obj } -fn (mut self PostgresClient) db() !pg.DB { +pub fn (mut self PostgresClient) db() !pg.DB { // console.print_debug(args) mut db := self.db_ or { mut db_ := pg.connect( diff --git a/lib/clients/postgresql_client/readme.md b/lib/clients/postgresql_client/readme.md index 3daad8d2..054fae0d 100644 --- a/lib/clients/postgresql_client/readme.md +++ b/lib/clients/postgresql_client/readme.md @@ -9,29 +9,52 @@ The PostgreSQL client can be configured using HeroScript. Configuration settings ### Basic Configuration Example ```v -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core -import os import freeflowuniverse.herolib.clients.postgresql_client + +// Configure PostgreSQL client heroscript := " !!postgresql_client.configure - name:'test' - user: 'root' - port: 5432 - host: 'localhost' - password: '1234' - dbname: 'postgres' + name:'test' + user: 'postgres' + port: 5432 + host: 'localhost' + password: '1234' + dbname: 'postgres' " -// Process the heroscript -postgresql_client.play(heroscript:heroscript)! +// Process the heroscript configuration +postgresql_client.play(heroscript: heroscript)! // Get the configured client -mut db_client := postgresql_client.get(name:"test")! +mut db_client := postgresql_client.get(name: "test")! + +// Check if test database exists, create if not +if !db_client.db_exists('test')! { + println('Creating database test...') + db_client.db_create('test')! +} + +// Switch to test database +db_client.dbname = 'test' + +// Create table if not exists +create_table_sql := "CREATE TABLE IF NOT EXISTS users ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL, + email VARCHAR(255) UNIQUE NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +)" + +println('Creating table users if not exists...') +db_client.exec(create_table_sql)! + +println('Database and table setup completed successfully!') + -println(db_client) ``` ### Configuration Parameters @@ -94,20 +117,3 @@ db_client.backup(dest: '/path/to/backup/dir')! Backups are created in custom PostgreSQL format (.bak files) which can be restored using pg_restore. -## Default Configuration - -If no configuration is provided, the client uses these default settings: - -```v -heroscript := " -!!postgresql_client.configure - name:'default' - user: 'root' - port: 5432 - host: 'localhost' - password: '' - dbname: 'postgres' -" -``` - -You can override these defaults by providing your own configuration using the HeroScript configure command. 
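Because this patch also makes `PostgresClient.db()` public (see `postgresql_client_model.v` above), callers that need raw SQL access can reach the underlying `db.pg` connection directly. A minimal sketch, assuming the `test` instance configured in the readme example above:

```v
import freeflowuniverse.herolib.clients.postgresql_client

mut db_client := postgresql_client.get(name: 'test')!

// db() returns the underlying db.pg connection, public as of this patch
mut db := db_client.db()!

// raw query against the users table created in the readme example
rows := db.exec('SELECT id, name, email FROM users')!
for row in rows {
	println(row.vals)
}
```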
diff --git a/lib/code/generator/installer_client/readme.md b/lib/code/generator/installer_client/readme.md index fabcd3de..dfadd322 100644 --- a/lib/code/generator/installer_client/readme.md +++ b/lib/code/generator/installer_client/readme.md @@ -52,7 +52,7 @@ this is to make distinction between processing at compile time (pre-compile) or to call in code ```v -#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.code.generator.generic diff --git a/lib/conversiontools/docsorter/readme.md b/lib/conversiontools/docsorter/readme.md index 4f30a86f..9b49d071 100644 --- a/lib/conversiontools/docsorter/readme.md +++ b/lib/conversiontools/docsorter/readme.md @@ -10,7 +10,7 @@ How to use ## example ```v -#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run import os import import freeflowuniverse.herolib.conversiontools.docsorter diff --git a/lib/core/base/context.v b/lib/core/base/context.v index 8e174752..f0de79cc 100644 --- a/lib/core/base/context.v +++ b/lib/core/base/context.v @@ -138,6 +138,13 @@ pub fn (mut self Context) hero_config_set(cat string, name string, content_ stri config_file.write(content)! } +pub fn (mut self Context) hero_config_delete(cat string, name string) ! { + path := '${self.path()!.path}/${cat}__${name}.yaml' + mut config_file := pathlib.get_file(path: path)! + config_file.delete()! +} + + pub fn (mut self Context) hero_config_exists(cat string, name string) bool { path := '${os.home_dir()}/hero/context/${self.config.name}/${cat}__${name}.yaml' return os.exists(path) diff --git a/lib/core/generator/generic/readme.md b/lib/core/generator/generic/readme.md index a458cbf3..1f9254ab 100644 --- a/lib/core/generator/generic/readme.md +++ b/lib/core/generator/generic/readme.md @@ -69,7 +69,7 @@ this is to make distinction between processing at compile time (pre-compile) or to call in code ```v -#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.core.generator.generic diff --git a/lib/core/generator/generic/templates/objname_actions.vtemplate b/lib/core/generator/generic/templates/objname_actions.vtemplate index ac4f00d7..15cb025a 100644 --- a/lib/core/generator/generic/templates/objname_actions.vtemplate +++ b/lib/core/generator/generic/templates/objname_actions.vtemplate @@ -9,9 +9,9 @@ import freeflowuniverse.herolib.core.pathlib import freeflowuniverse.herolib.osal.systemd import freeflowuniverse.herolib.osal.zinit @end +import freeflowuniverse.herolib.installers.ulist @if args.build -import freeflowuniverse.herolib.installers.ulist import freeflowuniverse.herolib.installers.lang.golang import freeflowuniverse.herolib.installers.lang.rust import freeflowuniverse.herolib.installers.lang.python @@ -82,7 +82,7 @@ fn stop_post()!{ // checks if a certain version or above is installed fn installed() !bool { //THIS IS EXAMPLE CODEAND NEEDS TO BE CHANGED - // res := os.execute('??{osal.profile_path_source_and()} ${args.name} version') + // res := os.execute('??{osal.profile_path_source_and()!} ${args.name} version') // if res.exit_code != 0 { // return false // } diff --git a/lib/core/generator/generic/templates/objname_factory_.vtemplate 
b/lib/core/generator/generic/templates/objname_factory_.vtemplate index 38c1684a..429d0d49 100644 --- a/lib/core/generator/generic/templates/objname_factory_.vtemplate +++ b/lib/core/generator/generic/templates/objname_factory_.vtemplate @@ -1,9 +1,9 @@ - module ${args.name} import freeflowuniverse.herolib.core.base import freeflowuniverse.herolib.core.playbook import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.data.paramsparser @if args.cat == .installer import freeflowuniverse.herolib.sysadmin.startupmanager @@ -27,9 +27,6 @@ pub mut: @if args.hasconfig fn args_get (args_ ArgsGet) ArgsGet { mut args:=args_ - if args.name == ""{ - args.name = ${args.name}_default - } if args.name == ""{ args.name = "default" } @@ -37,23 +34,55 @@ fn args_get (args_ ArgsGet) ArgsGet { } pub fn get(args_ ArgsGet) !&${args.classname} { + mut context:=base.context()! mut args := args_get(args_) + mut obj := ${args.classname}{} if !(args.name in ${args.name}_global) { - if args.name=="default"{ - if ! config_exists(args){ - if default{ - config_save(args)! - } - } - config_load(args)! - } + if ! exists(args)!{ + set(obj)! + }else{ + heroscript := context.hero_config_get("${args.name}",args.name)! + mut obj:=heroscript_loads(heroscript)! + set_in_mem(obj)! + } } - return ${args.name}_global[args.name] or { + return ${args.name}_global[args.name] or { println(${args.name}_global) - panic("could not get config for ${args.name} with name:??{args.name}") + //bug if we get here because should be in globals + panic("could not get config for ${args.name} with name, is bug:??{args.name}") } } +//register the config for the future +pub fn set(o ${args.classname})! { + set_in_mem(o)! + mut context := base.context()! + heroscript := heroscript_dumps(o)! + context.hero_config_set("gitea", o.name, heroscript)! +} + +//does the config exists? +pub fn exists(args_ ArgsGet)! { + mut context := base.context()! + mut args := args_get(args_) + return context.hero_config_exists("gitea", args.name) +} + +pub fn delete(args_ ArgsGet)! { + mut args := args_get(args_) + mut context:=base.context()! + context.hero_config_delete("${args.name}",args.name)! + if args.name in ${args.name}_global { + //del ${args.name}_global[args.name] + } +} + +//only sets in mem, does not set as config +fn set_in_mem(o ${args.classname})! { + mut o2:=obj_init(o)! + ${args.name}_global[o.name] = &o2 + ${args.name}_default = o.name +} @else pub fn get(args_ ArgsGet) !&${args.classname} { @@ -61,34 +90,6 @@ pub fn get(args_ ArgsGet) !&${args.classname} { } @end -@if args.hasconfig -fn config_exists(args_ ArgsGet) bool { - mut args := args_get(args_) - mut context:=base.context() or { panic("bug") } - return context.hero_config_exists("${args.name}",args.name) -} - -fn config_load(args_ ArgsGet) ! { - mut args := args_get(args_) - mut context:=base.context()! - mut heroscript := context.hero_config_get("${args.name}",args.name)! - play(heroscript:heroscript)! -} - -fn config_save(args_ ArgsGet) ! { - mut args := args_get(args_) - mut context:=base.context()! - context.hero_config_set("${args.name}",args.name,heroscript_default()!)! -} - - -fn set(o ${args.classname})! { - mut o2:=obj_init(o)! - ${args.name}_global[o.name] = &o2 - ${args.name}_default = o.name -} - - ^^[params] pub struct PlayArgs { pub mut: @@ -102,9 +103,7 @@ pub fn play(args_ PlayArgs) ! { mut args:=args_ @if args.hasconfig - if args.heroscript == "" { - args.heroscript = heroscript_default()! 
- } + @end mut plbook := args.plbook or { playbook.new(text: args.heroscript)! @@ -114,8 +113,9 @@ pub fn play(args_ PlayArgs) ! { mut install_actions := plbook.find(filter: '${args.name}.configure')! if install_actions.len > 0 { for install_action in install_actions { - mut p := install_action.params - cfg_play(p)! + heroscript:=install_action.heroscript() + mut obj2:=heroscript_loads(heroscript)! + set(obj2)! } } @end @@ -161,8 +161,6 @@ pub fn play(args_ PlayArgs) ! { } -@end - @if args.cat == .installer //////////////////////////////////////////////////////////////////////////////////////////////////// @@ -309,3 +307,11 @@ pub fn (mut self ${args.classname}) destroy() ! { pub fn switch(name string) { ${args.name}_default = name } + + +//helpers + +^^[params] +pub struct DefaultConfigArgs{ + instance string = 'default' +} \ No newline at end of file diff --git a/lib/core/generator/generic/templates/objname_model.vtemplate b/lib/core/generator/generic/templates/objname_model.vtemplate index b881274b..5e07144f 100644 --- a/lib/core/generator/generic/templates/objname_model.vtemplate +++ b/lib/core/generator/generic/templates/objname_model.vtemplate @@ -1,47 +1,12 @@ module ${args.name} import freeflowuniverse.herolib.data.paramsparser +import freeflowuniverse.herolib.data.encoderhero import os -pub const version = '1.14.3' +pub const version = '0.0.0' const singleton = ${args.singleton} const default = ${args.default} -@if args.hasconfig -//TODO: THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE TO STRUCT BELOW, IS STRUCTURED AS HEROSCRIPT -pub fn heroscript_default() !string { -@if args.cat == .installer - heroscript:=" - !!${args.name}.configure - name:'${args.name}' - homedir: '{HOME}/hero/var/${args.name}' - configpath: '{HOME}/.config/${args.name}/admin.yaml' - username: 'admin' - password: 'secretpassword' - secret: '' - title: 'My Hero DAG' - host: 'localhost' - port: 8888 - - " -@else - heroscript:=" - !!${args.name}.configure - name:'${args.name}' - mail_from: 'info@@example.com' - mail_password: 'secretpassword' - mail_port: 587 - mail_server: 'smtp-relay.brevo.com' - mail_username: 'kristof@@incubaid.com' - - " - -@end - - return heroscript - -} -@end - //THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED @if args.cat == .installer ^^[heap] @@ -53,33 +18,11 @@ pub mut: configpath string username string password string @@[secret] - secret string @@[secret] title string host string port int @end } -@if args.hasconfig -fn cfg_play(p paramsparser.Params) !${args.classname} { - //THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above - mut mycfg := ${args.classname}{ - name: p.get_default('name', 'default')! - homedir: p.get_default('homedir', '{HOME}/hero/var/${args.name}')! - configpath: p.get_default('configpath', '{HOME}/hero/var/${args.name}/admin.yaml')! - username: p.get_default('username', 'admin')! - password: p.get_default('password', '')! - secret: p.get_default('secret', '')! - title: p.get_default('title', 'HERO DAG')! - host: p.get_default('host', 'localhost')! - port: p.get_int_default('port', 8888)! - } - - if mycfg.password == '' && mycfg.secret == '' { - return error('password or secret needs to be filled in for ${args.name}') - } - return mycfg -} -@end @else @@ -94,27 +37,16 @@ pub mut: mail_username string } -@if args.hasconfig -fn cfg_play(p paramsparser.Params) ! 
{ - //THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above - mut mycfg := ${args.classname}{ - name: p.get_default('name', 'default')! - mail_from: p.get('mail_from')! - mail_password: p.get('mail_password')! - mail_port: p.get_int_default('mail_port', 8888)! - mail_server: p.get('mail_server')! - mail_username: p.get('mail_username')! - } - set(mycfg)! -} @end -@end -fn obj_init(obj_ ${args.classname})!${args.classname}{ - //never call get here, only thing we can do here is work on object itself - mut obj:=obj_ - return obj +//your checking & initialization code if needed +fn obj_init(mycfg_ ${args.classname})!${args.classname}{ + mut mycfg:=mycfg_ + if mycfg.password == '' && mycfg.secret == '' { + return error('password or secret needs to be filled in for ??{mycfg.name}') + } + return mycfg } @if args.cat == .installer @@ -135,3 +67,13 @@ fn configure() ! { @end +/////////////NORMALLY NO NEED TO TOUCH + +pub fn heroscript_dumps(obj ${args.classname}) !string { + return encoderhero.encode[${args.classname} ](obj)! +} + +pub fn heroscript_loads(heroscript string) !${args.classname} { + mut obj := encoderhero.decode[${args.classname}](heroscript)! + return obj +} diff --git a/lib/core/herocmds/docusaurus.v b/lib/core/herocmds/docusaurus.v new file mode 100644 index 00000000..c3449411 --- /dev/null +++ b/lib/core/herocmds/docusaurus.v @@ -0,0 +1,112 @@ +module herocmds + +import freeflowuniverse.herolib.web.docusaurus +import os +import cli { Command, Flag } + +pub fn cmd_docusaurus(mut cmdroot Command) { + mut cmd_run := Command{ + name: 'docusaurus' + description: 'Generate, build, run docusaurus sites.' + required_args: 0 + execute: cmd_docusaurus_execute + } + + // cmd_run.add_flag(Flag{ + // flag: .bool + // required: false + // name: 'reset' + // abbrev: 'r' + // description: 'will reset.' + // }) + + cmd_run.add_flag(Flag{ + flag: .string + required: false + name: 'url' + abbrev: 'u' + // default: '' + description: 'Url where docusaurus source is.' + }) + + cmd_run.add_flag(Flag{ + flag: .bool + required: false + name: 'build' + abbrev: 'b' + description: 'build and publish.' + }) + + cmd_run.add_flag(Flag{ + flag: .bool + required: false + name: 'builddev' + abbrev: 'bd' + description: 'build dev version and publish.' + }) + + cmd_run.add_flag(Flag{ + flag: .bool + required: false + name: 'update' + abbrev: 'p' + description: 'update your environment the template and the repo you are working on (git pull).' + }) + + + cmd_run.add_flag(Flag{ + flag: .bool + required: false + name: 'dev' + abbrev: 'd' + description: 'Run your dev environment on local browser.' + }) + + cmdroot.add_command(cmd_run) +} + +fn cmd_docusaurus_execute(cmd Command) ! { + mut update := cmd.flags.get_bool('update') or { false } + mut url := cmd.flags.get_string('url') or { '' } + + // mut path := cmd.flags.get_string('path') or { '' } + // if path == '' { + // path = os.getwd() + // } + // path = path.replace('~', os.home_dir()) + + mut build := cmd.flags.get_bool('build') or { false } + mut builddev := cmd.flags.get_bool('builddev') or { false } + mut dev := cmd.flags.get_bool('dev') or { false } + + // if build== false && build== false && build== false { + // eprintln("specify build, builddev or dev") + // exit(1) + // } + + mut docs := docusaurus.new(update:update)! + + if build { + // Create a new docusaurus site + _ := docs.build( + url: url + update:update + )! + } + + if builddev { + // Create a new docusaurus site + _ := docs.build_dev( + url: url + update:update + )! 
+ } + + if dev { + // Create a new docusaurus site + _ := docs.dev( + url: url + update:update + )! + } +} diff --git a/lib/core/herocmds/git.v b/lib/core/herocmds/git.v index 94a2ef33..c701721d 100644 --- a/lib/core/herocmds/git.v +++ b/lib/core/herocmds/git.v @@ -104,6 +104,14 @@ pub fn cmd_git(mut cmdroot Command) { abbrev: 's' description: 'be silent.' }) + + c.add_flag(Flag{ + flag: .bool + required: false + name: 'load' + abbrev: 'l' + description: 'reload the data in cache.' + }) } mut allcmdscommit := [&push_command, &pull_command, &commit_command] @@ -217,8 +225,10 @@ pub fn cmd_git(mut cmdroot Command) { } fn cmd_git_execute(cmd Command) ! { - mut silent := cmd.flags.get_bool('silent') or { false } - if silent || cmd.name == 'cd' { + mut is_silent := cmd.flags.get_bool('silent') or { false } + mut reload := cmd.flags.get_bool('load') or { false } + + if is_silent || cmd.name == 'cd' { console.silent_set() } mut coderoot := cmd.flags.get_string('coderoot') or { '' } @@ -262,6 +272,7 @@ fn cmd_git_execute(cmd Command) ! { mypath := gs.do( filter: filter repo: repo + reload: reload account: account provider: provider branch: branch diff --git a/lib/core/jobs/model/agent.v b/lib/core/jobs/model/agent.v new file mode 100644 index 00000000..a6223641 --- /dev/null +++ b/lib/core/jobs/model/agent.v @@ -0,0 +1,60 @@ +module model + +import freeflowuniverse.herolib.data.ourtime + +// Agent represents a service provider that can execute jobs +pub struct Agent { +pub mut: + pubkey string // pubkey using ed25519 + address string // where we can find the agent + port int // default 9999 + description string // optional + status AgentStatus + services []AgentService // these are the public services + signature string // signature as done by private key of $address+$port+$description+$status +} + +// AgentStatus represents the current state of an agent +pub struct AgentStatus { +pub mut: + guid string // unique id for the job + timestamp_first ourtime.OurTime // when agent came online + timestamp_last ourtime.OurTime // last time agent let us know that he is working + status AgentState // current state of the agent +} + +// AgentService represents a service provided by an agent +pub struct AgentService { +pub mut: + actor string // name of the actor providing the service + actions []AgentServiceAction // available actions for this service + description string // optional description + status AgentServiceState // current state of the service +} + +// AgentServiceAction represents an action that can be performed by a service +pub struct AgentServiceAction { +pub mut: + action string // which action + description string // optional description + params map[string]string // e.g. name:'name of the vm' ... + params_example map[string]string // e.g. 
name:'myvm' + status AgentServiceState // current state of the action + public bool // if everyone can use then true, if restricted means only certain people can use +} + +// AgentState represents the possible states of an agent +pub enum AgentState { + ok // agent is functioning normally + down // agent is not responding + error // agent encountered an error + halted // agent has been manually stopped +} + +// AgentServiceState represents the possible states of an agent service or action +pub enum AgentServiceState { + ok // service/action is functioning normally + down // service/action is not available + error // service/action encountered an error + halted // service/action has been manually stopped +} diff --git a/lib/core/jobs/model/agent_manager.v b/lib/core/jobs/model/agent_manager.v new file mode 100644 index 00000000..350157da --- /dev/null +++ b/lib/core/jobs/model/agent_manager.v @@ -0,0 +1,91 @@ +module model + +import freeflowuniverse.herolib.core.redisclient +import freeflowuniverse.herolib.data.ourtime +import json + +const agents_key = 'herorunner:agents' // Redis key for storing agents + +// AgentManager handles all agent-related operations +pub struct AgentManager { +mut: + redis &redisclient.Redis +} + +// new creates a new Agent instance +pub fn (mut m AgentManager) new() Agent { + return Agent{ + pubkey: '' // Empty pubkey to be filled by caller + port: 9999 // Default port + status: AgentStatus{ + guid: '' + timestamp_first: ourtime.now() + timestamp_last: ourtime.OurTime{} + status: .ok + } + services: []AgentService{} + } +} + +// add adds a new agent to Redis +pub fn (mut m AgentManager) set(agent Agent) ! { + // Store agent in Redis hash where key is agent.pubkey and value is JSON of agent + agent_json := json.encode(agent) + m.redis.hset(agents_key, agent.pubkey, agent_json)! +} + +// get retrieves an agent by its public key +pub fn (mut m AgentManager) get(pubkey string) !Agent { + agent_json := m.redis.hget(agents_key, pubkey)! + return json.decode(Agent, agent_json) +} + +// list returns all agents +pub fn (mut m AgentManager) list() ![]Agent { + mut agents := []Agent{} + + // Get all agents from Redis hash + agents_map := m.redis.hgetall(agents_key)! + + // Convert each JSON value to Agent struct + for _, agent_json in agents_map { + agent := json.decode(Agent, agent_json)! + agents << agent + } + + return agents +} + +// delete removes an agent by its public key +pub fn (mut m AgentManager) delete(pubkey string) ! { + m.redis.hdel(agents_key, pubkey)! +} + +// update_status updates just the status of an agent +pub fn (mut m AgentManager) update_status(pubkey string, status AgentState) ! { + mut agent := m.get(pubkey)! + agent.status.status = status + m.set(agent)! +} + +// get_by_service returns all agents that provide a specific service +pub fn (mut m AgentManager) get_by_service(actor string, action string) ![]Agent { + mut matching_agents := []Agent{} + + agents := m.list()! 
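+	// matching happens client-side over the full agent list; with many agents
+	// a secondary index per actor/action in Redis could avoid the scan
+	// (not implemented here)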
+ for agent in agents { + for service in agent.services { + if service.actor != actor { + continue + } + for act in service.actions { + if act.action == action { + matching_agents << agent + break + } + } + } + } + + return matching_agents +} diff --git a/lib/core/jobs/model/agent_manager_test.v b/lib/core/jobs/model/agent_manager_test.v new file mode 100644 index 00000000..742e03a9 --- /dev/null +++ b/lib/core/jobs/model/agent_manager_test.v @@ -0,0 +1,74 @@ +module model + +import freeflowuniverse.herolib.core.redisclient +import freeflowuniverse.herolib.data.ourtime + +fn test_agents_model() { + mut runner := new()! + + // Create a new agent using the manager + mut agent := runner.agents.new() + agent.pubkey = 'test-agent-1' + agent.address = '127.0.0.1' + agent.description = 'Test Agent' + + // Create a service action + mut action := AgentServiceAction{ + action: 'start' + description: 'Start a VM' + params: { + 'name': 'string' + } + params_example: { + 'name': 'myvm' + } + status: .ok + public: true + } + + // Create a service + mut service := AgentService{ + actor: 'vm_manager' + actions: [action] + description: 'VM Management Service' + status: .ok + } + + agent.services = [service] + + // Add the agent + runner.agents.set(agent)! + + // Get the agent and verify fields + retrieved_agent := runner.agents.get(agent.pubkey)! + assert retrieved_agent.pubkey == agent.pubkey + assert retrieved_agent.address == agent.address + assert retrieved_agent.description == agent.description + assert retrieved_agent.services.len == 1 + assert retrieved_agent.services[0].actor == 'vm_manager' + assert retrieved_agent.status.status == .ok + + // Update agent status + runner.agents.update_status(agent.pubkey, .down)! + updated_agent := runner.agents.get(agent.pubkey)! + assert updated_agent.status.status == .down + + // Test get_by_service + agents := runner.agents.get_by_service('vm_manager', 'start')! + assert agents.len > 0 + assert agents[0].pubkey == agent.pubkey + + // List all agents + all_agents := runner.agents.list()! + assert all_agents.len > 0 + assert all_agents[0].pubkey == agent.pubkey + + // Delete the agent + runner.agents.delete(agent.pubkey)! + + // Verify deletion + agents_after := runner.agents.list()! + for a in agents_after { + assert a.pubkey != agent.pubkey + } +} diff --git a/lib/core/jobs/model/factory.v b/lib/core/jobs/model/factory.v new file mode 100644 index 00000000..7a697472 --- /dev/null +++ b/lib/core/jobs/model/factory.v @@ -0,0 +1,37 @@ +module model + +import freeflowuniverse.herolib.core.redisclient + +// HeroRunner is the main factory for managing jobs, agents, services and groups +pub struct HeroRunner { +mut: + redis &redisclient.Redis +pub mut: + jobs &JobManager + agents &AgentManager + services &ServiceManager + groups &GroupManager +} + +// new creates a new HeroRunner instance +pub fn new() !&HeroRunner { + mut redis := redisclient.core_get()! 
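+	// a single Redis connection is shared by all four managers below, so they
+	// all operate on the same herorunner:* hashes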
+ + mut hr := &HeroRunner{ + redis: redis + jobs: &JobManager{ + redis: redis + } + agents: &AgentManager{ + redis: redis + } + services: &ServiceManager{ + redis: redis + } + groups: &GroupManager{ + redis: redis + } + } + + return hr +} diff --git a/lib/core/jobs/model/group.v b/lib/core/jobs/model/group.v new file mode 100644 index 00000000..b945006d --- /dev/null +++ b/lib/core/jobs/model/group.v @@ -0,0 +1,10 @@ +module model + +// Group represents a collection of members (users or other groups) +pub struct Group { +pub mut: + guid string // unique id + name string // name of the group + description string // optional description + members []string // can be other group or member which is defined by pubkey +} diff --git a/lib/core/jobs/model/group_manager.v b/lib/core/jobs/model/group_manager.v new file mode 100644 index 00000000..ba7f94f2 --- /dev/null +++ b/lib/core/jobs/model/group_manager.v @@ -0,0 +1,99 @@ +module model + +import freeflowuniverse.herolib.core.redisclient +import json + +const groups_key = 'herorunner:groups' // Redis key for storing groups + +// GroupManager handles all group-related operations +pub struct GroupManager { +mut: + redis &redisclient.Redis +} + +// new creates a new Group instance +pub fn (mut m GroupManager) new() Group { + return Group{ + guid: '' // Empty GUID to be filled by caller + members: []string{} + } +} + +// add adds a new group to Redis +pub fn (mut m GroupManager) set(group Group) ! { + // Store group in Redis hash where key is group.guid and value is JSON of group + group_json := json.encode(group) + m.redis.hset(groups_key, group.guid, group_json)! +} + +// get retrieves a group by its GUID +pub fn (mut m GroupManager) get(guid string) !Group { + group_json := m.redis.hget(groups_key, guid)! + return json.decode(Group, group_json) +} + +// list returns all groups +pub fn (mut m GroupManager) list() ![]Group { + mut groups := []Group{} + + // Get all groups from Redis hash + groups_map := m.redis.hgetall(groups_key)! + + // Convert each JSON value to Group struct + for _, group_json in groups_map { + group := json.decode(Group, group_json)! + groups << group + } + + return groups +} + +// delete removes a group by its GUID +pub fn (mut m GroupManager) delete(guid string) ! { + m.redis.hdel(groups_key, guid)! +} + +// add_member adds a member (user pubkey or group GUID) to a group +pub fn (mut m GroupManager) add_member(guid string, member string) ! { + mut group := m.get(guid)! + if member !in group.members { + group.members << member + m.set(group)! + } +} + +// remove_member removes a member from a group +pub fn (mut m GroupManager) remove_member(guid string, member string) ! { + mut group := m.get(guid)! + group.members = group.members.filter(it != member) + m.set(group)! +} + +pub fn (mut m GroupManager) get_user_groups(user_pubkey string) ![]Group { + mut user_groups := []Group{} + mut checked_groups := map[string]bool{} + groups := m.list()! 
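+	// load all groups once; check_group_membership recurses over this
+	// in-memory slice and uses checked_groups to avoid cycles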
+ // Check each group + for group in groups { + check_group_membership(group, user_pubkey, groups, mut checked_groups, mut user_groups) + } + return user_groups +} + +// Recursive function to check group membership +fn check_group_membership(group Group, user string, groups []Group, mut checked map[string]bool, mut result []Group) { + if group.guid in checked { + return + } + checked[group.guid] = true + + if user in group.members { + result << group + // Check parent groups + for parent_group in groups { + if group.guid in parent_group.members { + check_group_membership(parent_group, user, groups, mut checked, mut result) + } + } + } +} diff --git a/lib/core/jobs/model/group_manager_test.v b/lib/core/jobs/model/group_manager_test.v new file mode 100644 index 00000000..24e08716 --- /dev/null +++ b/lib/core/jobs/model/group_manager_test.v @@ -0,0 +1,67 @@ +module model + +import freeflowuniverse.herolib.core.redisclient + +fn test_groups() { + mut runner := new()! + + // Create a new group using the manager + mut group := runner.groups.new() + group.guid = 'admin-group' + group.name = 'Administrators' + group.description = 'Administrator group with full access' + + // Add the group + runner.groups.set(group)! + + // Create a subgroup + mut subgroup := runner.groups.new() + subgroup.guid = 'vm-admins' + subgroup.name = 'VM Administrators' + subgroup.description = 'VM management administrators' + + runner.groups.set(subgroup)! + + // Add subgroup to main group + runner.groups.add_member(group.guid, subgroup.guid)! + + // Add a user to the subgroup + runner.groups.add_member(subgroup.guid, 'user-1-pubkey')! + + // Get the groups and verify fields + retrieved_group := runner.groups.get(group.guid)! + assert retrieved_group.guid == group.guid + assert retrieved_group.name == group.name + assert retrieved_group.description == group.description + assert retrieved_group.members.len == 1 + assert retrieved_group.members[0] == subgroup.guid + + retrieved_subgroup := runner.groups.get(subgroup.guid)! + assert retrieved_subgroup.members.len == 1 + assert retrieved_subgroup.members[0] == 'user-1-pubkey' + + // Test recursive group membership + user_groups := runner.groups.get_user_groups('user-1-pubkey')! + assert user_groups.len == 1 + assert user_groups[0].guid == subgroup.guid + + // Remove member from subgroup + runner.groups.remove_member(subgroup.guid, 'user-1-pubkey')! + updated_subgroup := runner.groups.get(subgroup.guid)! + assert updated_subgroup.members.len == 0 + + // List all groups + groups := runner.groups.list()! + assert groups.len == 2 + + // Delete the groups + runner.groups.delete(subgroup.guid)! + runner.groups.delete(group.guid)! + + // Verify deletion + groups_after := runner.groups.list()! 
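+	// after deletion neither guid may appear in the listing anymore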
+ for g in groups_after { + assert g.guid != group.guid + assert g.guid != subgroup.guid + } +} diff --git a/lib/core/jobs/model/job.v b/lib/core/jobs/model/job.v new file mode 100644 index 00000000..87bf99ad --- /dev/null +++ b/lib/core/jobs/model/job.v @@ -0,0 +1,52 @@ +module model + +import freeflowuniverse.herolib.data.ourtime + +// Job represents a task to be executed by an agent +pub struct Job { +pub mut: + guid string // unique id for the job + agents []string // the pub key of the agent(s) which will execute the command, only 1 will execute + source string // pubkey from the agent who asked for the job + circle string = 'default' // our digital life is organized in circles + context string = 'default' // is the high level context in which actors will execute the work inside a circle + actor string // e.g. vm_manager + action string // e.g. start + params map[string]string // e.g. id:10 + timeout_schedule u16 = 60 // timeout before its picked up + timeout u16 = 3600 // timeout in sec + log bool = true + ignore_error bool // means if error will just exit and not raise, there will be no error reporting + ignore_error_codes []int // of we want to ignore certain error codes + debug bool // if debug will get more context + retry int // default there is no debug + status JobStatus + dependencies []JobDependency // will not execute until other jobs are done +} + +// JobStatus represents the current state of a job +pub struct JobStatus { +pub mut: + guid string // unique id for the job + created ourtime.OurTime // when we created the job + start ourtime.OurTime // when the job needs to start + end ourtime.OurTime // when the job ended, can be in error + status Status // current status of the job +} + +// JobDependency represents a dependency on another job +pub struct JobDependency { +pub mut: + guid string // unique id for the job + agents []string // the pub key of the agent(s) which can execute the command +} + +// Status represents the possible states of a job +pub enum Status { + created // initial state + scheduled // job has been scheduled + planned // arrived where actor will execute the job + running // job is currently running + error // job encountered an error + ok // job completed successfully +} diff --git a/lib/core/jobs/model/job_manager.v b/lib/core/jobs/model/job_manager.v new file mode 100644 index 00000000..66e15999 --- /dev/null +++ b/lib/core/jobs/model/job_manager.v @@ -0,0 +1,68 @@ +module model + +import freeflowuniverse.herolib.core.redisclient +import freeflowuniverse.herolib.data.ourtime +import json + +const jobs_key = 'herorunner:jobs' // Redis key for storing jobs + +// JobManager handles all job-related operations +pub struct JobManager { +mut: + redis &redisclient.Redis +} + +// new creates a new Job instance +pub fn (mut m JobManager) new() Job { + return Job{ + guid: '' // Empty GUID to be filled by caller + status: JobStatus{ + guid: '' + created: ourtime.now() + start: ourtime.OurTime{} + end: ourtime.OurTime{} + status: .created + } + } +} + +// add adds a new job to Redis +pub fn (mut m JobManager) set(job Job) ! { + // Store job in Redis hash where key is job.guid and value is JSON of job + job_json := json.encode(job) + m.redis.hset(jobs_key, job.guid, job_json)! +} + +// get retrieves a job by its GUID +pub fn (mut m JobManager) get(guid string) !Job { + job_json := m.redis.hget(jobs_key, guid)! 
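+	// assumption: hget returns an error for an unknown guid, so a missing job
+	// surfaces as an error instead of an empty Job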
+ return json.decode(Job, job_json) +} + +// list returns all jobs +pub fn (mut m JobManager) list() ![]Job { + mut jobs := []Job{} + + // Get all jobs from Redis hash + jobs_map := m.redis.hgetall(jobs_key)! + + // Convert each JSON value to Job struct + for _, job_json in jobs_map { + job := json.decode(Job, job_json)! + jobs << job + } + + return jobs +} + +// delete removes a job by its GUID +pub fn (mut m JobManager) delete(guid string) ! { + m.redis.hdel(jobs_key, guid)! +} + +// update_status updates just the status of a job +pub fn (mut m JobManager) update_status(guid string, status Status) ! { + mut job := m.get(guid)! + job.status.status = status + m.set(job)! +} diff --git a/lib/core/jobs/model/job_manager_test.v b/lib/core/jobs/model/job_manager_test.v new file mode 100644 index 00000000..f20b18bd --- /dev/null +++ b/lib/core/jobs/model/job_manager_test.v @@ -0,0 +1,47 @@ +module model + +import freeflowuniverse.herolib.core.redisclient +import freeflowuniverse.herolib.data.ourtime + +fn test_jobs() { + mut runner := new()! + + // Create a new job using the manager + mut job := runner.jobs.new() + job.guid = 'test-job-1' + job.actor = 'vm_manager' + job.action = 'start' + job.params = { + 'id': '10' + } + + // Add the job + runner.jobs.set(job)! + + // Get the job and verify fields + retrieved_job := runner.jobs.get(job.guid)! + assert retrieved_job.guid == job.guid + assert retrieved_job.actor == job.actor + assert retrieved_job.action == job.action + assert retrieved_job.params['id'] == job.params['id'] + assert retrieved_job.status.status == .created + + // Update job status + runner.jobs.update_status(job.guid, .running)! + updated_job := runner.jobs.get(job.guid)! + assert updated_job.status.status == .running + + // List all jobs + jobs := runner.jobs.list()! + assert jobs.len > 0 + assert jobs[0].guid == job.guid + + // Delete the job + runner.jobs.delete(job.guid)! + + // Verify deletion + jobs_after := runner.jobs.list()! + for j in jobs_after { + assert j.guid != job.guid + } +} diff --git a/lib/core/jobs/model/service.v b/lib/core/jobs/model/service.v new file mode 100644 index 00000000..589d59d9 --- /dev/null +++ b/lib/core/jobs/model/service.v @@ -0,0 +1,44 @@ +module model + +// Service represents a service that can be provided by agents +pub struct Service { +pub mut: + actor string // name of the actor providing the service + actions []ServiceAction // available actions for this service + description string // optional description + status ServiceState // current state of the service + acl ?ACL // access control list for the service +} + +// ServiceAction represents an action that can be performed by a service +pub struct ServiceAction { +pub mut: + action string // which action + description string // optional description + params map[string]string // e.g. name:'name of the vm' ... + params_example map[string]string // e.g. name:'myvm' + acl ?ACL // if not used then everyone can use +} + +// ACL represents an access control list +pub struct ACL { +pub mut: + name string + ace []ACE +} + +// ACE represents an access control entry +pub struct ACE { +pub mut: + groups []string // guid's of the groups who have access + users []string // in case groups are not used then is users + right string // e.g. 
read, write, admin, block +} + +// ServiceState represents the possible states of a service +pub enum ServiceState { + ok // service is functioning normally + down // service is not available + error // service encountered an error + halted // service has been manually stopped +} diff --git a/lib/core/jobs/model/service_manager.v b/lib/core/jobs/model/service_manager.v new file mode 100644 index 00000000..322fc718 --- /dev/null +++ b/lib/core/jobs/model/service_manager.v @@ -0,0 +1,122 @@ +module model + +import freeflowuniverse.herolib.core.redisclient +import json + +const services_key = 'herorunner:services' // Redis key for storing services + +// ServiceManager handles all service-related operations +pub struct ServiceManager { +mut: + redis &redisclient.Redis +} + +// new creates a new Service instance +pub fn (mut m ServiceManager) new() Service { + return Service{ + actor: '' // Empty actor name to be filled by caller + actions: []ServiceAction{} + status: .ok + } +} + +// add adds a new service to Redis +pub fn (mut m ServiceManager) set(service Service) ! { + // Store service in Redis hash where key is service.actor and value is JSON of service + service_json := json.encode(service) + m.redis.hset(services_key, service.actor, service_json)! +} + +// get retrieves a service by its actor name +pub fn (mut m ServiceManager) get(actor string) !Service { + service_json := m.redis.hget(services_key, actor)! + return json.decode(Service, service_json) +} + +// list returns all services +pub fn (mut m ServiceManager) list() ![]Service { + mut services := []Service{} + + // Get all services from Redis hash + services_map := m.redis.hgetall(services_key)! + + // Convert each JSON value to Service struct + for _, service_json in services_map { + service := json.decode(Service, service_json)! + services << service + } + + return services +} + +// delete removes a service by its actor name +pub fn (mut m ServiceManager) delete(actor string) ! { + m.redis.hdel(services_key, actor)! +} + +// update_status updates just the status of a service +pub fn (mut m ServiceManager) update_status(actor string, status ServiceState) ! { + mut service := m.get(actor)! + service.status = status + m.set(service)! +} + +// get_by_action returns all services that provide a specific action +pub fn (mut m ServiceManager) get_by_action(action string) ![]Service { + mut matching_services := []Service{} + + services := m.list()! + for service in services { + for act in service.actions { + if act.action == action { + matching_services << service + break + } + } + } + + return matching_services +} + +// check_access verifies if a user has access to a service action +pub fn (mut m ServiceManager) check_access(actor string, action string, user_pubkey string, groups []string) !bool { + service := m.get(actor)! 
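+	// evaluation order: locate the action, allow if it carries no ACL,
+	// otherwise scan the ACEs; a matching user or group grants access unless
+	// the right is 'block'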
+ + // Find the specific action + mut service_action := ServiceAction{} + mut found := false + for act in service.actions { + if act.action == action { + service_action = act + found = true + break + } + } + if !found { + return error('Action ${action} not found in service ${actor}') + } + + // If no ACL is defined, access is granted + if service_action.acl == none { + return true + } + + acl := service_action.acl or { return true } + + // Check each ACE in the ACL + for ace in acl.ace { + // Check if user is directly listed + if user_pubkey in ace.users { + return ace.right != 'block' + } + + // Check if any of user's groups are listed + for group in groups { + if group in ace.groups { + return ace.right != 'block' + } + } + } + + return false +} diff --git a/lib/core/jobs/model/service_manager_test.v b/lib/core/jobs/model/service_manager_test.v new file mode 100644 index 00000000..fe3718ce --- /dev/null +++ b/lib/core/jobs/model/service_manager_test.v @@ -0,0 +1,87 @@ +module model + +import freeflowuniverse.herolib.core.redisclient + +fn test_services() { + mut runner := new()! + + // Create a new service using the manager + mut service := runner.services.new() + service.actor = 'vm_manager' + service.description = 'VM Management Service' + + // Create an ACL + mut ace := ACE{ + groups: ['admin-group'] + users: ['user-1-pubkey'] + right: 'write' + } + + mut acl := ACL{ + name: 'vm-acl' + ace: [ace] + } + + // Create a service action + mut action := ServiceAction{ + action: 'start' + description: 'Start a VM' + params: { + 'name': 'string' + } + params_example: { + 'name': 'myvm' + } + acl: acl + } + + service.actions = [action] + + // Add the service + runner.services.set(service)! + + // Get the service and verify fields + retrieved_service := runner.services.get(service.actor)! + assert retrieved_service.actor == service.actor + assert retrieved_service.description == service.description + assert retrieved_service.actions.len == 1 + assert retrieved_service.actions[0].action == 'start' + assert retrieved_service.status == .ok + + // Update service status + runner.services.update_status(service.actor, .down)! + updated_service := runner.services.get(service.actor)! + assert updated_service.status == .down + + // Test get_by_action + services := runner.services.get_by_action('start')! + assert services.len > 0 + assert services[0].actor == service.actor + + // Test access control + has_access := runner.services.check_access(service.actor, 'start', 'user-1-pubkey', + [])! + assert has_access == true + + has_group_access := runner.services.check_access(service.actor, 'start', 'user-2-pubkey', + ['admin-group'])! + assert has_group_access == true + + no_access := runner.services.check_access(service.actor, 'start', 'user-3-pubkey', + [])! + assert no_access == false + + // List all services + all_services := runner.services.list()! + assert all_services.len > 0 + assert all_services[0].actor == service.actor + + // Delete the service + runner.services.delete(service.actor)! + + // Verify deletion + services_after := runner.services.list()! 
+	for s in services_after {
+		assert s.actor != service.actor
+	}
+}
diff --git a/lib/core/jobs/model/specs.md b/lib/core/jobs/model/specs.md
new file mode 100644
index 00000000..9d5964f6
--- /dev/null
+++ b/lib/core/jobs/model/specs.md
@@ -0,0 +1,186 @@
+create a job manager in
+lib/core/jobs
+
+
+## some definitions
+
+- agent: a self-contained set of processes which can execute actions itself or pass actions on to be executed by others
+- action: what needs to be executed
+- circle: each action happens in a circle
+- context: a context inside a circle is optional
+- job: what gets executed by an agent; is one action, can depend on other actions
+- herorunner: the process which uses redis to manage all open jobs, checks for timeouts and does the forwarding if needed (e.g. when a remote agent needs to schedule)
+
+## jobs
+
+jobs are executed by processes which can be written in different languages; they are identified by the agent pub key (the one who executes)
+as part of heroscript we know what to execute on which actor inside the agent, defined by the method and its arguments
+
+```v
+
+//the description of what needs to be executed
+pub struct Job {
+pub mut:
+	guid string //unique id for the job
+	agents []string //the pub key of the agent(s) which will execute the command, only 1 will execute, the herorunner will try the different agents if needed till it has success
+	source string //pubkey from the agent who asked for the job
+	circle string = "default" //our digital life is organized in circles
+	context string = "default" //is the high level context in which actors will execute the work inside a circle
+	actor string //e.g. vm_manager
+	action string //e.g. start
+	params map[string]string //e.g. id:10
+	timeout_schedule u16 = 60 //timeout before it gets picked up
+	timeout u16 = 3600 //timeout in sec
+	log bool = true
+	ignore_error bool //if error will just exit and not raise, there will be no error reporting
+	ignore_error_codes []int //if we want to ignore certain error codes
+	debug bool //if debug we get more context
+	retry int //by default there is no retry
+	status JobStatus
+	dependencies []JobDependency //will not execute until other jobs are done
+
+}
+
+pub struct JobStatus {
+pub mut:
+	guid string //unique id for the job
+	created u32 //epoch when we created the job
+	start u32 //epoch when the job needs to start
+	end u32 //epoch when the job ended, can be in error
+	status Status //enum: created, scheduled, planned (arrived where the actor will execute the job), running, error, ok
+}
+
+pub struct JobDependency {
+pub mut:
+	guid string //unique id for the job
+	agents []string //the pub key of the agent(s) which can execute the command
+}
+
+
+
+```
+
+the Job object is stored in redis in hset herorunner:jobs where the key is the job guid and the value is the json of Job
+
+## Agent Registration Services
+
+Each agent (the one who hosts the different actors which execute the methods with their params) registers itself to all participants.
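+
+As an illustrative sketch (not part of the spec itself), registering an agent
+through the managers in this module could look as follows; the pubkey and
+address values are placeholders:
+
+```v
+import freeflowuniverse.herolib.core.jobs.model
+
+mut runner := model.new()!
+
+mut agent := runner.agents.new()
+agent.pubkey = 'ed25519-pubkey-placeholder'
+agent.address = '127.0.0.1'
+agent.description = 'example agent'
+
+// stores the agent as JSON in the herorunner:agents hash, keyed by pubkey
+runner.agents.set(agent)!
+```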
+
+the structs below are available to everyone and are public
+
+```v
+
+pub struct Agent {
+pub mut:
+	pubkey string //pubkey using ed25519
+	address string //where we can find the agent
+	port int //default 9999
+	description string //optional
+	status AgentStatus
+	services []AgentService //these are the public services
+	signature string //signature as done by private key of $address+$port+$description+$status (this allows everyone to verify that the data is ok)
+
+
+}
+
+pub struct AgentStatus {
+pub mut:
+	guid string //unique id for the job
+	timestamp_first u32 //when agent came online
+	timestamp_last u32 //last time the agent let us know that it is working
+	status AgentState //enum: ok, down, error, halted
+}
+
+pub struct AgentService {
+pub mut:
+	actor string
+	actions []AgentServiceAction
+	description string
+	status AgentServiceState //enum: ok, down, error, halted
+}
+
+pub struct AgentServiceAction {
+pub mut:
+	action string //which action
+	description string //optional description
+	params map[string]string //e.g. name:'name of the vm' ...
+	params_example map[string]string //e.g. name:'myvm'
+	status AgentServiceState //enum: ok, down, error, halted
+	public bool //true if everyone can use it, false means only certain people can use it
+}
+
+
+
+
+
+```
+
+the Agent object is stored in redis in hset herorunner:agents where the key is the agent pubkey and the value is the json of Agent
+
+
+### Services Info
+
+The agent and its actors register their capabilities with the herorunner.
+
+We have a mechanism to be specific about who can execute what; this is a sort of ACL system, for now quite rough.
+
+
+
+```v
+
+pub struct Group {
+pub mut:
+	guid string //unique id
+	name string
+	description string
+	members []string //can be another group or a member, which is identified by pubkey
+}
+
+
+```
+
+this info is stored in redis on herorunner:groups
+
+
+
+```v
+
+pub struct Service {
+pub mut:
+	actor string
+	actions []ServiceAction
+	description string
+	status ServiceState //enum: ok, down, error, halted
+	acl ?ACL
+}
+
+pub struct ServiceAction {
+pub mut:
+	action string //which action
+	description string //optional description
+	params map[string]string //e.g. name:'name of the vm' ...
+	params_example map[string]string //e.g. name:'myvm'
+	acl ?ACL //if not used then everyone can use
+}
+
+pub struct ACL {
+pub mut:
+	name string
+	ace []ACE
+}
+
+
+pub struct ACE {
+pub mut:
+	groups []string //guid's of the groups who have access
+	users []string //in case groups are not used then users are listed directly
+	right string //e.g.
read, write, admin, block +} + + + + +``` + +The info for the herorunner to function is in redis on herorunner:services + diff --git a/lib/core/jobs/openrpc/.gitignore b/lib/core/jobs/openrpc/.gitignore new file mode 100644 index 00000000..c62db7b9 --- /dev/null +++ b/lib/core/jobs/openrpc/.gitignore @@ -0,0 +1,2 @@ +server +job_client \ No newline at end of file diff --git a/lib/core/jobs/openrpc/examples/job_client.vsh b/lib/core/jobs/openrpc/examples/job_client.vsh new file mode 100755 index 00000000..bc2d03e7 --- /dev/null +++ b/lib/core/jobs/openrpc/examples/job_client.vsh @@ -0,0 +1,179 @@ +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.core.jobs.model +import net.websocket +import json +import rand +import time +import term + +const ws_url = 'ws://localhost:8080' + +// Helper function to send request and receive response +fn send_request(mut ws websocket.Client, request OpenRPCRequest) !OpenRPCResponse { + // Send request + request_json := json.encode(request) + println(request_json) + ws.write_string(request_json) or { + eprintln(term.red('Failed to send request: ${err}')) + return err + } + + // Wait for response + mut msg := ws.read_next_message() or { + eprintln(term.red('Failed to read response: ${err}')) + return err + } + + if msg.opcode != websocket.OPCode.text_frame { + return error('Invalid response type: expected text frame') + } + + response_text := msg.payload.bytestr() + + // Parse response + response := json.decode(OpenRPCResponse, response_text) or { + eprintln(term.red('Failed to decode response: ${err}')) + return err + } + return response +} + +// OpenRPC request/response structures (copied from handler.v) +struct OpenRPCRequest { + jsonrpc string @[required] + method string @[required] + params []string + id int @[required] +} + +struct OpenRPCResponse { + jsonrpc string @[required] + result string + error string + id int @[required] +} + +// Initialize and configure WebSocket client +fn init_client() !&websocket.Client { + mut ws := websocket.new_client(ws_url)! + + ws.on_open(fn (mut ws websocket.Client) ! { + println(term.green('Connected to WebSocket server and ready...')) + }) + + ws.on_error(fn (mut ws websocket.Client, err string) ! { + eprintln(term.red('WebSocket error: ${err}')) + }) + + ws.on_close(fn (mut ws websocket.Client, code int, reason string) ! { + println(term.yellow('WebSocket connection closed: ${reason}')) + }) + + ws.on_message(fn (mut ws websocket.Client, msg &websocket.Message) ! { + if msg.payload.len > 0 { + println(term.blue('Received message: ${msg.payload.bytestr()}')) + } + }) + + ws.connect() or { + eprintln(term.red('Failed to connect: ${err}')) + return err + } + + spawn ws.listen() + return ws +} + +// Main client logic +mut ws := init_client()! +defer { + ws.close(1000, 'normal') or { eprintln(term.red('Error closing connection: ${err}')) } +} +println(term.green('Connected to ${ws_url}')) + +// Create a new job +println(term.blue('\nCreating new job...')) +new_job := send_request(mut ws, OpenRPCRequest{ + jsonrpc: '2.0' + method: 'job.new' + params: []string{} + id: rand.i32_in_range(1, 10000000)! 
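+	// a random id lets the client correlate the response with this request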
+}) or { + eprintln(term.red('Failed to create new job: ${err}')) + exit(1) +} +println(term.green('Created new job:')) +println(json.encode_pretty(new_job)) + +// Parse job from response +job := json.decode(model.Job, new_job.result) or { + eprintln(term.red('Failed to parse job: ${err}')) + exit(1) +} + +// Set job properties +println(term.blue('\nSetting job properties...')) +mut updated_job := job +updated_job.guid = 'test-job-1' +updated_job.actor = 'vm_manager' +updated_job.action = 'start' +updated_job.params = { + 'name': 'test-vm' + 'memory': '2048' +} + +// Save job +set_response := send_request(mut ws, OpenRPCRequest{ + jsonrpc: '2.0' + method: 'job.set' + params: [json.encode(updated_job)] + id: rand.int() +}) or { + eprintln(term.red('Failed to save job: ${err}')) + exit(1) +} +println(term.green('Saved job:')) +println(json.encode_pretty(set_response)) + +// Update job status to running +println(term.blue('\nUpdating job status...')) +update_response := send_request(mut ws, OpenRPCRequest{ + jsonrpc: '2.0' + method: 'job.update_status' + params: ['test-job-1', 'running'] + id: rand.int() +}) or { + eprintln(term.red('Failed to update job status: ${err}')) + exit(1) +} +println(term.green('Updated job status:')) +println(json.encode_pretty(update_response)) + +// Get job to verify changes +println(term.blue('\nRetrieving job...')) +get_response := send_request(mut ws, OpenRPCRequest{ + jsonrpc: '2.0' + method: 'job.get' + params: ['test-job-1'] + id: rand.int() +}) or { + eprintln(term.red('Failed to retrieve job: ${err}')) + exit(1) +} +println(term.green('Retrieved job:')) +println(json.encode_pretty(get_response)) + +// List all jobs +println(term.blue('\nListing all jobs...')) +list_response := send_request(mut ws, OpenRPCRequest{ + jsonrpc: '2.0' + method: 'job.list' + params: []string{} + id: rand.int() +}) or { + eprintln(term.red('Failed to list jobs: ${err}')) + exit(1) +} +println(term.green('All jobs:')) +println(json.encode_pretty(list_response)) diff --git a/lib/core/jobs/openrpc/examples/server.vsh b/lib/core/jobs/openrpc/examples/server.vsh new file mode 100755 index 00000000..0e94d65a --- /dev/null +++ b/lib/core/jobs/openrpc/examples/server.vsh @@ -0,0 +1,40 @@ +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run + +import freeflowuniverse.herolib.core.jobs.openrpc +import freeflowuniverse.herolib.core.jobs.model +import time +import sync +import os + +fn start_rpc_server(mut wg sync.WaitGroup) ! { + defer { wg.done() } + + // Create OpenRPC server + openrpc.server_start()! +} + +fn start_ws_server(mut wg sync.WaitGroup) ! { + defer { wg.done() } + + // Get port from environment variable or use default + port := if ws_port := os.getenv_opt('WS_PORT') { + ws_port.int() + } else { + 8080 + } + + // Create and start WebSocket server + mut ws_server := openrpc.new_ws_server(port)! + ws_server.start()! 
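+	// start() only returns on shutdown; the WaitGroup in the main body keeps
+	// the script alive for both servers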
+} + +// Create wait group for servers +mut wg := sync.new_waitgroup() +wg.add(2) + +// Start servers in separate threads +spawn start_rpc_server(mut wg) +spawn start_ws_server(mut wg) + +// Wait for servers to finish (they run forever) +wg.wait() diff --git a/lib/core/jobs/openrpc/factory.v b/lib/core/jobs/openrpc/factory.v new file mode 100644 index 00000000..1097bb90 --- /dev/null +++ b/lib/core/jobs/openrpc/factory.v @@ -0,0 +1,27 @@ +module openrpc + +import freeflowuniverse.herolib.core.redisclient +import freeflowuniverse.herolib.core.jobs.model + +// Generic OpenRPC server that handles all managers +pub struct OpenRPCServer { +mut: + redis &redisclient.Redis + queue &redisclient.RedisQueue + runner &model.HeroRunner +} + +// Create new OpenRPC server with Redis connection +pub fn server_start() ! { + redis := redisclient.core_get()! + mut runner := model.new()! + mut s := &OpenRPCServer{ + redis: redis + queue: &redisclient.RedisQueue{ + key: rpc_queue + redis: redis + } + runner: runner + } + s.start()! +} diff --git a/lib/core/jobs/openrpc/handler.v b/lib/core/jobs/openrpc/handler.v new file mode 100644 index 00000000..80d89b00 --- /dev/null +++ b/lib/core/jobs/openrpc/handler.v @@ -0,0 +1,68 @@ +module openrpc + +import freeflowuniverse.herolib.core.redisclient +import json + +// Start the server and listen for requests +pub fn (mut s OpenRPCServer) start() ! { + println('Starting OpenRPC server.') + + for { + // Get message from queue + msg := s.queue.get(5000)! + + if msg.len == 0 { + println("queue '${rpc_queue}' empty") + continue + } + + println("process '${msg}'") + + // Parse OpenRPC request + request := json.decode(OpenRPCRequest, msg) or { + println('Error decoding request: ${err}') + continue + } + + // Process request with appropriate handler + response := s.handle_request(request)! + + // Send response back to Redis using response queue + response_json := json.encode(response) + key := '${rpc_queue}:${request.id}' + println('response: \n${response}\n put on return queue ${key} ') + mut response_queue := &redisclient.RedisQueue{ + key: key + redis: s.redis + } + response_queue.add(response_json)! 
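+		// each reply goes to its own queue (herorunner:q:rpc:<id>), so a
+		// caller can block on exactly the response matching its request id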
+ } +} + +// Get the handler for a specific method based on its prefix +fn (mut s OpenRPCServer) handle_request(request OpenRPCRequest) !OpenRPCResponse { + method := request.method.to_lower() + println("process: method: '${method}'") + if method.starts_with('job.') { + return s.handle_request_job(request) or { + return rpc_response_error(request.id, 'error in request job:\n${err}') + } + } + if method.starts_with('agent.') { + return s.handle_request_agent(request) or { + return rpc_response_error(request.id, 'error in request agent:\n${err}') + } + } + if method.starts_with('group.') { + return s.handle_request_group(request) or { + return rpc_response_error(request.id, 'error in request group:\n${err}') + } + } + if method.starts_with('service.') { + return s.handle_request_service(request) or { + return rpc_response_error(request.id, 'error in request service:\n${err}') + } + } + + return rpc_response_error(request.id, 'Could not find handler for ${method}') +} diff --git a/lib/core/jobs/openrpc/handler_agent_manager.v b/lib/core/jobs/openrpc/handler_agent_manager.v new file mode 100644 index 00000000..10d873c9 --- /dev/null +++ b/lib/core/jobs/openrpc/handler_agent_manager.v @@ -0,0 +1,71 @@ +module openrpc + +import freeflowuniverse.herolib.core.jobs.model +import json + +pub fn (mut h OpenRPCServer) handle_request_agent(request OpenRPCRequest) !OpenRPCResponse { + mut response := rpc_response_new(request.id) + + method := request.method.all_after_first('agent.') + + println("request agent:'${method}'") + + match method { + 'new' { + agent := h.runner.agents.new() + response.result = json.encode(agent) + } + 'set' { + if request.params.len < 1 { + return error('Missing agent parameter') + } + agent := json.decode(model.Agent, request.params[0])! + h.runner.agents.set(agent)! + response.result = 'true' + } + 'get' { + if request.params.len < 1 { + return error('Missing pubkey parameter') + } + agent := h.runner.agents.get(request.params[0])! + response.result = json.encode(agent) + } + 'list' { + agents := h.runner.agents.list()! + response.result = json.encode(agents) + } + 'delete' { + if request.params.len < 1 { + return error('Missing pubkey parameter') + } + h.runner.agents.delete(request.params[0])! + response.result = 'true' + } + 'update_status' { + if request.params.len < 2 { + return error('Missing pubkey or status parameters') + } + status := match request.params[1] { + 'ok' { model.AgentState.ok } + 'down' { model.AgentState.down } + 'error' { model.AgentState.error } + 'halted' { model.AgentState.halted } + else { return error('Invalid status: ${request.params[1]}') } + } + h.runner.agents.update_status(request.params[0], status)! + response.result = 'true' + } + 'get_by_service' { + if request.params.len < 2 { + return error('Missing actor or action parameters') + } + agents := h.runner.agents.get_by_service(request.params[0], request.params[1])! 
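+			// params[0] is the actor, params[1] the action; returns every
+			// agent advertising that actor/action pair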
+ response.result = json.encode(agents) + } + else { + return error('Unknown method: ${request.method}') + } + } + + return response +} diff --git a/lib/core/jobs/openrpc/handler_group_manager.v b/lib/core/jobs/openrpc/handler_group_manager.v new file mode 100644 index 00000000..187b0c96 --- /dev/null +++ b/lib/core/jobs/openrpc/handler_group_manager.v @@ -0,0 +1,68 @@ +module openrpc + +import freeflowuniverse.herolib.core.jobs.model +import json + +pub fn (mut h OpenRPCServer) handle_request_group(request OpenRPCRequest) !OpenRPCResponse { + mut response := rpc_response_new(request.id) + method := request.method.all_after_first('group.') + println("request group:'${method}'") + match method { + 'new' { + group := h.runner.groups.new() + response.result = json.encode(group) + } + 'set' { + if request.params.len < 1 { + return error('Missing group parameter') + } + group := json.decode(model.Group, request.params[0])! + h.runner.groups.set(group)! + response.result = 'true' + } + 'get' { + if request.params.len < 1 { + return error('Missing guid parameter') + } + group := h.runner.groups.get(request.params[0])! + response.result = json.encode(group) + } + 'list' { + groups := h.runner.groups.list()! + response.result = json.encode(groups) + } + 'delete' { + if request.params.len < 1 { + return error('Missing guid parameter') + } + h.runner.groups.delete(request.params[0])! + response.result = 'true' + } + 'add_member' { + if request.params.len < 2 { + return error('Missing guid or member parameters') + } + h.runner.groups.add_member(request.params[0], request.params[1])! + response.result = 'true' + } + 'remove_member' { + if request.params.len < 2 { + return error('Missing guid or member parameters') + } + h.runner.groups.remove_member(request.params[0], request.params[1])! + response.result = 'true' + } + 'get_user_groups' { + if request.params.len < 1 { + return error('Missing user_pubkey parameter') + } + groups := h.runner.groups.get_user_groups(request.params[0])! + response.result = json.encode(groups) + } + else { + return error('Unknown method: ${request.method}') + } + } + + return response +} diff --git a/lib/core/jobs/openrpc/handler_job_manager.v b/lib/core/jobs/openrpc/handler_job_manager.v new file mode 100644 index 00000000..4cddda21 --- /dev/null +++ b/lib/core/jobs/openrpc/handler_job_manager.v @@ -0,0 +1,66 @@ +module openrpc + +import freeflowuniverse.herolib.core.jobs.model +import json + +pub fn (mut h OpenRPCServer) handle_request_job(request OpenRPCRequest) !OpenRPCResponse { + mut response := rpc_response_new(request.id) + + method := request.method.all_after_first('job.') + println("request job:'${method}'") + println(request) + match method { + 'new' { + job := h.runner.jobs.new() + response.result = json.encode(job) + } + 'set' { + if request.params.len < 1 { + return error('Missing job parameter') + } + job := json.decode(model.Job, request.params[0])! + h.runner.jobs.set(job)! + response.result = 'true' + } + 'get' { + if request.params.len < 1 { + return error('Missing guid parameter') + } + job := h.runner.jobs.get(request.params[0])! + response.result = json.encode(job) + } + 'list' { + jobs := h.runner.jobs.list()! + response.result = json.encode(jobs) + } + 'delete' { + if request.params.len < 1 { + return error('Missing guid parameter') + } + h.runner.jobs.delete(request.params[0])! 
+ response.result = 'true' + } + 'update_status' { + if request.params.len < 2 { + return error('Missing guid or status parameters') + } + status := match request.params[1] { + 'created' { model.Status.created } + 'scheduled' { model.Status.scheduled } + 'planned' { model.Status.planned } + 'running' { model.Status.running } + 'error' { model.Status.error } + 'ok' { model.Status.ok } + else { return error('Invalid status: ${request.params[1]}') } + } + h.runner.jobs.update_status(request.params[0], status)! + job := h.runner.jobs.get(request.params[0])! // Get updated job to return + response.result = json.encode(job) + } + else { + return error('Unknown method: ${request.method}') + } + } + + return response +} diff --git a/lib/core/jobs/openrpc/handler_service_manager.v b/lib/core/jobs/openrpc/handler_service_manager.v new file mode 100644 index 00000000..b7a59cd2 --- /dev/null +++ b/lib/core/jobs/openrpc/handler_service_manager.v @@ -0,0 +1,80 @@ +module openrpc + +import freeflowuniverse.herolib.core.jobs.model +import json + +pub fn (mut h OpenRPCServer) handle_request_service(request OpenRPCRequest) !OpenRPCResponse { + mut response := rpc_response_new(request.id) + method := request.method.all_after_first('service.') + println("request service:'${method}'") + match method { + 'new' { + service := h.runner.services.new() + response.result = json.encode(service) + } + 'set' { + if request.params.len < 1 { + return error('Missing service parameter') + } + service := json.decode(model.Service, request.params[0])! + h.runner.services.set(service)! + response.result = 'true' + } + 'get' { + if request.params.len < 1 { + return error('Missing actor parameter') + } + service := h.runner.services.get(request.params[0])! + response.result = json.encode(service) + } + 'list' { + services := h.runner.services.list()! + response.result = json.encode(services) + } + 'delete' { + if request.params.len < 1 { + return error('Missing actor parameter') + } + h.runner.services.delete(request.params[0])! + response.result = 'true' + } + 'update_status' { + if request.params.len < 2 { + return error('Missing actor or status parameters') + } + status := match request.params[1] { + 'ok' { model.ServiceState.ok } + 'down' { model.ServiceState.down } + 'error' { model.ServiceState.error } + 'halted' { model.ServiceState.halted } + else { return error('Invalid status: ${request.params[1]}') } + } + h.runner.services.update_status(request.params[0], status)! + response.result = 'true' + } + 'get_by_action' { + if request.params.len < 1 { + return error('Missing action parameter') + } + services := h.runner.services.get_by_action(request.params[0])! + response.result = json.encode(services) + } + 'check_access' { + if request.params.len < 4 { + return error('Missing parameters: requires actor, action, user_pubkey, and groups') + } + // Parse groups array from JSON string + groups := json.decode([]string, request.params[3])! + has_access := h.runner.services.check_access(request.params[0], // actor + request.params[1], // action + request.params[2], // user_pubkey + groups)! 
+ response.result = json.encode(has_access) + } + else { + return error('Unknown method: ${request.method}') + } + } + + return response +} diff --git a/lib/core/jobs/openrpc/model.v b/lib/core/jobs/openrpc/model.v new file mode 100644 index 00000000..47078b7a --- /dev/null +++ b/lib/core/jobs/openrpc/model.v @@ -0,0 +1,37 @@ +module openrpc + +// Generic OpenRPC request/response structures +pub struct OpenRPCRequest { +pub mut: + jsonrpc string @[required] + method string @[required] + params []string + id int @[required] +} + +pub struct OpenRPCResponse { +pub mut: + jsonrpc string @[required] + result string + error string + id int @[required] +} + +fn rpc_response_new(id int) OpenRPCResponse { + mut response := OpenRPCResponse{ + jsonrpc: '2.0' + id: id + } + return response +} + +fn rpc_response_error(id int, errormsg string) OpenRPCResponse { + mut response := OpenRPCResponse{ + jsonrpc: '2.0' + id: id + error: errormsg + } + return response +} + +const rpc_queue = 'herorunner:q:rpc' diff --git a/lib/core/jobs/openrpc/specs/agent_manager_openrpc.json b/lib/core/jobs/openrpc/specs/agent_manager_openrpc.json new file mode 100644 index 00000000..a87d4602 --- /dev/null +++ b/lib/core/jobs/openrpc/specs/agent_manager_openrpc.json @@ -0,0 +1,302 @@ +{ + "openrpc": "1.2.6", + "info": { + "title": "AgentManager Service", + "version": "1.0.0", + "description": "OpenRPC specification for the AgentManager module and its methods." + }, + "methods": [ + { + "name": "new", + "summary": "Create a new Agent instance", + "description": "Returns a new Agent with default or empty fields set. Caller can then fill in details.", + "params": [], + "result": { + "name": "Agent", + "description": "A freshly created Agent object.", + "schema": { + "$ref": "#/components/schemas/Agent" + } + } + }, + { + "name": "set", + "summary": "Add or update an Agent in the system", + "description": "Stores an Agent in Redis by pubkey. Overwrites any previous entry with the same pubkey.", + "params": [ + { + "name": "agent", + "description": "The Agent instance to be added or updated.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Agent" + } + } + ], + "result": { + "name": "success", + "description": "Indicates success. No data returned on success.", + "schema": { + "type": "boolean" + } + } + }, + { + "name": "get", + "summary": "Retrieve an Agent by its public key", + "description": "Looks up a single Agent using its pubkey.", + "params": [ + { + "name": "pubkey", + "description": "The public key to look up.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "Agent", + "description": "The Agent that was requested, if found.", + "schema": { + "$ref": "#/components/schemas/Agent" + } + } + }, + { + "name": "list", + "summary": "List all Agents", + "description": "Returns an array of all known Agents.", + "params": [], + "result": { + "name": "Agents", + "description": "A list of all Agents in the system.", + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Agent" + } + } + } + }, + { + "name": "delete", + "summary": "Delete an Agent by its public key", + "description": "Removes an Agent from the system by pubkey.", + "params": [ + { + "name": "pubkey", + "description": "The public key of the Agent to be deleted.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "success", + "description": "Indicates success. 
No data returned on success.", + "schema": { + "type": "boolean" + } + } + }, + { + "name": "update_status", + "summary": "Update the status of an Agent", + "description": "Updates only the status field of the specified Agent.", + "params": [ + { + "name": "pubkey", + "description": "Public key of the Agent to update.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "status", + "description": "The new status to set for the Agent.", + "required": true, + "schema": { + "$ref": "#/components/schemas/AgentState" + } + } + ], + "result": { + "name": "success", + "description": "Indicates success. No data returned on success.", + "schema": { + "type": "boolean" + } + } + }, + { + "name": "get_by_service", + "summary": "Retrieve all Agents that provide a specific service action", + "description": "Filters Agents by matching actor and action in any of their declared services.", + "params": [ + { + "name": "actor", + "description": "The actor name to match.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "action", + "description": "The action name to match.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "Agents", + "description": "A list of Agents that match the specified service actor and action.", + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Agent" + } + } + } + } + ], + "components": { + "schemas": { + "Agent": { + "type": "object", + "properties": { + "pubkey": { + "type": "string", + "description": "Public key (ed25519) of the Agent." + }, + "address": { + "type": "string", + "description": "Network address or domain where the Agent can be reached." + }, + "port": { + "type": "integer", + "description": "Network port for the Agent (default: 9999)." + }, + "description": { + "type": "string", + "description": "Optional human-readable description of the Agent." + }, + "status": { + "$ref": "#/components/schemas/AgentStatus" + }, + "services": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AgentService" + }, + "description": "List of public services provided by the Agent." + }, + "signature": { + "type": "string", + "description": "Signature (by the Agent's private key) of address+port+description+status." + } + }, + "required": ["pubkey", "status", "services"] + }, + "AgentStatus": { + "type": "object", + "properties": { + "guid": { + "type": "string", + "description": "Unique ID for the job or session." + }, + "timestamp_first": { + "$ref": "#/components/schemas/OurTime", + "description": "Timestamp when this Agent first came online." + }, + "timestamp_last": { + "$ref": "#/components/schemas/OurTime", + "description": "Timestamp of the last heartbeat or update from the Agent." + }, + "status": { + "$ref": "#/components/schemas/AgentState" + } + } + }, + "AgentService": { + "type": "object", + "properties": { + "actor": { + "type": "string", + "description": "The actor name providing the service." + }, + "actions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AgentServiceAction" + }, + "description": "List of actions available for this service." + }, + "description": { + "type": "string", + "description": "Optional human-readable description for the service." + }, + "status": { + "$ref": "#/components/schemas/AgentServiceState" + } + }, + "required": ["actor", "actions", "status"] + }, + "AgentServiceAction": { + "type": "object", + "properties": { + "action": { + "type": "string", + "description": "Action name." 
+ }, + "description": { + "type": "string", + "description": "Optional description of this action." + }, + "params": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Dictionary of parameter names to parameter descriptions." + }, + "params_example": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Example values for the parameters." + }, + "status": { + "$ref": "#/components/schemas/AgentServiceState" + }, + "public": { + "type": "boolean", + "description": "Indicates if the action is publicly accessible to all or restricted." + } + }, + "required": ["action", "status", "public"] + }, + "AgentState": { + "type": "string", + "enum": ["ok", "down", "error", "halted"], + "description": "Possible states of an Agent." + }, + "AgentServiceState": { + "type": "string", + "enum": ["ok", "down", "error", "halted"], + "description": "Possible states of an Agent service or action." + }, + "OurTime": { + "type": "string", + "format": "date-time", + "description": "Represents a date/time or timestamp value." + } + } + } +} diff --git a/lib/core/jobs/openrpc/specs/group_manager_openrpc.json b/lib/core/jobs/openrpc/specs/group_manager_openrpc.json new file mode 100644 index 00000000..e931ab87 --- /dev/null +++ b/lib/core/jobs/openrpc/specs/group_manager_openrpc.json @@ -0,0 +1,218 @@ +{ + "openrpc": "1.2.6", + "info": { + "title": "Group Manager API", + "version": "1.0.0", + "description": "An OpenRPC specification for Group Manager methods" + }, + "servers": [ + { + "name": "Local", + "url": "http://localhost:8080" + } + ], + "methods": [ + { + "name": "GroupManager.new", + "summary": "Create a new (in-memory) Group instance", + "description": "Creates a new group object. Note that this does NOT store it in Redis. 
The caller must set the group’s GUID and then call `GroupManager.set` if they wish to persist it.", + "params": [], + "result": { + "name": "group", + "description": "The newly-created group instance", + "schema": { + "$ref": "#/components/schemas/Group" + } + } + }, + { + "name": "GroupManager.set", + "summary": "Add or update a Group in Redis", + "description": "Stores the specified group in Redis using the group’s GUID as the key.", + "params": [ + { + "name": "group", + "description": "The group object to store", + "schema": { + "$ref": "#/components/schemas/Group" + } + } + ], + "result": { + "name": "result", + "description": "No return value", + "schema": { + "type": "null" + } + } + }, + { + "name": "GroupManager.get", + "summary": "Retrieve a Group by GUID", + "description": "Fetches the group from Redis using the provided GUID.", + "params": [ + { + "name": "guid", + "description": "The group’s unique identifier", + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "group", + "description": "The requested group", + "schema": { + "$ref": "#/components/schemas/Group" + } + } + }, + { + "name": "GroupManager.list", + "summary": "List all Groups", + "description": "Returns an array containing all groups stored in Redis.", + "params": [], + "result": { + "name": "groups", + "description": "All currently stored groups", + "schema": { + "$ref": "#/components/schemas/GroupList" + } + } + }, + { + "name": "GroupManager.delete", + "summary": "Delete a Group by GUID", + "description": "Removes the specified group from Redis by its GUID.", + "params": [ + { + "name": "guid", + "description": "The group’s unique identifier", + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "result", + "description": "No return value", + "schema": { + "type": "null" + } + } + }, + { + "name": "GroupManager.add_member", + "summary": "Add a member to a Group", + "description": "Adds a user pubkey or another group’s GUID to the member list of the specified group. 
Does not add duplicates.", + "params": [ + { + "name": "guid", + "description": "The target group’s unique identifier", + "schema": { + "type": "string" + } + }, + { + "name": "member", + "description": "Pubkey or group GUID to be added to the group", + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "result", + "description": "No return value", + "schema": { + "type": "null" + } + } + }, + { + "name": "GroupManager.remove_member", + "summary": "Remove a member from a Group", + "description": "Removes a user pubkey or another group’s GUID from the member list of the specified group.", + "params": [ + { + "name": "guid", + "description": "The target group’s unique identifier", + "schema": { + "type": "string" + } + }, + { + "name": "member", + "description": "Pubkey or group GUID to be removed from the group", + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "result", + "description": "No return value", + "schema": { + "type": "null" + } + } + }, + { + "name": "GroupManager.get_user_groups", + "summary": "List Groups that a user belongs to (directly or indirectly)", + "description": "Checks each group (and nested groups) to see if the user pubkey is a member, returning all groups in which the user is included (including membership through nested groups).", + "params": [ + { + "name": "user_pubkey", + "description": "The pubkey of the user to check", + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "groups", + "description": "A list of groups to which the user belongs", + "schema": { + "$ref": "#/components/schemas/GroupList" + } + } + } + ], + "components": { + "schemas": { + "Group": { + "type": "object", + "properties": { + "guid": { + "type": "string", + "description": "Unique ID for the group" + }, + "name": { + "type": "string", + "description": "Name of the group" + }, + "description": { + "type": "string", + "description": "Optional description of the group" + }, + "members": { + "type": "array", + "description": "List of user pubkeys or other group GUIDs", + "items": { + "type": "string" + } + } + }, + "required": ["guid", "members"] + }, + "GroupList": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + } + } + } + } + } + \ No newline at end of file diff --git a/lib/core/jobs/openrpc/specs/job_manager_openrpc.json b/lib/core/jobs/openrpc/specs/job_manager_openrpc.json new file mode 100644 index 00000000..c27efc9a --- /dev/null +++ b/lib/core/jobs/openrpc/specs/job_manager_openrpc.json @@ -0,0 +1,304 @@ +{ + "openrpc": "1.2.6", + "info": { + "title": "JobManager OpenRPC Specification", + "version": "1.0.0", + "description": "OpenRPC specification for the JobManager module which handles job operations." + }, + "servers": [ + { + "name": "Local", + "url": "http://localhost:8080/rpc" + } + ], + "methods": [ + { + "name": "newJob", + "summary": "Create a new Job instance", + "description": "Creates a new Job with default/empty values. The GUID is left empty for the caller to fill.", + "params": [], + "result": { + "name": "job", + "description": "A newly created Job object, not yet persisted.", + "schema": { + "$ref": "#/components/schemas/Job" + } + } + }, + { + "name": "setJob", + "summary": "Add or update a Job in the system (Redis)", + "description": "Persists the given Job into the data store. 
If the GUID already exists, the existing job is overwritten.", + "params": [ + { + "name": "job", + "description": "The Job object to store or update.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Job" + } + } + ], + "result": { + "name": "success", + "description": "Indicates if the operation was successful.", + "schema": { + "type": "boolean" + } + } + }, + { + "name": "getJob", + "summary": "Retrieve a Job by its GUID", + "description": "Fetches an existing Job from the data store using its unique GUID.", + "params": [ + { + "name": "guid", + "description": "The GUID of the Job to retrieve.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "job", + "description": "The retrieved Job object.", + "schema": { + "$ref": "#/components/schemas/Job" + } + } + }, + { + "name": "listJobs", + "summary": "List all Jobs", + "description": "Returns an array of all Jobs present in the data store.", + "params": [], + "result": { + "name": "jobs", + "description": "Array of all Job objects found.", + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + { + "name": "deleteJob", + "summary": "Remove a Job by its GUID", + "description": "Deletes a specific Job from the data store by its GUID.", + "params": [ + { + "name": "guid", + "description": "The GUID of the Job to delete.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "success", + "description": "Indicates if the job was successfully deleted.", + "schema": { + "type": "boolean" + } + } + }, + { + "name": "updateJobStatus", + "summary": "Update the status of a Job", + "description": "Sets the status field of a Job in the data store.", + "params": [ + { + "name": "guid", + "description": "The GUID of the Job to update.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "status", + "description": "The new status for the Job.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Status" + } + } + ], + "result": { + "name": "job", + "description": "The updated Job object with new status applied.", + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + ], + "components": { + "schemas": { + "Job": { + "type": "object", + "properties": { + "guid": { + "type": "string", + "description": "Unique ID for the Job." + }, + "agents": { + "type": "array", + "description": "Public keys of the agent(s) which will execute the command.", + "items": { + "type": "string" + } + }, + "source": { + "type": "string", + "description": "Pubkey of the agent who requested the job." + }, + "circle": { + "type": "string", + "description": "Digital-life circle name this Job belongs to.", + "default": "default" + }, + "context": { + "type": "string", + "description": "High-level context for the Job inside a circle.", + "default": "default" + }, + "actor": { + "type": "string", + "description": "Actor name that will handle the Job (e.g. `vm_manager`)." + }, + "action": { + "type": "string", + "description": "Action to be taken by the actor (e.g. `start`)." 
+ }, + "params": { + "type": "object", + "description": "Key-value parameters for the action to be performed.", + "additionalProperties": { + "type": "string" + } + }, + "timeout_schedule": { + "type": "integer", + "description": "Timeout (in seconds) before the job is picked up by an agent.", + "default": 60 + }, + "timeout": { + "type": "integer", + "description": "Timeout (in seconds) for the job to complete.", + "default": 3600 + }, + "log": { + "type": "boolean", + "description": "Whether to log job details.", + "default": true + }, + "ignore_error": { + "type": "boolean", + "description": "If true, job errors do not cause an exception to be raised." + }, + "ignore_error_codes": { + "type": "array", + "description": "Array of error codes to ignore.", + "items": { + "type": "integer" + } + }, + "debug": { + "type": "boolean", + "description": "If true, additional debug information is provided.", + "default": false + }, + "retry": { + "type": "integer", + "description": "Number of retries allowed on error.", + "default": 0 + }, + "status": { + "$ref": "#/components/schemas/JobStatus" + }, + "dependencies": { + "type": "array", + "description": "List of job dependencies that must complete before this job executes.", + "items": { + "$ref": "#/components/schemas/JobDependency" + } + } + }, + "required": [ + "guid", + "status" + ] + }, + "JobStatus": { + "type": "object", + "properties": { + "guid": { + "type": "string", + "description": "Unique ID for the Job (mirrors the parent job GUID)." + }, + "created": { + "type": "string", + "format": "date-time", + "description": "When the job was created." + }, + "start": { + "type": "string", + "format": "date-time", + "description": "When the job was picked up to start." + }, + "end": { + "type": "string", + "format": "date-time", + "description": "When the job ended." + }, + "status": { + "$ref": "#/components/schemas/Status" + } + }, + "required": [ + "guid", + "created", + "status" + ] + }, + "JobDependency": { + "type": "object", + "properties": { + "guid": { + "type": "string", + "description": "Unique ID of the Job this dependency points to." + }, + "agents": { + "type": "array", + "description": "Possible agent(s) who can execute the dependency.", + "items": { + "type": "string" + } + } + }, + "required": [ + "guid" + ] + }, + "Status": { + "type": "string", + "enum": [ + "created", + "scheduled", + "planned", + "running", + "error", + "ok" + ], + "description": "Enumerates the possible states of a Job." + } + } + } + } + \ No newline at end of file diff --git a/lib/core/jobs/openrpc/specs/service_manager_openrpc.json b/lib/core/jobs/openrpc/specs/service_manager_openrpc.json new file mode 100644 index 00000000..c551787d --- /dev/null +++ b/lib/core/jobs/openrpc/specs/service_manager_openrpc.json @@ -0,0 +1,301 @@ +{ + "openrpc": "1.2.6", + "info": { + "title": "ServiceManager API", + "version": "1.0.0", + "description": "OpenRPC 2.0 spec for managing services with ServiceManager." + }, + "servers": [ + { + "name": "Local", + "url": "http://localhost:8080" + } + ], + "methods": [ + { + "name": "ServiceManager_new", + "summary": "Create a new Service instance (not saved to Redis yet).", + "description": "Creates and returns a new empty Service object with default values. 
The `actor` field remains empty until the caller sets it.", + "params": [], + "result": { + "name": "service", + "$ref": "#/components/schemas/Service" + } + }, + { + "name": "ServiceManager_set", + "summary": "Add or update a Service in Redis.", + "description": "Stores the Service in Redis, identified by its `actor` property.", + "params": [ + { + "name": "service", + "schema": { + "$ref": "#/components/schemas/Service" + } + } + ], + "result": { + "name": "success", + "schema": { + "type": "boolean", + "description": "True if operation succeeds." + } + } + }, + { + "name": "ServiceManager_get", + "summary": "Retrieve a Service by actor name.", + "description": "Gets the Service object from Redis using the given actor name.", + "params": [ + { + "name": "actor", + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "service", + "$ref": "#/components/schemas/Service" + } + }, + { + "name": "ServiceManager_list", + "summary": "List all Services.", + "description": "Returns an array of all Services stored in Redis.", + "params": [], + "result": { + "name": "services", + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Service" + } + } + } + }, + { + "name": "ServiceManager_delete", + "summary": "Delete a Service by actor name.", + "description": "Removes the Service from Redis using the given actor name.", + "params": [ + { + "name": "actor", + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "success", + "schema": { + "type": "boolean" + } + } + }, + { + "name": "ServiceManager_update_status", + "summary": "Update the status of a given Service.", + "description": "Updates only the `status` field of a Service specified by its actor name.", + "params": [ + { + "name": "actor", + "schema": { + "type": "string" + } + }, + { + "name": "status", + "schema": { + "$ref": "#/components/schemas/ServiceState" + } + } + ], + "result": { + "name": "success", + "schema": { + "type": "boolean" + } + } + }, + { + "name": "ServiceManager_get_by_action", + "summary": "Retrieve Services by action name.", + "description": "Returns all Services that provide the specified action.", + "params": [ + { + "name": "action", + "schema": { + "type": "string" + } + } + ], + "result": { + "name": "services", + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Service" + } + } + } + }, + { + "name": "ServiceManager_check_access", + "summary": "Check if a user has access to a Service action.", + "description": "Verifies if a user (and any groups they belong to) has the right to invoke a specified action on a given Service.", + "params": [ + { + "name": "actor", + "schema": { + "type": "string" + } + }, + { + "name": "action", + "schema": { + "type": "string" + } + }, + { + "name": "user_pubkey", + "schema": { + "type": "string" + } + }, + { + "name": "groups", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + ], + "result": { + "name": "hasAccess", + "schema": { + "type": "boolean" + } + } + } + ], + "components": { + "schemas": { + "Service": { + "type": "object", + "properties": { + "actor": { + "type": "string", + "description": "The actor (unique name) providing the service." + }, + "actions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ServiceAction" + }, + "description": "A list of actions available in this service." + }, + "description": { + "type": "string", + "description": "Optional description of the service." 
+ }, + "status": { + "$ref": "#/components/schemas/ServiceState", + "description": "The current state of the service." + }, + "acl": { + "$ref": "#/components/schemas/ACL", + "description": "An optional access control list for the entire service." + } + }, + "required": ["actor", "actions", "status"] + }, + "ServiceAction": { + "type": "object", + "properties": { + "action": { + "type": "string", + "description": "A unique identifier for the action." + }, + "description": { + "type": "string", + "description": "Optional description of this action." + }, + "params": { + "type": "object", + "description": "Parameter definitions for this action.", + "additionalProperties": { + "type": "string" + } + }, + "params_example": { + "type": "object", + "description": "Example parameters for this action.", + "additionalProperties": { + "type": "string" + } + }, + "acl": { + "$ref": "#/components/schemas/ACL", + "description": "Optional ACL specifically for this action." + } + }, + "required": ["action"] + }, + "ACL": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "A friendly name for the ACL." + }, + "ace": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ACE" + }, + "description": "A list of Access Control Entries." + } + }, + "required": ["ace"] + }, + "ACE": { + "type": "object", + "properties": { + "groups": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of group IDs that have this permission." + }, + "users": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of user public keys that have this permission." + }, + "right": { + "type": "string", + "description": "Permission type (e.g. 'read', 'write', 'admin', 'block')." + } + }, + "required": ["right"] + }, + "ServiceState": { + "type": "string", + "enum": [ + "ok", + "down", + "error", + "halted" + ], + "description": "Possible states of a service." + } + } + } + } + \ No newline at end of file diff --git a/lib/core/jobs/openrpc/ws_server.v b/lib/core/jobs/openrpc/ws_server.v new file mode 100644 index 00000000..ded269c6 --- /dev/null +++ b/lib/core/jobs/openrpc/ws_server.v @@ -0,0 +1,93 @@ +module openrpc + +import net.websocket +import freeflowuniverse.herolib.core.redisclient +import json +import rand + +// WebSocket server that receives RPC requests +pub struct WSServer { +mut: + redis &redisclient.Redis + queue &redisclient.RedisQueue + port int = 8080 // Default port, can be configured +} + +// Create new WebSocket server +pub fn new_ws_server(port int) !&WSServer { + mut redis := redisclient.core_get()! + return &WSServer{ + redis: redis + queue: &redisclient.RedisQueue{ + key: rpc_queue + redis: redis + } + port: port + } +} + +// Start the WebSocket server +pub fn (mut s WSServer) start() ! { + mut ws_server := websocket.new_server(.ip, s.port, '') + + // Handle new WebSocket connections + ws_server.on_connect(fn (mut ws websocket.ServerClient) !bool { + println('New WebSocket client connected') + return true + })! + + // Handle client disconnections + ws_server.on_close(fn (mut ws websocket.Client, code int, reason string) ! { + println('WebSocket client disconnected (code: ${code}, reason: ${reason})') + }) + + // Handle incoming messages + ws_server.on_message(fn [mut s] (mut ws websocket.Client, msg &websocket.Message) ! 
{ + if msg.opcode != .text_frame { + println('WebSocket unknown msg opcode (code: ${msg.opcode})') + return + } + + // Parse request + request := json.decode(OpenRPCRequest, msg.payload.bytestr()) or { + error_msg := '{"jsonrpc":"2.0","error":"Invalid JSON-RPC request","id":null}' + println(error_msg) + ws.write(error_msg.bytes(), websocket.OPCode.text_frame) or { panic(err) } + return + } + + // Generate unique request ID if not provided + mut req_id := request.id + if req_id == 0 { + req_id = rand.i32_in_range(1, 10000000)! + } + + println('WebSocket put on queue: \'${rpc_queue}\' (msg: ${msg.payload.bytestr()})') + // Send request to Redis queue + s.queue.add(msg.payload.bytestr())! + + returnkey := '${rpc_queue}:${req_id}' + mut queue_return := &redisclient.RedisQueue{ + key: returnkey + redis: s.redis + } + + // Wait for response + response := queue_return.get(30)! + if response.len < 2 { + error_msg := '{"jsonrpc":"2.0","error":"Timeout waiting for response","id":${req_id}}' + println('WebSocket error response (err: ${response})') + ws.write(error_msg.bytes(), websocket.OPCode.text_frame) or { panic(err) } + return + } + + println('WebSocket ok response (msg: ${response[1]})') + // Send response back to WebSocket client + response_str := response[1].str() + ws.write(response_str.bytes(), websocket.OPCode.text_frame) or { panic(err) } + }) + + // Start server + println('WebSocket server listening on port ${s.port}') + ws_server.listen() or { return error('Failed to start WebSocket server: ${err}') } +} diff --git a/lib/core/log/backend_db.v b/lib/core/log/backend_db.v deleted file mode 100644 index 2417ae8d..00000000 --- a/lib/core/log/backend_db.v +++ /dev/null @@ -1,25 +0,0 @@ -module log - -import db.sqlite - -pub struct DBBackend { -pub: - db sqlite.DB -} - -@[params] -pub struct DBBackendConfig { -pub: - db sqlite.DB -} - -// factory for -pub fn new_backend(config DBBackendConfig) !DBBackend { - sql config.db { - create table Log - } or { panic(err) } - - return DBBackend{ - db: config.db - } -} \ No newline at end of file diff --git a/lib/core/log/events.v b/lib/core/log/events.v deleted file mode 100644 index 555f35f0..00000000 --- a/lib/core/log/events.v +++ /dev/null @@ -1,10 +0,0 @@ -module log - -import time - -@[params] -pub struct ViewEvent { -pub mut: - page string - duration time.Duration -} \ No newline at end of file diff --git a/lib/core/log/factory.v b/lib/core/log/factory.v deleted file mode 100644 index 3547d0f2..00000000 --- a/lib/core/log/factory.v +++ /dev/null @@ -1,18 +0,0 @@ -module log - -import db.sqlite - -pub struct Logger { - db_path string - // DBBackend -} - -pub fn new(db_path string) !Logger { - db := sqlite.connect(db_path)! - sql db { - create table Log - } or { panic(err) } - return Logger{ - db_path: db_path - } -} diff --git a/lib/core/log/logger.v b/lib/core/log/logger.v deleted file mode 100644 index a91e82c4..00000000 --- a/lib/core/log/logger.v +++ /dev/null @@ -1,55 +0,0 @@ -module log - -import db.sqlite - -pub fn (logger Logger) new_log(log Log) ! { - db := sqlite.connect(logger.db_path)! - - sql db { - insert log into Log - }! -} - -pub struct LogFilter { - Log - matches_all bool - limit int -} - -pub fn (logger Logger) filter_logs(filter LogFilter) ![]Log { - db := sqlite.connect(logger.db_path)! 
- mut select_stmt := 'select * from Log' - - mut matchers := []string{} - if filter.event != '' { - matchers << "event == '${filter.event}'" - } - - if filter.subject != '' { - matchers << "subject == '${filter.subject}'" - } - - if filter.object != '' { - matchers << "object == '${filter.object}'" - } - - if matchers.len > 0 { - matchers_str := if filter.matches_all { - matchers.join(' AND ') - } else { - matchers.join(' OR ') - } - select_stmt += ' where ${matchers_str}' - } - - responses := db.exec(select_stmt)! - - mut logs := []Log{} - for response in responses { - logs << sql db { - select from Log where id == response.vals[0].int() - }! - } - - return logs -} \ No newline at end of file diff --git a/lib/core/log/model.v b/lib/core/log/model.v deleted file mode 100644 index 852b2021..00000000 --- a/lib/core/log/model.v +++ /dev/null @@ -1,32 +0,0 @@ -module log - -import time - -pub struct Log { - id int @[primary; sql: serial] -pub: - timestamp time.Time -pub mut: - event string - subject string - object string - message string // a custom message that can be attached to a log -} - -// pub struct Event { -// name string -// description string -// } - -// // log_request logs http requests -// pub fn create_log(log Log) Log { -// return Log{ -// ...log -// timestamp: time.now() -// }) -// } - -// // log_request logs http requests -// pub fn (mut a Analyzer) get_logs(subject string) []Log { -// return []Log{} -// } diff --git a/lib/core/pathlib/path_tools.v b/lib/core/pathlib/path_tools.v index 072b4f70..d492ccd7 100644 --- a/lib/core/pathlib/path_tools.v +++ b/lib/core/pathlib/path_tools.v @@ -76,7 +76,7 @@ pub fn (mut path Path) expand(dest string) !Path { if path.name().to_lower().ends_with('.tar.gz') || path.name().to_lower().ends_with('.tgz') { cmd := 'tar -xzvf ${path.path} -C ${desto.path}' - console.print_debug(cmd) + // console.print_debug(cmd) res := os.execute(cmd) if res.exit_code > 0 { return error('Could not expand.\n${res}') @@ -136,7 +136,7 @@ pub fn find_common_ancestor(paths_ []string) string { } } paths := paths_.map(os.abs_path(os.real_path(it))) // get the real path (symlinks... resolved) - console.print_debug(paths.str()) + // console.print_debug(paths.str()) parts := paths[0].split('/') mut totest_prev := '/' for i in 1 .. parts.len { @@ -223,7 +223,7 @@ pub fn (mut path Path) move(args MoveArgs) ! { // that last dir needs to move 1 up pub fn (mut path Path) moveup_single_subdir() ! { mut plist := path.list(recursive: false, ignoredefault: true, dirs_only: true)! - console.print_debug(plist.str()) + // console.print_debug(plist.str()) if plist.paths.len != 1 { return error('could not find one subdir in ${path.path} , so cannot move up') } diff --git a/lib/core/playcmds/play_core.v b/lib/core/playcmds/play_core.v index cad378a8..907c6df7 100644 --- a/lib/core/playcmds/play_core.v +++ b/lib/core/playcmds/play_core.v @@ -20,16 +20,15 @@ pub fn play_core(mut plbook playbook.PlayBook) ! { if p.exists('coderoot') { panic('implement') - mut coderoot := p.get_path_create('coderoot')! - - mut gs := gittools.get()! + // mut coderoot := p.get_path_create('coderoot')! + // mut gs := gittools.get()! } action.done = true } for mut action in plbook.find(filter: 'session.')! { - mut p := action.params - mut session := plbook.session + // mut p := action.params + // mut session := plbook.session //!!session.env_set key:'JWT_SHARED_KEY' val:'...' 
diff --git a/lib/core/playcmds/play_doctree.v b/lib/core/playcmds/play_doctree.v index e7dfe16a..6fee2a82 100644 --- a/lib/core/playcmds/play_doctree.v +++ b/lib/core/playcmds/play_doctree.v @@ -28,11 +28,11 @@ pub fn play_doctree(mut plbook playbook.PlayBook) ! { for mut action in plbook.find(filter: 'doctree:add')! { mut p := action.params - url := p.get_default('url', '')! - path := p.get_default('path', '')! + // url := p.get_default('url', '')! + // path := p.get_default('path', '')! name := p.get('name')! - mut tree := trees[name] or { return error('tree ${name} not found') } + _ := trees[name] or { return error('tree ${name} not found') } // tree.scan( // path: path @@ -63,7 +63,7 @@ pub fn play_doctree(mut plbook playbook.PlayBook) ! { for mut action in plbook.find(filter: 'doctree:export')! { panic('implement') mut p := action.params - name := p.get('name')! + _ := p.get('name')! action.done = true } } diff --git a/lib/core/playcmds/play_luadns.v b/lib/core/playcmds/play_luadns.v index 350dcd17..fd9a2ae9 100644 --- a/lib/core/playcmds/play_luadns.v +++ b/lib/core/playcmds/play_luadns.v @@ -5,12 +5,13 @@ import freeflowuniverse.herolib.core.playbook import os pub fn play_luadns(mut plbook playbook.PlayBook) ! { - mut buildroot := '${os.home_dir()}/hero/var/mdbuild' - mut publishroot := '${os.home_dir()}/hero/www/info' - mut coderoot := '' + // Variables below are not used, commenting them out + // mut buildroot := '${os.home_dir()}/hero/var/mdbuild' + // mut publishroot := '${os.home_dir()}/hero/www/info' + // mut coderoot := '' // mut install := false - mut reset := false - mut pull := false + // mut reset := false + // mut pull := false for mut action in plbook.find(filter: 'luadns.set_domain')! { mut p := action.params diff --git a/lib/core/redisclient/rpc_test.v b/lib/core/redisclient/rpc_test.v index 5dbc5713..59823af4 100644 --- a/lib/core/redisclient/rpc_test.v +++ b/lib/core/redisclient/rpc_test.v @@ -5,7 +5,7 @@ fn setup() !&redisclient.Redis { mut redis := redisclient.core_get()! // Select db 10 to be away from default one '0' redis.selectdb(10) or { panic(err) } - return &redis + return redis } fn cleanup(mut redis redisclient.Redis) ! { @@ -25,7 +25,8 @@ fn test_rpc() { mut r := redis.rpc_get('testrpc') r.call(cmd: 'test.cmd', data: 'this is my data, normally json', wait: false)! - returnqueue := r.process(10000, process_test)! + + returnqueue := r.process(process_test, timeout: 10000)! mut res := r.result(10000, returnqueue)! console.print_debug(res) diff --git a/lib/data/cache/README.md b/lib/data/cache/README.md new file mode 100644 index 00000000..d0bb068b --- /dev/null +++ b/lib/data/cache/README.md @@ -0,0 +1,139 @@ +# HeroLib Cache System + +A high-performance, generic in-memory caching system for V with support for TTL, size limits, and LRU eviction. 
+ +## Features + +- Generic type support (can cache any type) +- Configurable maximum entries and memory size limits +- Time-To-Live (TTL) support +- Least Recently Used (LRU) eviction policy +- Memory-aware caching with size-based eviction +- Thread-safe operations +- Optional persistence support (configurable) + +## Configuration + +The cache system is highly configurable through the `CacheConfig` struct: + +```v +pub struct CacheConfig { +pub mut: + max_entries u32 = 1000 // Maximum number of entries + max_size_mb f64 = 100.0 // Maximum cache size in MB + ttl_seconds i64 = 3600 // Time-to-live in seconds (0 = no TTL) + eviction_ratio f64 = 0.05 // Percentage of entries to evict when full (5%) + persist bool // Whether to persist cache to disk +} +``` + +## Basic Usage + +Here's a simple example of using the cache: + +```v +import freeflowuniverse.herolib.data.cache + +// Define your struct type +@[heap] +struct User { + id u32 + name string + age int +} + +fn main() { + // Create a cache with default configuration + mut user_cache := cache.new_cache[User]() + + // Create a user + user := &User{ + id: 1 + name: 'Alice' + age: 30 + } + + // Add to cache + user_cache.set(user.id, user) + + // Retrieve from cache + if cached_user := user_cache.get(1) { + println('Found user: ${cached_user.name}') + } +} +``` + +## Advanced Usage + +### Custom Configuration + +```v +mut user_cache := cache.new_cache[User]( + max_entries: 1000 // Maximum number of entries + max_size_mb: 10.0 // Maximum cache size in MB + ttl_seconds: 300 // Items expire after 5 minutes + eviction_ratio: 0.2 // Evict 20% of entries when full +) +``` + +### Memory Management + +The cache automatically manages memory using two mechanisms: + +1. **Entry Count Limit**: When `max_entries` is reached, least recently used items are evicted. +2. **Memory Size Limit**: When `max_size_mb` is reached, items are evicted based on the `eviction_ratio`. + +```v +// Create a cache with strict memory limits +config := cache.CacheConfig{ + max_entries: 100 // Only keep 100 entries maximum + max_size_mb: 1.0 // Limit cache to 1MB + eviction_ratio: 0.1 // Remove 10% of entries when full +} +``` + +### Cache Operations + +```v +mut cache := cache.new_cache[User](cache.CacheConfig{}) + +// Add/update items +cache.set(1, user1) +cache.set(2, user2) + +// Get items +if user := cache.get(1) { + // Use cached user +} + +// Check cache size +println('Cache entries: ${cache.len()}') + +// Clear the cache +cache.clear() +``` + +## Best Practices + +1. **Choose Appropriate TTL**: Set TTL based on how frequently your data changes and how critical freshness is. + +2. **Memory Management**: + - Set reasonable `max_entries` and `max_size_mb` limits based on your application's memory constraints + - Monitor cache size using `len()` + - Use appropriate `eviction_ratio` (typically 0.05-0.2) to balance performance and memory usage + +3. **Type Safety**: + - Always use `@[heap]` attribute for structs stored in cache + - Ensure cached types are properly memory managed + +4. **Error Handling**: + - Always use option types when retrieving items (`if value := cache.get(key) {`) + - Handle cache misses gracefully + +5. **Performance**: + - Consider the trade-off between cache size and hit rate + - Monitor and adjust TTL and eviction settings based on usage patterns + +## Thread Safety + +The cache implementation is thread-safe for concurrent access. 
However, when using the cache in a multi-threaded environment, ensure proper synchronization when accessing cached objects. diff --git a/lib/data/cache/cache.v b/lib/data/cache/cache.v new file mode 100644 index 00000000..fd04bb68 --- /dev/null +++ b/lib/data/cache/cache.v @@ -0,0 +1,167 @@ +module cache + +import time +import math + +// CacheConfig holds cache configuration parameters +pub struct CacheConfig { +pub mut: + max_entries u32 = 1000 // Maximum number of entries + max_size_mb f64 = 100.0 // Maximum cache size in MB + ttl_seconds i64 = 3600 // Time-to-live in seconds (0 = no TTL) + eviction_ratio f64 = 0.05 // Percentage of entries to evict when full (5%) +} + +// CacheEntry represents a cached object with its metadata +@[heap] +struct CacheEntry[T] { +mut: + obj T // Reference to the cached object + last_access i64 // Unix timestamp of last access + created_at i64 // Unix timestamp of creation + size u32 // Approximate size in bytes +} + +// Cache manages the in-memory caching of objects +pub struct Cache[T] { +mut: + entries map[u32]&CacheEntry[T] // Map of object ID to cache entry + config CacheConfig // Cache configuration + access_log []u32 // Ordered list of object IDs by access time + total_size u64 // Total size of cached entries in bytes +} + +// new_cache creates a new cache instance with the given configuration +pub fn new_cache[T](config CacheConfig) &Cache[T] { + return &Cache[T]{ + entries: map[u32]&CacheEntry[T]{} + config: config + access_log: []u32{cap: int(config.max_entries)} + total_size: 0 + } +} + +// get retrieves an object from the cache if it exists +pub fn (mut c Cache[T]) get(id u32) ?&T { + if entry := c.entries[id] { + now := time.now().unix() + + // Check TTL + if c.config.ttl_seconds > 0 { + if (now - entry.created_at) > c.config.ttl_seconds { + c.remove(id) + return none + } + } + + // Update access time + unsafe { + entry.last_access = now + } + // Move ID to end of access log + idx := c.access_log.index(id) + if idx >= 0 { + c.access_log.delete(idx) + } + c.access_log << id + + return &entry.obj + } + return none +} + +// set adds or updates an object in the cache +pub fn (mut c Cache[T]) set(id u32, obj &T) { + now := time.now().unix() + + // Calculate entry size (approximate) + entry_size := sizeof(T) + sizeof(CacheEntry[T]) + + // Check memory and entry count limits + new_total := c.total_size + u64(entry_size) + max_bytes := u64(c.config.max_size_mb * 1024 * 1024) + + // Always evict if we're at or above max_entries + if c.entries.len >= int(c.config.max_entries) { + c.evict() + } else if new_total > max_bytes { + // Otherwise evict only if we're over memory limit + c.evict() + } + + // Create new entry + entry := &CacheEntry[T]{ + obj: *obj + last_access: now + created_at: now + size: u32(entry_size) + } + + // Update total size + if old := c.entries[id] { + c.total_size -= u64(old.size) + } + c.total_size += u64(entry_size) + + // Add to entries map + c.entries[id] = entry + + // Update access log + idx := c.access_log.index(id) + if idx >= 0 { + c.access_log.delete(idx) + } + c.access_log << id + + // Ensure access_log stays in sync with entries + if c.access_log.len > c.entries.len { + c.access_log = c.access_log[c.access_log.len - c.entries.len..] 
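+		// i.e. keep only the newest `entries.len` ids in the access log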
+	}
+}
+
+// evict removes entries based on configured eviction ratio
+fn (mut c Cache[T]) evict() {
+	// If we're at max entries, remove enough to get to 80% capacity
+	target_size := int(c.config.max_entries) * 8 / 10 // 80%
+	num_to_evict := if c.entries.len >= int(c.config.max_entries) {
+		c.entries.len - target_size
+	} else {
+		// promote the count to f64 first so the infix expression does not mix int and f64
+		math.max(1, int(f64(c.entries.len) * c.config.eviction_ratio))
+	}
+
+	if num_to_evict > 0 {
+		// Remove oldest entries
+		mut evicted_size := u64(0)
+		for i := 0; i < num_to_evict && i < c.access_log.len; i++ {
+			id := c.access_log[i]
+			if entry := c.entries[id] {
+				evicted_size += u64(entry.size)
+				c.entries.delete(id)
+			}
+		}
+
+		// Update total size and access log
+		c.total_size -= evicted_size
+		c.access_log = c.access_log[num_to_evict..]
+	}
+}
+
+// remove deletes a single entry from the cache
+pub fn (mut c Cache[T]) remove(id u32) {
+	if entry := c.entries[id] {
+		c.total_size -= u64(entry.size)
+	}
+	c.entries.delete(id)
+}
+
+// clear empties the cache
+pub fn (mut c Cache[T]) clear() {
+	c.entries.clear()
+	c.access_log.clear()
+	c.total_size = 0
+}
+
+// len returns the number of entries in the cache
+pub fn (c &Cache[T]) len() int {
+	return c.entries.len
+}
diff --git a/lib/data/cache/cache_test.v b/lib/data/cache/cache_test.v
new file mode 100644
index 00000000..62f1bd56
--- /dev/null
+++ b/lib/data/cache/cache_test.v
@@ -0,0 +1,152 @@
+module cache
+
+import time
+
+@[heap]
+struct TestData {
+	value string
+}
+
+fn test_cache_creation() {
+	config := CacheConfig{
+		max_entries: 100
+		max_size_mb: 1.0
+		ttl_seconds: 60
+		eviction_ratio: 0.1
+	}
+	mut cache := new_cache[TestData](config)
+	assert cache.len() == 0
+	assert cache.config.max_entries == 100
+	assert cache.config.max_size_mb == 1.0
+	assert cache.config.ttl_seconds == 60
+	assert cache.config.eviction_ratio == 0.1
+}
+
+fn test_cache_set_get() {
+	mut cache := new_cache[TestData](CacheConfig{})
+	data := &TestData{
+		value: 'test'
+	}
+
+	cache.set(1, data)
+	assert cache.len() == 1
+
+	if cached := cache.get(1) {
+		assert cached.value == 'test'
+	} else {
+		assert false, 'Failed to get cached item'
+	}
+
+	if _ := cache.get(2) {
+		assert false, 'Should not find non-existent item'
+	}
+}
+
+fn test_cache_ttl() {
+	$if debug {
+		eprintln('> test_cache_ttl')
+	}
+	mut cache := new_cache[TestData](CacheConfig{
+		ttl_seconds: 1
+	})
+	data := &TestData{
+		value: 'test'
+	}
+
+	cache.set(1, data)
+	assert cache.len() == 1
+
+	if cached := cache.get(1) {
+		assert cached.value == 'test'
+	}
+
+	time.sleep(2 * time.second)
+	$if debug {
+		eprintln('> waited 2 seconds')
+	}
+
+	if _ := cache.get(1) {
+		assert false, 'Item should have expired'
+	}
+	assert cache.len() == 0
+}
+
+fn test_cache_eviction() {
+	mut cache := new_cache[TestData](CacheConfig{
+		max_entries: 2
+		eviction_ratio: 0.5
+	})
+
+	data1 := &TestData{
+		value: 'one'
+	}
+	data2 := &TestData{
+		value: 'two'
+	}
+	data3 := &TestData{
+		value: 'three'
+	}
+
+	cache.set(1, data1)
+	cache.set(2, data2)
+	assert cache.len() == 2
+
+	// Access data1 to make it more recently used (the option result must be handled)
+	if _ := cache.get(1) {}
+
+	// Adding data3 should trigger eviction of data2 (least recently used)
+	cache.set(3, data3)
+	assert cache.len() == 2
+
+	if _ := cache.get(2) {
+		assert false, 'Item 2 should have been evicted'
+	}
+
+	if cached := cache.get(1) {
+		assert cached.value == 'one'
+	} else {
+		assert false, 'Item 1 should still be cached'
+	}
+
+	if cached := cache.get(3) {
+		assert cached.value == 'three'
+	} else {
+		assert false, 'Item 3 should be cached'
+	}
+} + +fn test_cache_clear() { + mut cache := new_cache[TestData](CacheConfig{}) + data := &TestData{ + value: 'test' + } + + cache.set(1, data) + assert cache.len() == 1 + + cache.clear() + assert cache.len() == 0 + + if _ := cache.get(1) { + assert false, 'Cache should be empty after clear' + } +} + +fn test_cache_size_limit() { + // Set a very small size limit to force eviction + mut cache := new_cache[TestData](CacheConfig{ + max_size_mb: 0.0001 // ~100 bytes + eviction_ratio: 0.5 + }) + + // Add multiple entries to exceed size limit + for i := u32(0); i < 10; i++ { + data := &TestData{ + value: 'test${i}' + } + cache.set(i, data) + } + + // Cache should have evicted some entries to stay under size limit + assert cache.len() < 10 +} diff --git a/lib/data/doctree/process_includes.v b/lib/data/doctree/process_includes.v index 1fbe8bcb..0db4baaf 100644 --- a/lib/data/doctree/process_includes.v +++ b/lib/data/doctree/process_includes.v @@ -33,7 +33,7 @@ pub fn (mut tree Tree) process_includes() ! { for queue.len > 0 { front := queue[0] - queue = queue[1..] + queue = queue[1..].clone() mut page := tree.page_get(front)! mut col := tree.get_collection(page.collection_name)! diff --git a/lib/data/doctree/scan.v b/lib/data/doctree/scan.v index 6d628d56..902f8a45 100644 --- a/lib/data/doctree/scan.v +++ b/lib/data/doctree/scan.v @@ -34,10 +34,9 @@ pub fn (mut tree Tree) scan(args_ TreeScannerArgs) ! { if args.git_url.len > 0 { mut gs := gittools.get(coderoot: args.git_root)! mut repo := gs.get_repo( - url: args.git_url - pull: args.git_pull - reset: args.git_reset - reload: false + url: args.git_url + pull: args.git_pull + reset: args.git_reset )! args.path = repo.get_path_of_url(args.git_url)! } @@ -89,10 +88,9 @@ pub fn (mut tree Tree) scan_concurrent(args_ TreeScannerArgs) ! { if args.git_url.len > 0 { mut gs := gittools.get(coderoot: args.git_root)! mut repo := gs.get_repo( - url: args.git_url - pull: args.git_pull - reset: args.git_reset - reload: false + url: args.git_url + pull: args.git_pull + reset: args.git_reset )! args.path = repo.get_path_of_url(args.git_url)! } diff --git a/lib/data/encoder/auto.v b/lib/data/encoder/auto.v index e47f02d9..627ca8a2 100644 --- a/lib/data/encoder/auto.v +++ b/lib/data/encoder/auto.v @@ -70,37 +70,37 @@ pub fn decode[T](data []u8) !T { // Primitive types $if field.typ is string { // $(string_expr) produces an identifier - result.$(field.name) = d.get_string() + result.$(field.name) = d.get_string()! } $else $if field.typ is int { - result.$(field.name) = d.get_int() + result.$(field.name) = d.get_int()! } $else $if field.typ is u8 { - result.$(field.name) = d.get_u8() + result.$(field.name) = d.get_u8()! } $else $if field.typ is u16 { - result.$(field.name) = d.get_u16() + result.$(field.name) = d.get_u16()! } $else $if field.typ is u32 { - result.$(field.name) = d.get_u32() + result.$(field.name) = d.get_u32()! } $else $if field.typ is u64 { - result.$(field.name) = d.get_u64() + result.$(field.name) = d.get_u64()! } $else $if field.typ is time.Time { - result.$(field.name) = d.get_time() + result.$(field.name) = d.get_time()! // Arrays of primitive types } $else $if field.typ is []string { - result.$(field.name) = d.get_list_string() + result.$(field.name) = d.get_list_string()! } $else $if field.typ is []int { - result.$(field.name) = d.get_list_int() + result.$(field.name) = d.get_list_int()! } $else $if field.typ is []u8 { - result.$(field.name) = d.get_list_u8() + result.$(field.name) = d.get_list_u8()! 
} $else $if field.typ is []u16 { - result.$(field.name) = d.get_list_u16() + result.$(field.name) = d.get_list_u16()! } $else $if field.typ is []u32 { - result.$(field.name) = d.get_list_u32() + result.$(field.name) = d.get_list_u32()! } $else $if field.typ is []u64 { - result.$(field.name) = d.get_list_u64() + result.$(field.name) = d.get_list_u64()! // Maps of primitive types } $else $if field.typ is map[string]string { - result.$(field.name) = d.get_map_string() + result.$(field.name) = d.get_map_string()! } $else $if field.typ is map[string][]u8 { - result.$(field.name) = d.get_map_bytes() + result.$(field.name) = d.get_map_bytes()! // Structs } $else $if field.is_struct { // TODO handle recursive behavior diff --git a/lib/data/encoder/encoder_decode.v b/lib/data/encoder/encoder_decode.v index 7287906d..b2ff1c81 100644 --- a/lib/data/encoder/encoder_decode.v +++ b/lib/data/encoder/encoder_decode.v @@ -17,138 +17,201 @@ pub fn decoder_new(data []u8) Decoder { return e } -pub fn (mut d Decoder) get_string() string { - n := d.get_u16() - v := d.data[..n] +pub fn (mut d Decoder) get_string() !string { + n := d.get_u16()! + if n > 64 * 1024 { // 64KB limit + return error('string length ${n} exceeds 64KB limit') + } + if n > d.data.len { + return error('string length ${n} exceeds remaining data length ${d.data.len}') + } + mut bytes := []u8{len: int(n)} + for i in 0 .. n { + bytes[i] = d.data[i] + } d.data.delete_many(0, n) - return v.bytestr() + return bytes.bytestr() } -pub fn (mut d Decoder) get_int() int { - return int(d.get_u32()) +pub fn (mut d Decoder) get_int() !int { + return int(d.get_u32()!) } -pub fn (mut d Decoder) get_bytes() []u8 { - n := int(d.get_u32()) - v := d.data[..n] +pub fn (mut d Decoder) get_bytes() ![]u8 { + n := int(d.get_u32()!) + if n > 64 * 1024 { // 64KB limit + return error('bytes length ${n} exceeds 64KB limit') + } + if n > d.data.len { + return error('bytes length ${n} exceeds remaining data length ${d.data.len}') + } + mut bytes := []u8{len: int(n)} + for i in 0 .. 
n {
+		bytes[i] = d.data[i]
+	}
 	d.data.delete_many(0, n)
-	return v
+	return bytes
 }
 
 // adds u16 length of string in bytes + the bytes
-pub fn (mut d Decoder) get_u8() u8 {
-	// remove first byte, this corresponds to u8, so the data bytestring becomes 1 byte shorter
+pub fn (mut d Decoder) get_u8() !u8 {
+	if d.data.len < 1 {
+		return error('not enough data for u8')
+	}
 	v := d.data.first()
 	d.data.delete(0)
 	return v
 }
 
-pub fn (mut d Decoder) get_u16() u16 {
-	v := d.data[..2]
+pub fn (mut d Decoder) get_u16() !u16 {
+	if d.data.len < 2 {
+		return error('not enough data for u16')
+	}
+	mut bytes := []u8{len: 2}
+	bytes[0] = d.data[0]
+	bytes[1] = d.data[1]
 	d.data.delete_many(0, 2)
-	return bin.little_endian_u16(v)
+	return bin.little_endian_u16(bytes)
 }
 
-pub fn (mut d Decoder) get_u32() u32 {
-	v := d.data[..4]
+pub fn (mut d Decoder) get_u32() !u32 {
+	if d.data.len < 4 {
+		return error('not enough data for u32')
+	}
+	mut bytes := []u8{len: 4}
+	bytes[0] = d.data[0]
+	bytes[1] = d.data[1]
+	bytes[2] = d.data[2]
+	bytes[3] = d.data[3]
 	d.data.delete_many(0, 4)
-	return bin.little_endian_u32(v)
+	return bin.little_endian_u32(bytes)
 }
 
-pub fn (mut d Decoder) get_u64() u64 {
-	v := d.data[..8]
+pub fn (mut d Decoder) get_u64() !u64 {
+	if d.data.len < 8 {
+		return error('not enough data for u64')
+	}
+	mut bytes := []u8{len: 8}
+	bytes[0] = d.data[0]
+	bytes[1] = d.data[1]
+	bytes[2] = d.data[2]
+	bytes[3] = d.data[3]
+	bytes[4] = d.data[4]
+	bytes[5] = d.data[5]
+	bytes[6] = d.data[6]
+	bytes[7] = d.data[7]
 	d.data.delete_many(0, 8)
-	return bin.little_endian_u64(v)
+	return bin.little_endian_u64(bytes)
 }
 
-pub fn (mut d Decoder) get_i64() i64 {
-	v := d.data[..8]
+pub fn (mut d Decoder) get_i64() !i64 {
+	if d.data.len < 8 {
+		return error('not enough data for i64')
+	}
+	mut bytes := []u8{len: 8}
+	bytes[0] = d.data[0]
+	bytes[1] = d.data[1]
+	bytes[2] = d.data[2]
+	bytes[3] = d.data[3]
+	bytes[4] = d.data[4]
+	bytes[5] = d.data[5]
+	bytes[6] = d.data[6]
+	bytes[7] = d.data[7]
 	d.data.delete_many(0, 8)
-	return u64(bin.little_endian_u64(v))
+	return i64(bin.little_endian_u64(bytes)) // reinterpret the 8 bytes as signed: this fn returns !i64, not u64
 }
 
-pub fn (mut d Decoder) get_time() time.Time {
-	nano_time := d.get_i64()
+pub fn (mut d Decoder) get_time() !time.Time {
+	nano_time := d.get_i64()!
 	seconds := nano_time / int(1e9)
 	nano_seconds := int(nano_time % int(1e9))
 	return time.unix_nanosecond(seconds, nano_seconds)
 }
 
-pub fn (mut d Decoder) get_ourtime() ourtime.OurTime {
+pub fn (mut d Decoder) get_ourtime() !ourtime.OurTime {
 	return ourtime.OurTime{
-		unixt: d.get_i64()
+		unixt: d.get_i64()!
 	}
 }
 
-pub fn (mut d Decoder) get_list_string() []string {
-	n := d.get_u16()
+pub fn (mut d Decoder) get_list_string() ![]string {
+	n := d.get_u16()!
 	mut v := []string{len: int(n)}
 	for i in 0 .. n {
-		v[i] = d.get_string()
+		v[i] = d.get_string()!
 	}
 	return v
 }
 
-pub fn (mut d Decoder) get_list_int() []int {
-	n := d.get_u16()
+pub fn (mut d Decoder) get_list_int() ![]int {
+	n := d.get_u16()!
 	mut v := []int{len: int(n)}
 	for i in 0 .. n {
-		v[i] = d.get_int()
+		v[i] = d.get_int()!
 	}
 	return v
 }
 
-pub fn (mut d Decoder) get_list_u8() []u8 {
-	n := d.get_u16()
-	v := d.data[..n]
+pub fn (mut d Decoder) get_list_u8() ![]u8 {
+	n := d.get_u16()!
+	if n > 64 * 1024 { // 64KB limit
+		return error('list length ${n} exceeds 64KB limit')
+	}
+	if n > d.data.len {
+		return error('list length ${n} exceeds remaining data length ${d.data.len}')
+	}
+	mut bytes := []u8{len: int(n)}
+	for i in 0 ..
n { + bytes[i] = d.data[i] + } d.data.delete_many(0, n) - return v + return bytes } -pub fn (mut d Decoder) get_list_u16() []u16 { - n := d.get_u16() +pub fn (mut d Decoder) get_list_u16() ![]u16 { + n := d.get_u16()! mut v := []u16{len: int(n)} for i in 0 .. n { - v[i] = d.get_u16() + v[i] = d.get_u16()! } return v } -pub fn (mut d Decoder) get_list_u32() []u32 { - n := d.get_u16() +pub fn (mut d Decoder) get_list_u32() ![]u32 { + n := d.get_u16()! mut v := []u32{len: int(n)} for i in 0 .. n { - v[i] = d.get_u32() + v[i] = d.get_u32()! } return v } -pub fn (mut d Decoder) get_list_u64() []u64 { - n := d.get_u16() +pub fn (mut d Decoder) get_list_u64() ![]u64 { + n := d.get_u16()! mut v := []u64{len: int(n)} for i in 0 .. n { - v[i] = d.get_u64() + v[i] = d.get_u64()! } return v } -pub fn (mut d Decoder) get_map_string() map[string]string { - n := d.get_u16() +pub fn (mut d Decoder) get_map_string() !map[string]string { + n := d.get_u16()! mut v := map[string]string{} for _ in 0 .. n { - key := d.get_string() - val := d.get_string() + key := d.get_string()! + val := d.get_string()! v[key] = val } return v } -pub fn (mut d Decoder) get_map_bytes() map[string][]u8 { - n := d.get_u16() +pub fn (mut d Decoder) get_map_bytes() !map[string][]u8 { + n := d.get_u16()! mut v := map[string][]u8{} for _ in 0 .. n { - key := d.get_string() - val := d.get_bytes() + key := d.get_string()! + val := d.get_bytes()! v[key] = val } return v diff --git a/lib/data/encoder/encoder_test.v b/lib/data/encoder/encoder_test.v index a7b58b14..c1b63afc 100644 --- a/lib/data/encoder/encoder_test.v +++ b/lib/data/encoder/encoder_test.v @@ -11,8 +11,8 @@ fn test_string() { assert e.data == [u8(1), 0, 97, 2, 0, 98, 99] mut d := decoder_new(e.data) - assert d.get_string() == 'a' - assert d.get_string() == 'bc' + assert d.get_string()! == 'a' + assert d.get_string()! == 'bc' } fn test_int() { @@ -22,8 +22,8 @@ fn test_int() { assert e.data == [u8(0x00), 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x7f] mut d := decoder_new(e.data) - assert d.get_int() == min_i32 - assert d.get_int() == max_i32 + assert d.get_int()! == min_i32 + assert d.get_int()! == max_i32 } fn test_bytes() { @@ -34,7 +34,7 @@ fn test_bytes() { assert e.data == [u8(6), 0, 97, 98, 99, 100, 101, 102] mut d := decoder_new(e.data) - assert d.get_list_u8() == sb + assert d.get_list_u8()! == sb } fn test_u8() { @@ -44,8 +44,8 @@ fn test_u8() { assert e.data == [u8(0x00), 0xff] mut d := decoder_new(e.data) - assert d.get_u8() == min_u8 - assert d.get_u8() == max_u8 + assert d.get_u8()! == min_u8 + assert d.get_u8()! == max_u8 } fn test_u16() { @@ -55,8 +55,8 @@ fn test_u16() { assert e.data == [u8(0x00), 0x00, 0xff, 0xff] mut d := decoder_new(e.data) - assert d.get_u16() == min_u16 - assert d.get_u16() == max_u16 + assert d.get_u16()! == min_u16 + assert d.get_u16()! == max_u16 } fn test_u32() { @@ -66,8 +66,8 @@ fn test_u32() { assert e.data == [u8(0x00), 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff] mut d := decoder_new(e.data) - assert d.get_u32() == min_u32 - assert d.get_u32() == max_u32 + assert d.get_u32()! == min_u32 + assert d.get_u32()! == max_u32 } fn test_u64() { @@ -78,8 +78,8 @@ fn test_u64() { 0xff, 0xff, 0xff, 0xff] mut d := decoder_new(e.data) - assert d.get_u64() == min_u64 - assert d.get_u64() == max_u64 + assert d.get_u64()! == min_u64 + assert d.get_u64()! == max_u64 } fn test_time() { @@ -88,7 +88,7 @@ fn test_time() { e.add_time(t) mut d := decoder_new(e.data) - assert d.get_time() == t + assert d.get_time()! 
== t } fn test_list_string() { @@ -99,7 +99,7 @@ fn test_list_string() { assert e.data == [u8(3), 0, 1, 0, 97, 2, 0, 98, 99, 3, 0, 100, 101, 102] mut d := decoder_new(e.data) - assert d.get_list_string() == list + assert d.get_list_string()! == list } fn test_list_int() { @@ -110,7 +110,7 @@ fn test_list_int() { assert e.data == [u8(3), 0, 0x95, 0xea, 0x2f, 0x87, 0, 0, 0, 0, 0x8f, 0xe6, 0xf2, 0xfd] mut d := decoder_new(e.data) - assert d.get_list_int() == list + assert d.get_list_int()! == list } fn test_list_u8() { @@ -121,7 +121,7 @@ fn test_list_u8() { assert e.data == [u8(3), 0, 153, 0, 22] mut d := decoder_new(e.data) - assert d.get_list_u8() == list + assert d.get_list_u8()! == list } fn test_list_u16() { @@ -132,7 +132,7 @@ fn test_list_u16() { assert e.data == [u8(3), 0, 0x25, 0x87, 0, 0, 0xff, 0xfd] mut d := decoder_new(e.data) - assert d.get_list_u16() == list + assert d.get_list_u16()! == list } fn test_list_u32() { @@ -143,7 +143,7 @@ fn test_list_u32() { assert e.data == [u8(3), 0, 0x95, 0xea, 0x2f, 0x87, 0, 0, 0, 0, 0x8f, 0xe6, 0xf2, 0xfd] mut d := decoder_new(e.data) - assert d.get_list_u32() == list + assert d.get_list_u32()! == list } fn test_map_string() { @@ -157,7 +157,7 @@ fn test_map_string() { assert e.data == [u8(2), 0, 1, 0, 49, 1, 0, 97, 1, 0, 50, 2, 0, 98, 99] mut d := decoder_new(e.data) - assert d.get_map_string() == mp + assert d.get_map_string()! == mp } fn test_map_bytes() { @@ -171,7 +171,7 @@ fn test_map_bytes() { assert e.data == [u8(2), 0, 1, 0, 49, 1, 0, 0, 0, 97, 1, 0, 50, 2, 0, 0, 0, 98, 99] mut d := decoder_new(e.data) - assert d.get_map_bytes() == mp + assert d.get_map_bytes()! == mp } struct StructType[T] { diff --git a/lib/data/encoderhero/readme.md b/lib/data/encoderhero/readme.md index af10be2c..ffd7a6c7 100644 --- a/lib/data/encoderhero/readme.md +++ b/lib/data/encoderhero/readme.md @@ -1,23 +1,18 @@ # hero Encoder -> encoder hero is based on json2 from https://github.com/vlang/v/blob/master/vlib/x/json2/README.md - -## Usage - -#### encode[T] - ```v -#!/usr/bin/env -S v -n -cg -w -enable-globals run + +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.data.encoderhero +import freeflowuniverse.herolib.core.base import time struct Person { mut: name string - age ?int = 20 + age int = 20 birthday time.Time - deathday ?time.Time } mut person := Person{ @@ -26,44 +21,11 @@ mut person := Person{ } heroscript := encoderhero.encode[Person](person)! -``` +println(heroscript) -#### decode[T] +person2 := encoderhero.decode[Person](heroscript)! -```v -import freeflowuniverse.herolib.data.encoderhero -import time - -struct Person { -mut: - name string - age ?int = 20 - birthday time.Time - deathday ?time.Time -} - -data := ' - -' - -person := encoderhero.decode[Person](data)! -/* -struct Person { - mut: - name "Bob" - age 20 - birthday "2022-03-11 13:54:25" - } -*/ +println(person2) ``` - -## License - -for all original code as used from Alexander: - -// Copyright (c) 2019-2024 Alexander Medvednikov. All rights reserved. -// Use of this source code is governed by an MIT license -// that can be found in the LICENSE file. - diff --git a/lib/data/graphdb/README.md b/lib/data/graphdb/README.md new file mode 100644 index 00000000..1e3d76ed --- /dev/null +++ b/lib/data/graphdb/README.md @@ -0,0 +1,170 @@ +# GraphDB + +A lightweight, efficient graph database implementation in V that supports property graphs with nodes and edges. It provides both in-memory caching and persistent storage capabilities. 
+ +## Features + +- Property Graph Model + - Nodes with key-value properties + - Typed edges with properties + - Bidirectional edge traversal +- Persistent Storage + - Automatic data persistence + - Efficient serialization +- Memory-Efficient Caching + - LRU caching for nodes and edges + - Configurable cache sizes +- Rich Query Capabilities + - Property-based node queries + - Edge-based node traversal + - Relationship type filtering +- CRUD Operations + - Create, read, update, and delete nodes + - Manage relationships between nodes + - Update properties dynamically + +## Installation + +GraphDB is part of the HeroLib library. Include it in your V project: + +```v +import freeflowuniverse.herolib.data.graphdb +``` + +## Basic Usage + +Here's a simple example demonstrating core functionality: + +```v +import freeflowuniverse.herolib.data.graphdb + +fn main() { + // Create a new graph database + mut gdb := graphdb.new(path: '/tmp/mydb', reset: true)! + + // Create nodes + user_id := gdb.create_node({ + 'name': 'John', + 'age': '30', + 'city': 'London' + })! + + company_id := gdb.create_node({ + 'name': 'TechCorp', + 'industry': 'Technology' + })! + + // Create relationship + gdb.create_edge(user_id, company_id, 'WORKS_AT', { + 'role': 'Developer', + 'since': '2022' + })! + + // Query nodes by property + london_users := gdb.query_nodes_by_property('city', 'London')! + + // Find connected nodes + workplaces := gdb.get_connected_nodes(user_id, 'WORKS_AT', 'out')! +} +``` + +## API Reference + +### Creating a Database + +```v +// Create new database instance +struct NewArgs { + path string // Storage path + reset bool // Clear existing data + cache_config CacheConfig // Optional cache configuration +} +db := graphdb.new(NewArgs{...})! +``` + +### Node Operations + +```v +// Create node +node_id := db.create_node(properties: map[string]string)! + +// Get node +node := db.get_node(id: u32)! + +// Update node +db.update_node(id: u32, properties: map[string]string)! + +// Delete node (and connected edges) +db.delete_node(id: u32)! + +// Query nodes by property +nodes := db.query_nodes_by_property(key: string, value: string)! +``` + +### Edge Operations + +```v +// Create edge +edge_id := db.create_edge(from_id: u32, to_id: u32, edge_type: string, properties: map[string]string)! + +// Get edge +edge := db.get_edge(id: u32)! + +// Update edge +db.update_edge(id: u32, properties: map[string]string)! + +// Delete edge +db.delete_edge(id: u32)! + +// Get edges between nodes +edges := db.get_edges_between(from_id: u32, to_id: u32)! +``` + +### Graph Traversal + +```v +// Get connected nodes +// direction can be 'in', 'out', or 'both' +nodes := db.get_connected_nodes(id: u32, edge_type: string, direction: string)! 
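+
+// For example (placeholder node id and edge type, following the signature above):
+// related := db.get_connected_nodes(node_id, 'KNOWS', 'both')!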
+```
+
+## Data Model
+
+### Node Structure
+
+```v
+struct Node {
+	id         u32               // Unique identifier
+	properties map[string]string // Key-value properties
+	node_type  string            // Type of node
+	edges_out  []EdgeRef         // Outgoing edge references
+	edges_in   []EdgeRef         // Incoming edge references
+}
+```
+
+### Edge Structure
+
+```v
+struct Edge {
+	id         u32               // Unique identifier
+	from_node  u32               // Source node ID
+	to_node    u32               // Target node ID
+	edge_type  string            // Type of relationship
+	properties map[string]string // Key-value properties
+	weight     u16               // Edge weight
+}
+```
+
+## Performance Considerations
+
+- The database uses LRU caching for both nodes and edges to improve read performance
+- Persistent storage is handled efficiently through the underlying OurDB implementation
+- Edge references are stored in both source and target nodes for efficient traversal
+- Property queries perform full scans - consider indexing needs for large datasets
+
+## Example Use Cases
+
+- Social Networks: Modeling user relationships and interactions
+- Knowledge Graphs: Representing connected information and metadata
+- Organization Charts: Modeling company structure and relationships
+- Recommendation Systems: Building relationship-based recommendation engines
diff --git a/lib/data/graphdb/graphdb.v b/lib/data/graphdb/graphdb.v
new file mode 100644
index 00000000..17be3a71
--- /dev/null
+++ b/lib/data/graphdb/graphdb.v
@@ -0,0 +1,360 @@
+module graphdb
+
+import freeflowuniverse.herolib.data.ourdb
+import freeflowuniverse.herolib.data.cache { Cache, CacheConfig, new_cache }
+
+// Node represents a vertex in the graph with properties and edge references
+@[heap]
+pub struct Node {
+pub mut:
+	id         u32               // Unique identifier
+	properties map[string]string // Key-value properties
+	node_type  string            // Type of node; can e.g. refer to an object implementation such as a User
+	edges_out  []EdgeRef         // Outgoing edge references
+	edges_in   []EdgeRef         // Incoming edge references
+}
+
+// Edge represents a connection between nodes with properties
+@[heap]
+pub struct Edge {
+pub mut:
+	id         u32               // Unique identifier
+	from_node  u32               // Source node ID
+	to_node    u32               // Target node ID
+	edge_type  string            // Type of relationship
+	properties map[string]string // Key-value properties
+	weight     u16               // Weight of the connection between the two nodes
+}
+
+// EdgeRef is a lightweight reference to an edge
+@[heap]
+pub struct EdgeRef {
+pub mut:
+	edge_id   u32    // Database ID of the edge
+	edge_type string // Type of the edge relationship
+}
+
+// GraphDB represents the graph database
+pub struct GraphDB {
+mut:
+	db         &ourdb.OurDB // Database for persistent storage
+	node_cache &Cache[Node] // Cache for nodes
+	edge_cache &Cache[Edge] // Cache for edges
+}
+
+pub struct NewArgs {
+pub mut:
+	path         string
+	reset        bool
+	cache_config CacheConfig = CacheConfig{} // Default cache configuration
+}
+
+// Creates a new graph database instance
+pub fn new(args NewArgs) !&GraphDB {
+	mut db := ourdb.new(
+		path: args.path
+		record_size_max: 1024 * 4 // 4KB max record size
+		incremental_mode: true
+		reset: args.reset
+	)!
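+
+	// Note: record_size_max bounds the serialized size of a single node or edge,
+	// including its properties and edge reference lists.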
+ + // Create type-specific caches with provided config + node_cache := new_cache[Node](args.cache_config) + edge_cache := new_cache[Edge](args.cache_config) + + return &GraphDB{ + db: &db + node_cache: node_cache + edge_cache: edge_cache + } +} + +// Creates a new node with the given properties +pub fn (mut gdb GraphDB) create_node(properties map[string]string) !u32 { + mut node := Node{ + properties: properties + edges_out: []EdgeRef{} + edges_in: []EdgeRef{} + } + + // Let OurDB assign the ID in incremental mode + node_id := gdb.db.set(data: serialize_node(node))! + + // Update node with assigned ID and cache it + node.id = node_id + gdb.node_cache.set(node_id, &node) + + return node_id +} + +// Creates an edge between two nodes +pub fn (mut gdb GraphDB) create_edge(from_id u32, to_id u32, edge_type string, properties map[string]string) !u32 { + // Create the edge + mut edge := Edge{ + from_node: from_id + to_node: to_id + edge_type: edge_type + properties: properties + } + + // Let OurDB assign the ID in incremental mode + edge_id := gdb.db.set(data: serialize_edge(edge))! + + // Update edge with assigned ID and cache it + edge.id = edge_id + gdb.edge_cache.set(edge_id, &edge) + + // Update source node's outgoing edges + mut from_node := deserialize_node(gdb.db.get(from_id)!)! + from_node.edges_out << EdgeRef{ + edge_id: edge_id + edge_type: edge_type + } + gdb.db.set(id: from_id, data: serialize_node(from_node))! + gdb.node_cache.set(from_id, &from_node) + + // Update target node's incoming edges + mut to_node := deserialize_node(gdb.db.get(to_id)!)! + to_node.edges_in << EdgeRef{ + edge_id: edge_id + edge_type: edge_type + } + gdb.db.set(id: to_id, data: serialize_node(to_node))! + gdb.node_cache.set(to_id, &to_node) + + return edge_id +} + +// Gets a node by its ID +pub fn (mut gdb GraphDB) get_node(id u32) !Node { + // Try cache first + if cached_node := gdb.node_cache.get(id) { + return *cached_node + } + + // Load from database + node_data := gdb.db.get(id)! + node := deserialize_node(node_data)! + + // Cache the node + gdb.node_cache.set(id, &node) + + return node +} + +// Gets an edge by its ID +pub fn (mut gdb GraphDB) get_edge(id u32) !Edge { + // Try cache first + if cached_edge := gdb.edge_cache.get(id) { + return *cached_edge + } + + // Load from database + edge_data := gdb.db.get(id)! + edge := deserialize_edge(edge_data)! + + // Cache the edge + gdb.edge_cache.set(id, &edge) + + return edge +} + +// Updates a node's properties +pub fn (mut gdb GraphDB) update_node(id u32, properties map[string]string) ! { + mut node := deserialize_node(gdb.db.get(id)!)! + node.properties = properties.clone() + + // Update database + gdb.db.set(id: id, data: serialize_node(node))! + + // Update cache + gdb.node_cache.set(id, &node) +} + +// Updates an edge's properties +pub fn (mut gdb GraphDB) update_edge(id u32, properties map[string]string) ! { + mut edge := deserialize_edge(gdb.db.get(id)!)! + edge.properties = properties.clone() + + // Update database + gdb.db.set(id: id, data: serialize_edge(edge))! + + // Update cache + gdb.edge_cache.set(id, &edge) +} + +// Deletes a node and all its edges +pub fn (mut gdb GraphDB) delete_node(id u32) ! { + node := deserialize_node(gdb.db.get(id)!)! + + // Delete outgoing edges + for edge_ref in node.edges_out { + gdb.delete_edge(edge_ref.edge_id)! + } + + // Delete incoming edges + for edge_ref in node.edges_in { + gdb.delete_edge(edge_ref.edge_id)! + } + + // Delete from database + gdb.db.delete(id)! 
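+
+	// Connected edges were already removed above, so no surviving EdgeRef points at this node.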
+ + // Remove from cache + gdb.node_cache.remove(id) +} + +// Deletes an edge and updates connected nodes +pub fn (mut gdb GraphDB) delete_edge(id u32) ! { + edge := deserialize_edge(gdb.db.get(id)!)! + + // Update source node + mut from_node := deserialize_node(gdb.db.get(edge.from_node)!)! + for i, edge_ref in from_node.edges_out { + if edge_ref.edge_id == id { + from_node.edges_out.delete(i) + break + } + } + gdb.db.set(id: edge.from_node, data: serialize_node(from_node))! + gdb.node_cache.set(edge.from_node, &from_node) + + // Update target node + mut to_node := deserialize_node(gdb.db.get(edge.to_node)!)! + for i, edge_ref in to_node.edges_in { + if edge_ref.edge_id == id { + to_node.edges_in.delete(i) + break + } + } + gdb.db.set(id: edge.to_node, data: serialize_node(to_node))! + gdb.node_cache.set(edge.to_node, &to_node) + + // Delete from database and cache + gdb.db.delete(id)! + gdb.edge_cache.remove(id) +} + +// Queries nodes by property value +pub fn (mut gdb GraphDB) query_nodes_by_property(key string, value string) ![]Node { + mut nodes := []Node{} + mut next_id := gdb.db.get_next_id()! + + // Process each ID up to next_id + for id := u32(0); id < next_id; id++ { + // Try to get from cache first + if cached := gdb.node_cache.get(id) { + if prop_value := cached.properties[key] { + if prop_value == value { + nodes << *cached + } + } + continue + } + + // Not in cache, try to get from database + raw_data := gdb.db.get(id) or { continue } + mut node := deserialize_node(raw_data) or { continue } + + // Cache the node for future use + gdb.node_cache.set(id, &node) + + // Check if this node matches the query + if prop_value := node.properties[key] { + if prop_value == value { + nodes << node + } + } + } + + return nodes +} + +// Gets all edges between two nodes +pub fn (mut gdb GraphDB) get_edges_between(from_id u32, to_id u32) ![]Edge { + mut from_node := if cached := gdb.node_cache.get(from_id) { + *cached + } else { + node := deserialize_node(gdb.db.get(from_id)!)! + gdb.node_cache.set(from_id, &node) + node + } + + mut edges := []Edge{} + for edge_ref in from_node.edges_out { + edge_data := if cached := gdb.edge_cache.get(edge_ref.edge_id) { + *cached + } else { + mut edge := deserialize_edge(gdb.db.get(edge_ref.edge_id)!)! + gdb.edge_cache.set(edge_ref.edge_id, &edge) + edge + } + + if edge_data.to_node == to_id { + edges << edge_data + } + } + + return edges +} + +// Gets all nodes connected to a given node by edge type +pub fn (mut gdb GraphDB) get_connected_nodes(id u32, edge_type string, direction string) ![]Node { + mut start_node := if cached := gdb.node_cache.get(id) { + *cached + } else { + node := deserialize_node(gdb.db.get(id)!)! + gdb.node_cache.set(id, &node) + node + } + + mut connected_nodes := []Node{} + + if direction in ['out', 'both'] { + for edge_ref in start_node.edges_out { + if edge_ref.edge_type == edge_type { + edge_data := if cached := gdb.edge_cache.get(edge_ref.edge_id) { + *cached + } else { + mut edge := deserialize_edge(gdb.db.get(edge_ref.edge_id)!)! + gdb.edge_cache.set(edge_ref.edge_id, &edge) + edge + } + + mut target_node := if cached := gdb.node_cache.get(edge_data.to_node) { + *cached + } else { + node := deserialize_node(gdb.db.get(edge_data.to_node)!)! 
+ gdb.node_cache.set(edge_data.to_node, &node) + node + } + connected_nodes << target_node + } + } + } + + if direction in ['in', 'both'] { + for edge_ref in start_node.edges_in { + if edge_ref.edge_type == edge_type { + edge_data := if cached := gdb.edge_cache.get(edge_ref.edge_id) { + *cached + } else { + mut edge := deserialize_edge(gdb.db.get(edge_ref.edge_id)!)! + gdb.edge_cache.set(edge_ref.edge_id, &edge) + edge + } + + mut source_node := if cached := gdb.node_cache.get(edge_data.from_node) { + *cached + } else { + node := deserialize_node(gdb.db.get(edge_data.from_node)!)! + gdb.node_cache.set(edge_data.from_node, &node) + node + } + connected_nodes << source_node + } + } + } + + return connected_nodes +} diff --git a/lib/data/graphdb/graphdb_debug.v b/lib/data/graphdb/graphdb_debug.v new file mode 100644 index 00000000..3e1449ae --- /dev/null +++ b/lib/data/graphdb/graphdb_debug.v @@ -0,0 +1,246 @@ +module graphdb + +// Gets detailed information about a node +pub fn (mut gdb GraphDB) debug_node(id u32) !string { + node := gdb.get_node(id)! + + mut info := '\nNode Details (ID: ${id})\n' + info += '===================\n' + + // Properties + info += '\nProperties:\n' + if node.properties.len == 0 { + info += ' (none)\n' + } else { + for key, value in node.properties { + info += ' ${key}: ${value}\n' + } + } + + // Outgoing edges + info += '\nOutgoing Edges:\n' + if node.edges_out.len == 0 { + info += ' (none)\n' + } else { + for edge_ref in node.edges_out { + edge := gdb.get_edge(edge_ref.edge_id)! + target := gdb.get_node(edge.to_node)! + info += ' -[${edge_ref.edge_type}]-> Node(${edge.to_node})' + if name := target.properties['name'] { + info += ' (${name})' + } + if edge.properties.len > 0 { + info += ' {' + mut first := true + for key, value in edge.properties { + if !first { + info += ', ' + } + info += '${key}: ${value}' + first = false + } + info += '}' + } + info += '\n' + } + } + + // Incoming edges + info += '\nIncoming Edges:\n' + if node.edges_in.len == 0 { + info += ' (none)\n' + } else { + for edge_ref in node.edges_in { + edge := gdb.get_edge(edge_ref.edge_id)! + source := gdb.get_node(edge.from_node)! + info += ' <-[${edge_ref.edge_type}]- Node(${edge.from_node})' + if name := source.properties['name'] { + info += ' (${name})' + } + if edge.properties.len > 0 { + info += ' {' + mut first := true + for key, value in edge.properties { + if !first { + info += ', ' + } + info += '${key}: ${value}' + first = false + } + info += '}' + } + info += '\n' + } + } + + return info +} + +// Gets detailed information about an edge +pub fn (mut gdb GraphDB) debug_edge(id u32) !string { + edge := gdb.get_edge(id)! + from_node := gdb.get_node(edge.from_node)! + to_node := gdb.get_node(edge.to_node)! 
+ + mut info := '\nEdge Details (ID: ${id})\n' + info += '===================\n' + + // Basic info + info += '\nType: ${edge.edge_type}\n' + + // Connected nodes + info += '\nFrom Node (ID: ${edge.from_node}):\n' + if name := from_node.properties['name'] { + info += ' name: ${name}\n' + } + for key, value in from_node.properties { + if key != 'name' { + info += ' ${key}: ${value}\n' + } + } + + info += '\nTo Node (ID: ${edge.to_node}):\n' + if name := to_node.properties['name'] { + info += ' name: ${name}\n' + } + for key, value in to_node.properties { + if key != 'name' { + info += ' ${key}: ${value}\n' + } + } + + // Edge properties + info += '\nProperties:\n' + if edge.properties.len == 0 { + info += ' (none)\n' + } else { + for key, value in edge.properties { + info += ' ${key}: ${value}\n' + } + } + + return info +} + +// Prints the current state of the database +pub fn (mut gdb GraphDB) debug_db() ! { + mut next_id := gdb.db.get_next_id()! + + println('\nGraph Database State') + println('===================') + + // Print all nodes + println('\nNodes:') + println('------') + for id := u32(0); id < next_id; id++ { + if node_data := gdb.db.get(id) { + if node := deserialize_node(node_data) { + mut node_info := 'Node(${id})' + if name := node.properties['name'] { + node_info += ' (${name})' + } + node_info += ' - Properties: ${node.properties.len}, Out Edges: ${node.edges_out.len}, In Edges: ${node.edges_in.len}' + println(node_info) + } + } + } + + // Print all edges + println('\nEdges:') + println('------') + for id := u32(0); id < next_id; id++ { + if edge_data := gdb.db.get(id) { + if edge := deserialize_edge(edge_data) { + mut from_name := '' + mut to_name := '' + + if from_node := gdb.get_node(edge.from_node) { + if name := from_node.properties['name'] { + from_name = ' (${name})' + } + } + + if to_node := gdb.get_node(edge.to_node) { + if name := to_node.properties['name'] { + to_name = ' (${name})' + } + } + + mut edge_info := 'Edge(${id}): Node(${edge.from_node})${from_name} -[${edge.edge_type}]-> Node(${edge.to_node})${to_name}' + if edge.properties.len > 0 { + edge_info += ' {' + mut first := true + for key, value in edge.properties { + if !first { + edge_info += ', ' + } + edge_info += '${key}: ${value}' + first = false + } + edge_info += '}' + } + println(edge_info) + } + } + } +} + +// Prints a visual representation of the graph starting from a given node +pub fn (mut gdb GraphDB) print_graph_from(start_id u32, visited map[u32]bool) ! { + if start_id in visited { + return + } + + mut my_visited := visited.clone() + my_visited[start_id] = true + + node := gdb.get_node(start_id)! + + mut node_info := 'Node(${start_id})' + if name := node.properties['name'] { + node_info += ' (${name})' + } + println(node_info) + + // Print outgoing edges and recurse + for edge_ref in node.edges_out { + edge := gdb.get_edge(edge_ref.edge_id)! + mut edge_info := ' -[${edge.edge_type}]->' + + if edge.properties.len > 0 { + edge_info += ' {' + mut first := true + for key, value in edge.properties { + if !first { + edge_info += ', ' + } + edge_info += '${key}: ${value}' + first = false + } + edge_info += '}' + } + + println(edge_info) + gdb.print_graph_from(edge.to_node, my_visited)! + } +} + +// Prints a visual representation of the entire graph +pub fn (mut gdb GraphDB) print_graph() ! { + println('\nGraph Structure') + println('===============') + + mut visited := map[u32]bool{} + mut next_id := gdb.db.get_next_id()! 
+ + // Start from each unvisited node to handle disconnected components + for id := u32(0); id < next_id; id++ { + if id !in visited { + if node_data := gdb.db.get(id) { + if _ := deserialize_node(node_data) { + gdb.print_graph_from(id, visited)! + } + } + } + } +} diff --git a/lib/data/graphdb/graphdb_test.v b/lib/data/graphdb/graphdb_test.v new file mode 100644 index 00000000..4084cb1e --- /dev/null +++ b/lib/data/graphdb/graphdb_test.v @@ -0,0 +1,208 @@ +module graphdb + +fn test_basic_operations() ! { + mut gdb := new(path: '/tmp/graphdb_test', reset: true)! + + // Test creating nodes with properties + mut person1_id := gdb.create_node({ + 'name': 'Alice' + 'age': '30' + })! + + mut person2_id := gdb.create_node({ + 'name': 'Bob' + 'age': '25' + })! + + // Test retrieving nodes + person1 := gdb.get_node(person1_id)! + assert person1.properties['name'] == 'Alice' + assert person1.properties['age'] == '30' + + person2 := gdb.get_node(person2_id)! + assert person2.properties['name'] == 'Bob' + assert person2.properties['age'] == '25' + + // Test creating edge between nodes + edge_id := gdb.create_edge(person1_id, person2_id, 'KNOWS', { + 'since': '2020' + })! + + // Test retrieving edge + edge := gdb.get_edge(edge_id)! + assert edge.edge_type == 'KNOWS' + assert edge.properties['since'] == '2020' + assert edge.from_node == person1_id + assert edge.to_node == person2_id + + // Test querying nodes by property + alice_nodes := gdb.query_nodes_by_property('name', 'Alice')! + assert alice_nodes.len == 1 + assert alice_nodes[0].properties['age'] == '30' + + // Test getting connected nodes + bob_knows := gdb.get_connected_nodes(person1_id, 'KNOWS', 'out')! + assert bob_knows.len == 1 + assert bob_knows[0].properties['name'] == 'Bob' + + alice_known_by := gdb.get_connected_nodes(person2_id, 'KNOWS', 'in')! + assert alice_known_by.len == 1 + assert alice_known_by[0].properties['name'] == 'Alice' + + // Test updating node properties + gdb.update_node(person1_id, { + 'name': 'Alice' + 'age': '31' + })! + updated_alice := gdb.get_node(person1_id)! + assert updated_alice.properties['age'] == '31' + + // Test updating edge properties + gdb.update_edge(edge_id, { + 'since': '2021' + })! + updated_edge := gdb.get_edge(edge_id)! + assert updated_edge.properties['since'] == '2021' + + // Test getting edges between nodes + edges := gdb.get_edges_between(person1_id, person2_id)! + assert edges.len == 1 + assert edges[0].edge_type == 'KNOWS' + + // Test deleting edge + gdb.delete_edge(edge_id)! + remaining_edges := gdb.get_edges_between(person1_id, person2_id)! + assert remaining_edges.len == 0 + + // Test deleting node + gdb.delete_node(person1_id)! + if _ := gdb.get_node(person1_id) { + assert false, 'Expected error for deleted node' + } +} + +fn test_complex_graph() ! { + mut gdb := new(path: '/tmp/graphdb_test_complex', reset: true)! + + // Create nodes representing people + mut alice_id := gdb.create_node({ + 'name': 'Alice' + 'age': '30' + 'city': 'New York' + })! + + mut bob_id := gdb.create_node({ + 'name': 'Bob' + 'age': '25' + 'city': 'Boston' + })! + + mut charlie_id := gdb.create_node({ + 'name': 'Charlie' + 'age': '35' + 'city': 'New York' + })! + + // Create nodes representing companies + mut company1_id := gdb.create_node({ + 'name': 'TechCorp' + 'industry': 'Technology' + })! + + mut company2_id := gdb.create_node({ + 'name': 'FinCo' + 'industry': 'Finance' + })! + + // Create relationships + gdb.create_edge(alice_id, bob_id, 'KNOWS', { + 'since': '2020' + })! 
+ gdb.create_edge(bob_id, charlie_id, 'KNOWS', { + 'since': '2019' + })! + gdb.create_edge(charlie_id, alice_id, 'KNOWS', { + 'since': '2018' + })! + + gdb.create_edge(alice_id, company1_id, 'WORKS_AT', { + 'role': 'Engineer' + })! + gdb.create_edge(bob_id, company2_id, 'WORKS_AT', { + 'role': 'Analyst' + })! + gdb.create_edge(charlie_id, company1_id, 'WORKS_AT', { + 'role': 'Manager' + })! + + // Test querying by property + ny_people := gdb.query_nodes_by_property('city', 'New York')! + assert ny_people.len == 2 + + // Test getting connected nodes with different edge types + alice_knows := gdb.get_connected_nodes(alice_id, 'KNOWS', 'out')! + assert alice_knows.len == 1 + assert alice_knows[0].properties['name'] == 'Bob' + + alice_works_at := gdb.get_connected_nodes(alice_id, 'WORKS_AT', 'out')! + assert alice_works_at.len == 1 + assert alice_works_at[0].properties['name'] == 'TechCorp' + + // Test getting nodes connected in both directions + charlie_connections := gdb.get_connected_nodes(charlie_id, 'KNOWS', 'both')! + assert charlie_connections.len == 2 + + // Test company employees + techcorp_employees := gdb.get_connected_nodes(company1_id, 'WORKS_AT', 'in')! + assert techcorp_employees.len == 2 + + finco_employees := gdb.get_connected_nodes(company2_id, 'WORKS_AT', 'in')! + assert finco_employees.len == 1 + assert finco_employees[0].properties['name'] == 'Bob' +} + +fn test_edge_cases() ! { + mut gdb := new(path: '/tmp/graphdb_test_edge', reset: true)! + + // Test empty properties + node_id := gdb.create_node(map[string]string{})! + node := gdb.get_node(node_id)! + assert node.properties.len == 0 + + // Test node with many properties + mut large_props := map[string]string{} + for i in 0 .. 100 { + large_props['key${i}'] = 'value${i}' + } + large_node_id := gdb.create_node(large_props)! + large_node := gdb.get_node(large_node_id)! + assert large_node.properties.len == 100 + + // Test edge with empty properties + other_node_id := gdb.create_node({})! + edge_id := gdb.create_edge(node_id, other_node_id, 'TEST', map[string]string{})! + edge := gdb.get_edge(edge_id)! + assert edge.properties.len == 0 + + // Test querying non-existent property + empty_results := gdb.query_nodes_by_property('nonexistent', 'value')! + assert empty_results.len == 0 + + // Test getting edges between unconnected nodes + no_edges := gdb.get_edges_between(node_id, large_node_id)! + assert no_edges.len == 0 + + // Test getting connected nodes with non-existent edge type + no_connections := gdb.get_connected_nodes(node_id, 'NONEXISTENT', 'both')! 
+ assert no_connections.len == 0 + + // Test deleting non-existent edge + if _ := gdb.delete_edge(u32(99999)) { + assert false, 'Expected error for non-existent edge' + } + + // Test deleting non-existent node + if _ := gdb.delete_node(u32(99999)) { + assert false, 'Expected error for non-existent node' + } +} diff --git a/lib/data/graphdb/search.v b/lib/data/graphdb/search.v new file mode 100644 index 00000000..e9eef6f9 --- /dev/null +++ b/lib/data/graphdb/search.v @@ -0,0 +1,99 @@ +module graphdb + +// SearchConfig represents the configuration for graph traversal search +pub struct SearchConfig { +pub mut: + types []string // List of node types to search for + max_distance f32 // Maximum distance to traverse using edge weights +} + +// SearchResult represents a node found during search with its distance from start +pub struct SearchResult { +pub: + node &Node + distance f32 +} + +// search performs a breadth-first traversal from a start node +// Returns nodes of specified types within max_distance +pub fn (mut gdb GraphDB) search(start_id u32, config SearchConfig) ![]SearchResult { + mut results := []SearchResult{} + mut visited := map[u32]f32{} // Maps node ID to shortest distance found + mut queue := []u32{cap: 100} // Queue of node IDs to visit + + // Start from the given node + queue << start_id + visited[start_id] = 0 + + // Process nodes in queue + for queue.len > 0 { + current_id := queue[0] + queue.delete(0) + + current_distance := visited[current_id] + if current_distance > config.max_distance { + continue + } + + // Get current node + current_node := gdb.get_node(current_id)! + + // Add to results if node type matches search criteria + if config.types.len == 0 || current_node.node_type in config.types { + results << SearchResult{ + node: ¤t_node + distance: current_distance + } + } + + // Process outgoing edges + for edge_ref in current_node.edges_out { + edge := gdb.get_edge(edge_ref.edge_id)! + next_id := edge.to_node + + // Calculate new distance using edge weight + weight := if edge.weight == 0 { f32(1) } else { f32(edge.weight) } + new_distance := current_distance + weight + + // Skip if we've found a shorter path or would exceed max distance + if new_distance > config.max_distance { + continue + } + if next_distance := visited[next_id] { + if new_distance >= next_distance { + continue + } + } + + // Add to queue and update distance + queue << next_id + visited[next_id] = new_distance + } + + // Process incoming edges + for edge_ref in current_node.edges_in { + edge := gdb.get_edge(edge_ref.edge_id)! + next_id := edge.from_node + + // Calculate new distance using edge weight + weight := if edge.weight == 0 { f32(1) } else { f32(edge.weight) } + new_distance := current_distance + weight + + // Skip if we've found a shorter path or would exceed max distance + if new_distance > config.max_distance { + continue + } + if next_distance := visited[next_id] { + if new_distance >= next_distance { + continue + } + } + + // Add to queue and update distance + queue << next_id + visited[next_id] = new_distance + } + } + + return results +} diff --git a/lib/data/graphdb/search_test.v b/lib/data/graphdb/search_test.v new file mode 100644 index 00000000..a8bc4837 --- /dev/null +++ b/lib/data/graphdb/search_test.v @@ -0,0 +1,156 @@ +module graphdb + +fn test_search() ! { + mut gdb := new(NewArgs{ + path: 'test_search.db' + reset: true + })! 
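+
+	// The graph built below:
+	//   user1 -created(w=1)-> post1 -related(w=2)-> post2 <-created(w=1)- user2
+	// giving weighted distances from user1 of: post1 = 1, post2 = 3, user2 = 4.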
+ + // Create test nodes of different types + mut user1 := Node{ + properties: { + 'name': 'User 1' + } + node_type: 'user' + } + user1_id := gdb.db.set(data: serialize_node(user1))! + user1.id = user1_id + gdb.node_cache.set(user1_id, &user1) + + mut user2 := Node{ + properties: { + 'name': 'User 2' + } + node_type: 'user' + } + user2_id := gdb.db.set(data: serialize_node(user2))! + user2.id = user2_id + gdb.node_cache.set(user2_id, &user2) + + mut post1 := Node{ + properties: { + 'title': 'Post 1' + } + node_type: 'post' + } + post1_id := gdb.db.set(data: serialize_node(post1))! + post1.id = post1_id + gdb.node_cache.set(post1_id, &post1) + + mut post2 := Node{ + properties: { + 'title': 'Post 2' + } + node_type: 'post' + } + post2_id := gdb.db.set(data: serialize_node(post2))! + post2.id = post2_id + gdb.node_cache.set(post2_id, &post2) + + // Create edges with different weights + mut edge1 := Edge{ + from_node: user1_id + to_node: post1_id + edge_type: 'created' + weight: 1 + } + edge1_id := gdb.db.set(data: serialize_edge(edge1))! + edge1.id = edge1_id + gdb.edge_cache.set(edge1_id, &edge1) + + mut edge2 := Edge{ + from_node: post1_id + to_node: post2_id + edge_type: 'related' + weight: 2 + } + edge2_id := gdb.db.set(data: serialize_edge(edge2))! + edge2.id = edge2_id + gdb.edge_cache.set(edge2_id, &edge2) + + mut edge3 := Edge{ + from_node: user2_id + to_node: post2_id + edge_type: 'created' + weight: 1 + } + edge3_id := gdb.db.set(data: serialize_edge(edge3))! + edge3.id = edge3_id + gdb.edge_cache.set(edge3_id, &edge3) + + // Update node edge references + user1.edges_out << EdgeRef{ + edge_id: edge1_id + edge_type: 'created' + } + gdb.db.set(id: user1_id, data: serialize_node(user1))! + gdb.node_cache.set(user1_id, &user1) + + post1.edges_in << EdgeRef{ + edge_id: edge1_id + edge_type: 'created' + } + post1.edges_out << EdgeRef{ + edge_id: edge2_id + edge_type: 'related' + } + gdb.db.set(id: post1_id, data: serialize_node(post1))! + gdb.node_cache.set(post1_id, &post1) + + post2.edges_in << EdgeRef{ + edge_id: edge2_id + edge_type: 'related' + } + post2.edges_in << EdgeRef{ + edge_id: edge3_id + edge_type: 'created' + } + gdb.db.set(id: post2_id, data: serialize_node(post2))! + gdb.node_cache.set(post2_id, &post2) + + user2.edges_out << EdgeRef{ + edge_id: edge3_id + edge_type: 'created' + } + gdb.db.set(id: user2_id, data: serialize_node(user2))! + gdb.node_cache.set(user2_id, &user2) + + // Test 1: Search for posts within distance 2 + results1 := gdb.search(user1_id, SearchConfig{ + types: ['post'] + max_distance: 2 + })! + + assert results1.len == 1 // Should only find post1 within distance 2 + assert results1[0].node.properties['title'] == 'Post 1' + assert results1[0].distance == 1 + + // Test 2: Search for posts within distance 4 + results2 := gdb.search(user1_id, SearchConfig{ + types: ['post'] + max_distance: 4 + })! + + assert results2.len == 2 // Should find both posts + assert results2[0].node.properties['title'] == 'Post 1' + assert results2[1].node.properties['title'] == 'Post 2' + assert results2[1].distance == 3 + + // Test 3: Search for users within distance 3 + results3 := gdb.search(post2_id, SearchConfig{ + types: ['user'] + max_distance: 3 + })! + + assert results3.len == 2 // Should find both users + assert results3[0].node.properties['name'] in ['User 1', 'User 2'] + assert results3[1].node.properties['name'] in ['User 1', 'User 2'] + + // Test 4: Search without type filter + results4 := gdb.search(user1_id, SearchConfig{ + types: [] + max_distance: 4 + })! 
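+
+	// Reachable distances from user1: user1 = 0, post1 = 1, post2 = 3, user2 = 4,
+	// all within max_distance 4, so every node is returned.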
+ + assert results4.len == 4 // Should find all nodes +} diff --git a/lib/data/graphdb/serialization.v b/lib/data/graphdb/serialization.v new file mode 100644 index 00000000..281bde3f --- /dev/null +++ b/lib/data/graphdb/serialization.v @@ -0,0 +1,165 @@ +module graphdb + +import freeflowuniverse.herolib.data.encoder + +const version_v1 = u8(1) + +// Serializes a Node struct to bytes +pub fn serialize_node(node Node) []u8 { + mut e := encoder.new() + + // Add version byte + e.add_u8(version_v1) + + // Serialize node ID + e.add_u32(node.id) + + // Serialize node type + e.add_string(node.node_type) + + // Serialize properties + e.add_u16(u16(node.properties.len)) // Number of properties + for key, value in node.properties { + e.add_string(key) + e.add_string(value) + } + + // Serialize outgoing edges + e.add_u16(u16(node.edges_out.len)) // Number of outgoing edges + for edge in node.edges_out { + e.add_u32(edge.edge_id) + e.add_string(edge.edge_type) + } + + // Serialize incoming edges + e.add_u16(u16(node.edges_in.len)) // Number of incoming edges + for edge in node.edges_in { + e.add_u32(edge.edge_id) + e.add_string(edge.edge_type) + } + + return e.data +} + +// Deserializes bytes to a Node struct +pub fn deserialize_node(data []u8) !Node { + if data.len < 1 { + return error('Invalid node data: too short') + } + + mut d := encoder.decoder_new(data) + + // Check version + version := d.get_u8()! + if version != version_v1 { + return error('Unsupported version: ${version}') + } + + mut node := Node{ + properties: map[string]string{} + edges_out: []EdgeRef{} + edges_in: []EdgeRef{} + } + + // Deserialize node ID + node.id = d.get_u32()! + + // Deserialize node type + node.node_type = d.get_string()! + + // Deserialize properties + num_properties := d.get_u16()! + for _ in 0 .. num_properties { + key := d.get_string()! + value := d.get_string()! + node.properties[key] = value + } + + // Deserialize outgoing edges + num_edges_out := d.get_u16()! + for _ in 0 .. num_edges_out { + edge_id := d.get_u32()! + edge_type := d.get_string()! + node.edges_out << EdgeRef{ + edge_id: edge_id + edge_type: edge_type + } + } + + // Deserialize incoming edges + num_edges_in := d.get_u16()! + for _ in 0 .. num_edges_in { + edge_id := d.get_u32()! + edge_type := d.get_string()! + node.edges_in << EdgeRef{ + edge_id: edge_id + edge_type: edge_type + } + } + + return node +} + +// Serializes an Edge struct to bytes +pub fn serialize_edge(edge Edge) []u8 { + mut e := encoder.new() + + // Add version byte + e.add_u8(version_v1) + + // Serialize edge ID + e.add_u32(edge.id) + + // Serialize edge metadata + e.add_u32(edge.from_node) + e.add_u32(edge.to_node) + e.add_string(edge.edge_type) + e.add_u16(edge.weight) + + // Serialize properties + e.add_u16(u16(edge.properties.len)) + for key, value in edge.properties { + e.add_string(key) + e.add_string(value) + } + + return e.data +} + +// Deserializes bytes to an Edge struct +pub fn deserialize_edge(data []u8) !Edge { + if data.len < 1 { + return error('Invalid edge data: too short') + } + + mut d := encoder.decoder_new(data) + + // Check version + version := d.get_u8()! + if version != version_v1 { + return error('Unsupported version: ${version}') + } + + mut edge := Edge{ + properties: map[string]string{} + } + + // Deserialize edge ID + edge.id = d.get_u32()! + + // Deserialize edge metadata + edge.from_node = d.get_u32()! + edge.to_node = d.get_u32()! + edge.edge_type = d.get_string()! + edge.weight = d.get_u16()! 
+ + // Deserialize properties + num_properties := d.get_u16()! + for _ in 0 .. num_properties { + key := d.get_string()! + value := d.get_string()! + edge.properties[key] = value + } + + return edge +} diff --git a/lib/data/graphdb/serialization_test.v b/lib/data/graphdb/serialization_test.v new file mode 100644 index 00000000..1d988cda --- /dev/null +++ b/lib/data/graphdb/serialization_test.v @@ -0,0 +1,202 @@ +module graphdb + +fn test_node_serialization() { + // Create a test node with all fields populated + node := Node{ + node_type: 'user' + properties: { + 'name': 'John Doe' + 'age': '30' + 'email': 'john@example.com' + } + edges_out: [ + EdgeRef{ + edge_id: 1 + edge_type: 'follows' + }, + EdgeRef{ + edge_id: 2 + edge_type: 'likes' + }, + ] + edges_in: [ + EdgeRef{ + edge_id: 3 + edge_type: 'followed_by' + }, + ] + } + + // Serialize the node + serialized := serialize_node(node) + + // Deserialize back to node + deserialized := deserialize_node(serialized) or { + assert false, 'Failed to deserialize node: ${err}' + Node{} + } + + // Verify all fields match + assert deserialized.node_type == node.node_type, 'node_type mismatch' + assert deserialized.properties.len == node.properties.len, 'properties length mismatch' + for key, value in node.properties { + assert deserialized.properties[key] == value, 'property ${key} mismatch' + } + assert deserialized.edges_out.len == node.edges_out.len, 'edges_out length mismatch' + for i, edge in node.edges_out { + assert deserialized.edges_out[i].edge_id == edge.edge_id, 'edge_out ${i} id mismatch' + assert deserialized.edges_out[i].edge_type == edge.edge_type, 'edge_out ${i} type mismatch' + } + assert deserialized.edges_in.len == node.edges_in.len, 'edges_in length mismatch' + for i, edge in node.edges_in { + assert deserialized.edges_in[i].edge_id == edge.edge_id, 'edge_in ${i} id mismatch' + assert deserialized.edges_in[i].edge_type == edge.edge_type, 'edge_in ${i} type mismatch' + } +} + +fn test_edge_serialization() { + // Create a test edge with all fields populated + edge := Edge{ + from_node: 1 + to_node: 2 + edge_type: 'follows' + weight: 5 + properties: { + 'created_at': '2024-01-31' + 'active': 'true' + } + } + + // Serialize the edge + serialized := serialize_edge(edge) + + // Deserialize back to edge + deserialized := deserialize_edge(serialized) or { + assert false, 'Failed to deserialize edge: ${err}' + Edge{} + } + + // Verify all fields match + assert deserialized.from_node == edge.from_node, 'from_node mismatch' + assert deserialized.to_node == edge.to_node, 'to_node mismatch' + assert deserialized.edge_type == edge.edge_type, 'edge_type mismatch' + assert deserialized.weight == edge.weight, 'weight mismatch' + assert deserialized.properties.len == edge.properties.len, 'properties length mismatch' + for key, value in edge.properties { + assert deserialized.properties[key] == value, 'property ${key} mismatch' + } +} + +fn test_node_serialization_empty() { + // Test with empty node + node := Node{ + node_type: '' + properties: map[string]string{} + edges_out: []EdgeRef{} + edges_in: []EdgeRef{} + } + + serialized := serialize_node(node) + deserialized := deserialize_node(serialized) or { + assert false, 'Failed to deserialize empty node: ${err}' + Node{} + } + + assert deserialized.node_type == '', 'empty node_type mismatch' + assert deserialized.properties.len == 0, 'empty properties mismatch' + assert deserialized.edges_out.len == 0, 'empty edges_out mismatch' + assert deserialized.edges_in.len == 0, 'empty edges_in mismatch' +} 
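+
+// For reference, serialize_edge (version_v1) produces:
+//   [u8 version][u32 id][u32 from_node][u32 to_node][string edge_type][u16 weight]
+//   [u16 n_props] followed by n_props (key, value) string pairs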
+ +fn test_edge_serialization_empty() { + // Test with empty edge + edge := Edge{ + from_node: 0 + to_node: 0 + edge_type: '' + weight: 0 + properties: map[string]string{} + } + + serialized := serialize_edge(edge) + deserialized := deserialize_edge(serialized) or { + assert false, 'Failed to deserialize empty edge: ${err}' + Edge{} + } + + assert deserialized.from_node == 0, 'empty from_node mismatch' + assert deserialized.to_node == 0, 'empty to_node mismatch' + assert deserialized.edge_type == '', 'empty edge_type mismatch' + assert deserialized.weight == 0, 'empty weight mismatch' + assert deserialized.properties.len == 0, 'empty properties mismatch' +} + +fn test_version_compatibility() { + // Test version checking + node := Node{ + node_type: 'test' + } + mut serialized := serialize_node(node) + + // Modify version byte to invalid version + serialized[0] = 99 + + // Should fail with version error + deserialize_node(serialized) or { + assert err.msg().contains('Unsupported version'), 'Expected version error' + return + } + assert false, 'Expected error for invalid version' +} + +fn test_large_property_values() { + // Create a large string that's bigger than the slice bounds we're seeing in the error (20043) + mut large_value := '' + for _ in 0 .. 25000 { + large_value += 'x' + } + + // Create a node with the large property value + node := Node{ + node_type: 'test' + properties: { + 'large_prop': large_value + } + } + + // Serialize and deserialize + serialized := serialize_node(node) + deserialized := deserialize_node(serialized) or { + assert false, 'Failed to deserialize node with large property: ${err}' + Node{} + } + + // Verify the large property was preserved + assert deserialized.properties['large_prop'] == large_value, 'large property value mismatch' +} + +fn test_data_validation() { + // Test with invalid data + invalid_data := []u8{} + deserialize_node(invalid_data) or { + assert err.msg().contains('too short'), 'Expected data length error' + return + } + assert false, 'Expected error for empty data' + + // Test with truncated data + node := Node{ + node_type: 'test' + properties: { + 'key': 'value' + } + } + serialized := serialize_node(node) + truncated := serialized[..serialized.len / 2] + + deserialize_node(truncated) or { + assert err.msg().contains('Invalid'), 'Expected truncation error' + return + } + assert false, 'Expected error for truncated data' +} diff --git a/lib/data/location/db.v b/lib/data/location/db.v new file mode 100644 index 00000000..9569eddb --- /dev/null +++ b/lib/data/location/db.v @@ -0,0 +1,69 @@ +module location + +import db.pg +import os +import encoding.csv +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.clients.postgresql_client + +// LocationDB handles all database operations for locations +pub struct LocationDB { +pub mut: + db pg.DB + db_client postgresql_client.PostgresClient + tmp_dir pathlib.Path + db_dir pathlib.Path +} + +// new_location_db creates a new LocationDB instance +pub fn new_location_db(mut db_client postgresql_client.PostgresClient, reset bool) !LocationDB { + mut db_dir := pathlib.get_dir(path:'${os.home_dir()}/hero/var/db/location.db',create: true)! + + // Create locations database if it doesn't exist + if !db_client.db_exists('locations')! { + db_client.db_create('locations')! + } + + // Switch to locations database + db_client.dbname = 'locations' + + // Get the underlying pg.DB connection + db := db_client.db()! 
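+
+	// Keep both handles: db_client for admin operations (db_exists, db_create),
+	// the raw pg.DB for the ORM `sql` blocks used below.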
+
+	mut loc_db := LocationDB{
+		db: db
+		db_client: db_client
+		tmp_dir: pathlib.get_dir(path: '/tmp/location/', create: true)!
+		db_dir: db_dir
+	}
+	loc_db.init_tables(reset)!
+	return loc_db
+}
+
+// init_tables creates all tables, dropping existing ones first when reset is set
+fn (mut l LocationDB) init_tables(reset bool) ! {
+	if reset {
+		sql l.db {
+			drop table AlternateName
+			drop table City
+			drop table Country
+		}!
+	}
+
+	sql l.db {
+		create table Country
+		create table City
+		create table AlternateName
+	}!
+
+	// When resetting, ensure all countries have import_date set to 0
+	if reset {
+		l.db.exec('UPDATE Country SET import_date = 0')!
+	}
+}
+
+// close closes the database connection
+pub fn (mut l LocationDB) close() ! {
+	l.db.close()
+}
diff --git a/lib/data/location/factory.v b/lib/data/location/factory.v
new file mode 100644
index 00000000..afe2139f
--- /dev/null
+++ b/lib/data/location/factory.v
@@ -0,0 +1,62 @@
+module location
+
+import freeflowuniverse.herolib.clients.postgresql_client
+
+// Location represents the main API for location operations
+pub struct Location {
+mut:
+	db        LocationDB
+	db_client postgresql_client.PostgresClient
+}
+
+// new creates a new Location instance
+pub fn new(mut db_client postgresql_client.PostgresClient, reset bool) !Location {
+	db := new_location_db(mut db_client, reset)!
+	return Location{
+		db: db
+		db_client: db_client
+	}
+}
+
+// download_and_import downloads and imports the initial dataset
+pub fn (mut l Location) download_and_import(redownload bool) ! {
+	l.db.download_and_import_data(redownload)!
+}
+
+// Example usage:
+/*
+fn main() ! {
+	// Configure and get PostgreSQL client
+	heroscript := "
+	!!postgresql_client.configure
+		name:'test'
+		user: 'postgres'
+		port: 5432
+		host: 'localhost'
+		password: '1234'
+		dbname: 'postgres'
+	"
+	postgresql_client.play(heroscript: heroscript)!
+	mut db_client := postgresql_client.get(name: "test")!
+
+	// Create a new location instance with db_client
+	mut loc := location.new(mut db_client, false)!
+
+	// Initialize the database (downloads and imports data)
+	// Only needs to be done once or when updating data
+	loc.download_and_import(false)!
+
+	// Search for a city
+	results := loc.search('London', 'GB', 5, true)!
+	for result in results {
+		println('${result.city.name}, ${result.country.name} (${result.country.iso2})')
+		println('Coordinates: ${result.city.latitude}, ${result.city.longitude}')
+	}
+
+	// Search near coordinates (e.g., 10km radius from London)
+	nearby := loc.search_near(51.5074, -0.1278, 10.0, 5)!
+	for result in nearby {
+		println('${result.city.name} is nearby')
+	}
+}
+*/
diff --git a/lib/data/location/geonames.v b/lib/data/location/geonames.v
new file mode 100644
index 00000000..d722e5a0
--- /dev/null
+++ b/lib/data/location/geonames.v
@@ -0,0 +1,4 @@
+module location
+
+// https://www.geonames.org/export/codes.html
+
diff --git a/lib/data/location/importer.v b/lib/data/location/importer.v
new file mode 100644
index 00000000..2af18738
--- /dev/null
+++ b/lib/data/location/importer.v
@@ -0,0 +1,307 @@
+module location
+
+import os
+import io
+import time
+import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.core.texttools
+
+const (
+	geonames_url = 'https://download.geonames.org/export/dump'
+)
+
+// download_and_import_data downloads and imports GeoNames data
+pub fn (mut l LocationDB) download_and_import_data(redownload bool) ! {
+	// Download country info
+
+	if redownload {
+		l.reset_import_dates()!
+	}
+
+	country_file := osal.download(
+		url: '${geonames_url}/countryInfo.txt'
+		dest: '${l.tmp_dir.path}/country.txt'
+		minsize_kb: 10
+	)!
+	l.import_country_data(country_file.path)!
+
+	l.import_cities()!
+
+}
+
+// reset_import_dates sets all country import_dates to 0
+pub fn (mut l LocationDB) reset_import_dates() ! {
+	l.db.exec('BEGIN TRANSACTION')!
+	l.db.exec('UPDATE Country SET import_date = 0')!
+	l.db.exec('COMMIT')!
+	console.print_header('Reset all country import dates to 0')
+}
+
+// should_import_cities checks whether the cities of a country should be (re)imported, based on the country's last import date
+fn (mut l LocationDB) should_import_cities(iso2 string) !bool {
+	console.print_debug('Checking if should import country: ${iso2}')
+
+	country := sql l.db {
+		select from Country where iso2 == "${iso2}" limit 1
+	} or { []Country{} }
+
+	console.print_debug('SQL query result: ${country.len} records found')
+
+	if country.len == 0 {
+		console.print_debug('No existing record found for ${iso2}, will import')
+		return true // New country, should import
+	}
+
+	// Check if last import was more than a month ago
+	now := time.now().unix()
+	one_month := i64(30 * 24 * 60 * 60) // 30 days in seconds
+	last_import := country[0].import_date
+	time_since_import := now - last_import
+
+	console.print_debug('Last import: ${last_import}, Time since import: ${time_since_import} seconds (${time_since_import / 86400} days)')
+	should_import := (time_since_import > one_month) || (last_import == 0)
+	console.print_debug('Should import ${iso2}: ${should_import}')
+
+	return should_import
+}
+
+// import_country_data imports country information from a file
+fn (mut l LocationDB) import_country_data(filepath string) ! {
+	console.print_header('Starting import from: ${filepath}')
+	l.db.exec('BEGIN TRANSACTION')!
+
+	mut file := os.open(filepath) or {
+		console.print_stderr('Failed to open country file: ${err}')
+		return err
+	}
+	defer { file.close() }
+
+	mut reader := io.new_buffered_reader(reader: file)
+	defer { reader.free() }
+
+	mut count := 0
+	for {
+		line := reader.read_line() or { break }
+		if line.starts_with('#') {
+			continue
+		}
+		fields := line.split('\t')
+		if fields.len < 18 { // highest index read below is 17 (timezone)
+			continue
+		}
+
+		iso2 := fields[0]
+		// Check if country exists
+		existing_country := sql l.db {
+			select from Country where iso2 == iso2
+		} or { []Country{} }
+
+		country := Country{
+			iso2: iso2
+			iso3: fields[1]
+			name: fields[4]
+			continent: fields[8]
+			population: fields[7].i64()
+			timezone: fields[17]
+		}
+
+		if existing_country.len > 0 {
+			// Update existing country
+			sql l.db {
+				update Country set
+					iso3 = country.iso3,
+					name = country.name,
+					continent = country.continent,
+					population = country.population,
+					timezone = country.timezone
+				where iso2 == iso2
+			}!
+			//console.print_debug("Updated country: ${country}")
+		} else {
+			// Insert new country
+			sql l.db {
+				insert country into Country
+			}!
+			//console.print_debug("Inserted country: ${country}")
+		}
+		count++
+		if count % 10 == 0 {
+			console.print_header('Processed ${count} countries')
+		}
+	}
+
+	l.db.exec('COMMIT')!
+	console.print_header('Finished importing countries. Total records: ${count}')
+}
+
+// import_cities imports city information for all countries
+fn (mut l LocationDB) import_cities() ! {
+	console.print_header('Starting Cities Import')
+
+	// Query all countries from the database
+	mut countries := sql l.db {
+		select from Country
+	}!
+ + // Process each country + for country in countries { + iso2 := country.iso2.to_upper() + console.print_header('Processing country: ${country.name} (${iso2})') + + // Check if we need to import cities for this country + should_import := l.should_import_cities(iso2)! + if !should_import { + console.print_debug('Skipping ${country.name} (${iso2}) - recently imported') + continue + } + + // Download and process cities for this country + cities_file := osal.download( + url: '${geonames_url}/${iso2}.zip' + dest: '${l.tmp_dir.path}/${iso2}.zip' + expand_file: '${l.tmp_dir.path}/${iso2}' + minsize_kb: 2 + )! + + l.import_city_data("${l.tmp_dir.path}/${iso2}/${iso2}.txt")! + + // Update the country's import date after successful city import + now := time.now().unix() + l.db.exec('BEGIN TRANSACTION')! + sql l.db { + update Country set import_date = now where iso2 == iso2 + }! + l.db.exec('COMMIT')! + console.print_debug('Updated import date for ${country.name} (${iso2}) to ${now}') + } +} + +fn (mut l LocationDB) import_city_data(filepath string) ! { + console.print_header('City Import: Starting import from: ${filepath}') + + // the table has the following fields : + // --------------------------------------------------- + // geonameid : integer id of record in geonames database + // name : name of geographical point (utf8) varchar(200) + // asciiname : name of geographical point in plain ascii characters, varchar(200) + // alternatenames : alternatenames, comma separated, ascii names automatically transliterated, convenience attribute from alternatename table, varchar(10000) + // latitude : latitude in decimal degrees (wgs84) + // longitude : longitude in decimal degrees (wgs84) + // feature class : see http://www.geonames.org/export/codes.html, char(1) + // feature code : see http://www.geonames.org/export/codes.html, varchar(10) + // country code : ISO-3166 2-letter country code, 2 characters + // cc2 : alternate country codes, comma separated, ISO-3166 2-letter country code, 200 characters + // admin1 code : fipscode (subject to change to iso code), see exceptions below, see file admin1Codes.txt for display names of this code; varchar(20) + // admin2 code : code for the second administrative division, a county in the US, see file admin2Codes.txt; varchar(80) + // admin3 code : code for third level administrative division, varchar(20) + // admin4 code : code for fourth level administrative division, varchar(20) + // population : bigint (8 byte int) + // elevation : in meters, integer + // dem : digital elevation model, srtm3 or gtopo30, average elevation of 3''x3'' (ca 90mx90m) or 30''x30'' (ca 900mx900m) area in meters, integer. srtm processed by cgiar/ciat. + // timezone : the iana timezone id (see file timeZone.txt) varchar(40) + // modification date : date of last modification in yyyy-MM-dd format + + + + l.db.exec('BEGIN TRANSACTION')! 
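+
+	// The whole file is imported inside a single transaction; the matching COMMIT
+	// runs after the read loop below.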
+
+	mut file := os.open(filepath) or {
+		console.print_stderr('Failed to open city file: ${err}')
+		return err
+	}
+	defer { file.close() }
+
+	mut reader := io.new_buffered_reader(reader: file)
+	defer { reader.free() }
+
+	mut count := 0
+	console.print_header('Start import ${filepath}')
+	for {
+		line := reader.read_line() or {
+			//console.print_debug('End of file reached')
+			break
+		}
+		//console.print_debug(line)
+		fields := line.split('\t')
+		if fields.len < 18 { // highest index read below is 17 (timezone)
+			console.print_stderr('fields < 18: ${line}')
+			continue
+		}
+
+		// Parse fields according to geonames format
+		geoname_id := fields[0].int()
+		name := fields[1]
+		ascii_name := texttools.name_fix(fields[2])
+		country_iso2 := fields[8].to_upper()
+
+		// Check if city exists
+		existing_city := sql l.db {
+			select from City where id == geoname_id
+		} or { []City{} }
+
+		city := City{
+			id: geoname_id
+			name: name
+			ascii_name: ascii_name
+			country_iso2: country_iso2
+			postal_code: '' // Not provided in this format
+			state_name: '' // Will need separate admin codes file
+			state_code: fields[10]
+			county_name: ''
+			county_code: fields[11]
+			community_name: ''
+			community_code: ''
+			latitude: fields[4].f64()
+			longitude: fields[5].f64()
+			accuracy: 4 // Using geonameid, so accuracy is 4
+			population: fields[14].i64()
+			timezone: fields[17]
+			feature_class: fields[6]
+			feature_code: fields[7]
+			search_priority: 0 // Default priority
+		}
+
+		if existing_city.len > 0 {
+			// Update existing city
+			sql l.db {
+				update City set
+					name = city.name,
+					ascii_name = city.ascii_name,
+					country_iso2 = city.country_iso2,
+					postal_code = city.postal_code,
+					state_name = city.state_name,
+					state_code = city.state_code,
+					county_name = city.county_name,
+					county_code = city.county_code,
+					community_name = city.community_name,
+					community_code = city.community_code,
+					latitude = city.latitude,
+					longitude = city.longitude,
+					accuracy = city.accuracy,
+					population = city.population,
+					timezone = city.timezone,
+					feature_class = city.feature_class,
+					feature_code = city.feature_code,
+					search_priority = city.search_priority
+				where id == geoname_id
+			}!
+			//console.print_debug("Updated city: ${city}")
+		} else {
+			// Insert new city
+			sql l.db {
+				insert city into City
+			}!
+			//console.print_debug("Inserted city: ${city}")
+		}
+		count++
+		// if count % 1000 == 0 {
+		// 	console.print_header('Processed ${count} cities')
+		// }
+	}
+
+	console.print_debug('Processed ${count} cities')
+
+	l.db.exec('COMMIT')!
+	console.print_header('Finished importing cities for ${filepath}. 
Total records: ${count}') +} diff --git a/lib/data/location/models.v b/lib/data/location/models.v new file mode 100644 index 00000000..c5b921a4 --- /dev/null +++ b/lib/data/location/models.v @@ -0,0 +1,77 @@ +module location + +pub struct Country { +pub: + iso2 string @[primary; sql: 'iso2'; max_len: 2; unique; index] + name string @[required; unique; index] + iso3 string @[required; sql: 'iso3'; max_len: 3; unique; index] + continent string @[max_len: 2] + population i64 + timezone string @[max_len: 40] + import_date i64 // Epoch timestamp of last import +} + +pub struct City { +pub: + id int @[unique; index] + name string @[required; max_len: 200; index] + ascii_name string @[required; max_len: 200; index] // Normalized name without special characters + country_iso2 string @[required; fkey: 'Country.iso2'] + postal_code string @[max_len: 20; index ] //postal code + state_name string @[max_len: 100] // State/Province name + state_code string @[max_len: 20] // State/Province code + county_name string @[max_len: 100] + county_code string @[max_len: 20] + community_name string @[max_len: 100] + community_code string @[max_len: 20] + latitude f64 @[index: 'idx_coords'] + longitude f64 @[index: 'idx_coords'] + population i64 + timezone string @[max_len: 40] + feature_class string @[max_len: 1] // For filtering (P for populated places) + feature_code string @[max_len: 10] // Detailed type (PPL, PPLA, etc.) + search_priority int + accuracy i16 = 1 //1=estimated, 4=geonameid, 6=centroid of addresses or shape +} + +pub struct AlternateName { +pub: + id int @[primary; sql: serial] + city_id int @[required; fkey: 'City.id'] + name string @[required; max_len: 200; index] + language_code string @[max_len: 2] + is_preferred bool + is_short bool +} + +// SearchResult represents a location search result with combined city and country info +pub struct SearchResult { +pub: + city City + country Country + similarity f64 // Search similarity score +} + +// Coordinates represents a geographic point +pub struct Coordinates { +pub: + latitude f64 + longitude f64 +} + +// SearchOptions represents parameters for location searches +pub struct SearchOptions { +pub: + query string + country_code string + limit int = 10 + fuzzy bool +} + +// CoordinateSearchOptions represents parameters for coordinate-based searches +pub struct CoordinateSearchOptions { +pub: + coordinates Coordinates + radius f64 // in kilometers + limit int = 10 +} diff --git a/lib/data/location/readme.md b/lib/data/location/readme.md new file mode 100644 index 00000000..6ba3c5c9 --- /dev/null +++ b/lib/data/location/readme.md @@ -0,0 +1,5 @@ + + +make sure to do + +brew install libpq \ No newline at end of file diff --git a/lib/data/location/search.v b/lib/data/location/search.v new file mode 100644 index 00000000..4c276427 --- /dev/null +++ b/lib/data/location/search.v @@ -0,0 +1,177 @@ +module location + +import db.pg + +// // search searches for locations based on the provided options +// pub fn (l Location) search(query string, country_code string, limit int, fuzzy bool) ![]SearchResult { +// opts := SearchOptions{ +// query: query +// country_code: country_code +// limit: limit +// fuzzy: fuzzy +// } +// return l.db.search_locations(opts) +// } + +// // search_near searches for locations near the given coordinates +// pub fn (l Location) search_near(lat f64, lon f64, radius f64, limit int) ![]SearchResult { +// opts := CoordinateSearchOptions{ +// coordinates: Coordinates{ +// latitude: lat +// longitude: lon +// } +// radius: radius +// 
limit: limit +// } +// return l.db.search_by_coordinates(opts) +// } + +// // search_locations searches for locations based on the provided options +// pub fn (l LocationDB) search_locations(opts SearchOptions) ![]SearchResult { +// mut query_conditions := []string{} +// mut params := []string{} + +// if opts.query != '' { +// if opts.fuzzy { +// query_conditions << '(c.name ILIKE $${params.len + 1} OR c.ascii_name ILIKE $${params.len + 2})' +// params << '%${opts.query}%' +// params << '%${opts.query}%' +// } else { +// query_conditions << '(c.name = $${params.len + 1} OR c.ascii_name = $${params.len + 2})' +// params << opts.query +// params << opts.query +// } +// } + +// if opts.country_code != '' { +// query_conditions << 'c.country_iso2 = $${params.len + 1}' +// params << opts.country_code +// } + +// where_clause := if query_conditions.len > 0 { 'WHERE ' + query_conditions.join(' AND ') } else { '' } + +// query := ' +// SELECT c.*, co.* +// FROM City c +// JOIN Country co ON c.country_iso2 = co.iso2 +// ${where_clause} +// ORDER BY c.search_priority DESC, c.population DESC +// LIMIT ${opts.limit} +// ' + +// rows := l.db.exec_param_many(query, params)! +// mut results := []SearchResult{cap: rows.len} + +// for row in rows { +// city := City{ +// id: row.vals[0].int() or { 0 } +// name: row.vals[1] or { '' } +// ascii_name: row.vals[2] or { '' } +// country_iso2: row.vals[3] or { '' } +// postal_code: row.vals[4] or { '' } +// state_name: row.vals[5] or { '' } +// state_code: row.vals[6] or { '' } +// county_name: row.vals[7] or { '' } +// county_code: row.vals[8] or { '' } +// community_name: row.vals[9] or { '' } +// community_code: row.vals[10] or { '' } +// latitude: row.vals[11].f64() or { 0.0 } +// longitude: row.vals[12].f64() or { 0.0 } +// population: row.vals[13].i64() or { 0 } +// timezone: row.vals[14] or { '' } +// feature_class: row.vals[15] or { '' } +// feature_code: row.vals[16] or { '' } +// search_priority: row.vals[17].int() or { 0 } +// accuracy: u8(row.vals[18].int() or { 1 }) +// } + +// country := Country{ +// iso2: row.vals[19] or { '' } +// name: row.vals[20] or { '' } +// iso3: row.vals[21] or { '' } +// continent: row.vals[22] or { '' } +// population: row.vals[23].i64() or { 0 } +// timezone: row.vals[24] or { '' } +// import_date: row.vals[25].i64() or { 0 } +// } + +// results << SearchResult{ +// city: city +// country: country +// similarity: 1.0 // TODO: implement proper similarity scoring +// } +// } + +// return results +// } + +// // search_by_coordinates finds locations near the given coordinates +// pub fn (l LocationDB) search_by_coordinates(opts CoordinateSearchOptions) ![]SearchResult { +// // Use the Haversine formula to calculate distances +// query := " +// WITH distances AS ( +// SELECT c.*, co.*, +// (6371 * acos(cos(radians($1)) * cos(radians(latitude)) * +// cos(radians(longitude) - radians($2)) + sin(radians($1)) * +// sin(radians(latitude)))) AS distance +// FROM City c +// JOIN Country co ON c.country_iso2 = co.iso2 +// ) +// SELECT * FROM distances +// WHERE distance < $3 +// ORDER BY distance +// LIMIT $4 +// " + +// params := [ +// opts.coordinates.latitude.str(), +// opts.coordinates.longitude.str(), +// opts.radius.str(), +// opts.limit.str() +// ] +// rows := l.db.exec_param_many(query, params)! 
+ +// mut results := []SearchResult{cap: rows.len} + +// for row in rows { +// city := City{ +// id: row.vals[0].int() or { 0 } +// name: row.vals[1] or { '' } +// ascii_name: row.vals[2] or { '' } +// country_iso2: row.vals[3] or { '' } +// postal_code: row.vals[4] or { '' } +// state_name: row.vals[5] or { '' } +// state_code: row.vals[6] or { '' } +// county_name: row.vals[7] or { '' } +// county_code: row.vals[8] or { '' } +// community_name: row.vals[9] or { '' } +// community_code: row.vals[10] or { '' } +// latitude: row.vals[11].f64() or { 0.0 } +// longitude: row.vals[12].f64() or { 0.0 } +// population: row.vals[13].i64() or { 0 } +// timezone: row.vals[14] or { '' } +// feature_class: row.vals[15] or { '' } +// feature_code: row.vals[16] or { '' } +// search_priority: row.vals[17].int() or { 0 } +// accuracy: u8(row.vals[18].int() or { 1 }) +// } + +// country := Country{ +// iso2: row.vals[19] or { '' } +// name: row.vals[20] or { '' } +// iso3: row.vals[21] or { '' } +// continent: row.vals[22] or { '' } +// population: row.vals[23].i64() or { 0 } +// timezone: row.vals[24] or { '' } +// import_date: row.vals[25].i64() or { 0 } +// } + +// results << SearchResult{ +// city: city +// country: country +// similarity: 1.0 +// } +// } + +// return results +// } diff --git a/lib/data/markdownparser/parsers/parser_line.v b/lib/data/markdownparser/parsers/parser_line.v index 6c2a658a..5c08a148 100644 --- a/lib/data/markdownparser/parsers/parser_line.v +++ b/lib/data/markdownparser/parsers/parser_line.v @@ -29,7 +29,7 @@ fn parser_line_new(mut doc elements.Doc) !Parser { if doc.content.starts_with('+++') { mut frontmatter_content := '' mut lines := doc.content.split_into_lines() - lines = lines[1..] // Skip the opening '+++' + lines = lines[1..].clone() // Skip the opening '+++' with explicit clone for line in lines { if line.trim_space() == '+++' { diff --git a/lib/data/ourdb/README.md b/lib/data/ourdb/README.md index 308f0e34..2e7b1d19 100644 --- a/lib/data/ourdb/README.md +++ b/lib/data/ourdb/README.md @@ -5,18 +5,22 @@ OurDB is a lightweight, efficient key-value database implementation in V that pr ## Usage Example ```v - -//record_nr_max u32 = 16777216 - 1 // max number of records -//record_size_max u32 = 1024*4 // max record size (4KB default) -//file_size u32 = 500 * (1 << 20) // file size (500MB default) -//path string // storage directory - import freeflowuniverse.herolib.data.ourdb -mut db := ourdb.new(path:"/tmp/mydb")! +// Configure and create a new database instance +mut db := ourdb.new( + path: '/tmp/mydb', // storage directory + record_nr_max: 16777216 - 1, // max number of records (default) + record_size_max: 1024 * 4, // max record size (4KB default) + file_size: 500 * (1 << 20), // file size (500MB default) + incremental_mode: true // enable auto-incrementing IDs (default) +)! -// Store data (note: set() takes []u8 as value) -db.set(1, 'Hello World'.bytes())! +// Store data with auto-incrementing ID (incremental mode) +id := db.set(data: 'Hello World'.bytes())! + +// Store data with specific ID (is an update) +id2 := db.set(id: 1, data: 'Hello Again'.bytes())! // Retrieve data data := db.get(1)! // Returns []u8 @@ -28,7 +32,6 @@ history := db.get_history(1, 5)! // Get last 5 versions db.delete(1)! ``` - ## Features - Efficient key-value storage @@ -37,6 +40,19 @@ db.delete(1)! 
- Support for multiple backend files - Configurable record sizes and counts - Memory and disk-based lookup tables +- Optional incremental ID mode + +## Configuration Options + +```v +struct OurDBConfig { + record_nr_max u32 = 16777216 - 1 // max number of records + record_size_max u32 = 1024 * 4 // max size in bytes of a record (4KB default) + file_size u32 = 500 * (1 << 20) // file size (500MB default) + path string // directory where we will store the DB + incremental_mode bool = true // enable auto-incrementing IDs +} +``` ## Architecture @@ -46,26 +62,29 @@ OurDB consists of three main components working together in a layered architectu - Provides the public API for database operations - Handles high-level operations (set, get, delete, history) - Coordinates between lookup and backend components -- Located in `db.v` +- Supports both key-value and incremental ID modes ### 2. Lookup Table (lookup.v) - Maps keys to physical locations in the backend storage - Supports both memory and disk-based lookup tables -- Configurable key sizes for optimization +- Automatically optimizes key sizes based on database configuration - Handles sparse data efficiently -- Located in `lookup.v` +- Provides next ID generation for incremental mode ### 3. Backend Storage (backend.v) - Manages the actual data storage in files - Handles data integrity with CRC32 checksums - Supports multiple file backends for large datasets - Implements the low-level read/write operations -- Located in `backend.v` ## File Structure - `db.v`: Frontend interface providing the public API -- `lookup.v`: Implementation of the lookup table system +- `lookup.v`: Core lookup table implementation +- `lookup_location.v`: Location tracking implementation +- `lookup_location_test.v`: Location tracking tests +- `lookup_id_test.v`: ID generation tests +- `lookup_test.v`: General lookup table tests - `backend.v`: Low-level data storage implementation - `factory.v`: Database initialization and configuration - `db_test.v`: Test suite for verifying functionality @@ -73,16 +92,19 @@ OurDB consists of three main components working together in a layered architectu ## How It Works 1. **Frontend Operations** - - When you call `set(key, value)`, the frontend: - 1. Gets the storage location from the lookup table - 2. Passes the data to the backend for storage - 3. Updates the lookup table with any new location + - When you call `set()`, the frontend: + 1. In incremental mode, generates the next ID or uses provided ID + 2. Gets the storage location from the lookup table + 3. Passes the data to the backend for storage + 4. Updates the lookup table with any new location 2. **Lookup Table** - Maintains a mapping between keys and physical locations - - Optimizes key size based on maximum record count - - Can be memory-based for speed or disk-based for large datasets - - Supports sparse data storage for efficient space usage + - Optimizes key size based on: + - Total number of records (affects address space) + - Record size and count (determines file splitting) + - Supports incremental ID generation + - Persists lookup data to disk for recovery 3. 
**Backend Storage** - Stores data in one or multiple files @@ -103,13 +125,15 @@ Each record in the backend storage includes: - N bytes: Actual data ### Lookup Table Optimization -The lookup table automatically optimizes its key size based on: -- Total number of records (affects address space) -- Record size and count (determines file splitting) -- Available memory (can switch to disk-based lookup) +The lookup table automatically optimizes its key size based on the database configuration: +- 2 bytes: For databases with < 65,536 records +- 3 bytes: For databases with < 16,777,216 records +- 4 bytes: For databases with < 4,294,967,296 records +- 6 bytes: For large databases requiring multiple files ### File Management - Supports splitting data across multiple files when needed - Each file is limited to 500MB by default (configurable) - Automatic file selection based on record location - Files are created as needed with format: `${path}/${file_nr}.db` +- Lookup table state is persisted in `${path}/lookup_dump.db` diff --git a/lib/data/ourdb/backend.v b/lib/data/ourdb/backend.v index 89168e0f..505b8e97 100644 --- a/lib/data/ourdb/backend.v +++ b/lib/data/ourdb/backend.v @@ -22,28 +22,18 @@ fn (mut db OurDB) db_file_select(file_nr u16) ! { path := '${db.path}/${file_nr}.db' - if db.file_nr == file_nr { - // make sure file is opened - if !db.file.is_opened { - if !os.exists(path) { - db.create_new_db_file(file_nr)! - } - - mut file := os.open_file(path, 'r+')! - db.file = file - } - return - } - + // Always close the current file if it's open if db.file.is_opened { db.file.close() } + // Create file if it doesn't exist if !os.exists(path) { db.create_new_db_file(file_nr)! } - mut file := os.open_file(path, 'r+')! + // Open the file fresh + mut file := os.open_file(path, 'r+')! db.file = file db.file_nr = file_nr } @@ -85,6 +75,7 @@ pub fn (mut db OurDB) set_(x u32, old_location Location, data []u8) ! { file_nr: file_nr position: u32(db.file.tell()!) } + // println('Writing data at position: ${new_location.position}, file_nr: ${file_nr}') // Calculate CRC of data crc := calculate_crc(data) @@ -118,6 +109,9 @@ pub fn (mut db OurDB) set_(x u32, old_location Location, data []u8) ! { // Update lookup table with new position db.lookup.set(x, new_location)! + + // Ensure lookup table is synced + // db.save()! } // get retrieves data at specified location @@ -150,6 +144,7 @@ fn (mut db OurDB) get_(location Location) ![]u8 { if data_read_bytes != int(size) { return error('failed to read data bytes') } + // println('Reading data from position: ${location.position}, file_nr: ${location.file_nr}, size: ${size}, data: ${data}') // Verify CRC calculated_crc := calculate_crc(data) diff --git a/lib/data/ourdb/db.v b/lib/data/ourdb/db.v index 721abc73..121d4a74 100644 --- a/lib/data/ourdb/db.v +++ b/lib/data/ourdb/db.v @@ -18,7 +18,8 @@ import os // and maintains a linked list of previous values for history tracking // Returns the ID used (either x if specified, or auto-incremented if x=0) @[params] -struct OurDBSetArgs { +pub struct OurDBSetArgs { +pub mut: id ?u32 data []u8 @[required] } @@ -35,7 +36,6 @@ pub fn (mut db OurDB) set(args OurDBSetArgs) !u32 { } db.set_(id, location, args.data)! - db.lookup.set(id, location)! // TODO: maybe not needed return id } @@ -91,6 +91,15 @@ pub fn (mut db OurDB) delete(x u32) ! { db.lookup.delete(x)! 
} +// get_next_id returns the next id which will be used when storing +pub fn (mut db OurDB) get_next_id() !u32 { + if !db.incremental_mode { + return error('incremental mode is not enabled') + } + next_id := db.lookup.get_next_id()! + return next_id +} + // close closes the database file fn (mut db OurDB) lookup_dump_path() string { return '${db.path}/lookup_dump.db' @@ -115,6 +124,7 @@ fn (mut db OurDB) close() ! { db.close_() } -fn (mut db OurDB) destroy() ! { - os.rmdir_all(db.path)! +pub fn (mut db OurDB) destroy() ! { + db.close() or {} + os.rmdir_all(db.path) or {} } diff --git a/lib/data/ourdb/db_test.v b/lib/data/ourdb/db_test.v index 1753eec5..7778c581 100644 --- a/lib/data/ourdb/db_test.v +++ b/lib/data/ourdb/db_test.v @@ -67,7 +67,7 @@ fn test_history_tracking() { )! defer { - db.destroy() or { panic('failed to destroy db: ${err}') } + db.destroy() or {} } // Create multiple versions of data @@ -155,7 +155,7 @@ fn test_file_switching() { )! defer { - db.destroy() or { panic('failed to destroy db: ${err}') } + db.destroy() or {} } test_data1 := 'Test data'.bytes() diff --git a/lib/data/ourdb/db_update_test.v b/lib/data/ourdb/db_update_test.v new file mode 100644 index 00000000..e47a8eb5 --- /dev/null +++ b/lib/data/ourdb/db_update_test.v @@ -0,0 +1,48 @@ +module ourdb + +import os + +const test_dir = '/tmp/ourdb' + +fn test_db_update() { + // Ensure test directory exists and is empty + if os.exists(test_dir) { + os.rmdir_all(test_dir) or { panic(err) } + } + os.mkdir_all(test_dir) or { panic(err) } + + mut db := new( + record_nr_max: 16777216 - 1 // max number of records + record_size_max: 1024 + path: test_dir + reset: false // Don't reset since we just created a fresh directory + )! + + defer { + db.destroy() or {} + } + + // Test set and get + test_data := 'Hello, World!'.bytes() + id := db.set(data: test_data)! + + retrieved := db.get(id)! + assert retrieved == test_data + + assert id == 0 + + // Test overwrite + new_data := 'Updated data'.bytes() + id2 := db.set(id: 0, data: new_data)! + assert id2 == 0 + + // Verify lookup table has the correct location + location := db.lookup.get(id2)! + println('Location after update - file_nr: ${location.file_nr}, position: ${location.position}') + + // Get and verify the updated data + retrieved2 := db.get(id2)! + println('Retrieved data: ${retrieved2}') + println('Expected data: ${new_data}') + assert retrieved2 == new_data +} diff --git a/lib/data/ourdb/factory.v b/lib/data/ourdb/factory.v index 3f11b113..26ef2fbf 100644 --- a/lib/data/ourdb/factory.v +++ b/lib/data/ourdb/factory.v @@ -24,12 +24,12 @@ const header_size = 12 @[params] pub struct OurDBConfig { pub: - record_nr_max u32 = 16777216 - 1 // max size of records - record_size_max u32 = 1024 * 4 // max size in bytes of a record, is 4 KB default - file_size u32 = 500 * (1 << 20) // 500MB - path string // directory where we will stor the DB - + record_nr_max u32 = 16777216 - 1 // max number of records + record_size_max u32 = 1024 * 4 // max size in bytes of a record, is 4 KB default + file_size u32 = 500 * (1 << 20) // 500MB + path string // directory where we will store the DB incremental_mode bool = true + reset bool } // new_memdb creates a new memory database with the given path and lookup table @@ -56,6 +56,10 @@ pub fn new(args OurDBConfig) !OurDB { incremental_mode: args.incremental_mode )! + if args.reset { + os.rmdir_all(args.path) or {} + } + os.mkdir_all(args.path)!
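+ // at this point the storage directory is guaranteed to exist (recreated empty when reset was requested)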
mut db := OurDB{ path: args.path diff --git a/lib/data/ourdb/lookup.v b/lib/data/ourdb/lookup.v index 7e1bb9fc..eceae9c3 100644 --- a/lib/data/ourdb/lookup.v +++ b/lib/data/ourdb/lookup.v @@ -90,7 +90,7 @@ fn (lut LookupTable) get(x u32) !Location { entry_size := lut.keysize if lut.lookuppath.len > 0 { // Check file size first - file_size := os.file_size(lut.get_data_file_path()!) + file_size := os.file_size(lut.get_data_file_path()!) // this slows things down, needs a more intelligent approach at some point start_pos := x * entry_size if start_pos + entry_size > file_size { @@ -325,7 +325,7 @@ fn (mut lut LookupTable) import_data(path string) ! { incremental_file_name))! // Update the incremental value in memory inc_str := os.read_file(os.join_path(path, incremental_file_name))! - println('inc_str: ${inc_str}') + // println('inc_str: ${inc_str}') lut.incremental = inc_str.u32() } return diff --git a/lib/data/radixtree/README.md b/lib/data/radixtree/README.md new file mode 100644 index 00000000..17b2c3a9 --- /dev/null +++ b/lib/data/radixtree/README.md @@ -0,0 +1,132 @@ +# Radix Tree Implementation + +A radix tree (also known as a patricia trie or radix trie) is a space-optimized tree data structure that enables efficient string key operations. This implementation provides a persistent radix tree backed by OurDB for durable storage. + +## Key Features + +- Efficient prefix-based key operations +- Persistent storage using OurDB backend +- Memory-efficient storage of strings with common prefixes +- Support for binary values +- Thread-safe operations through OurDB + +## How It Works + +### Data Structure + +The radix tree is composed of nodes where: +- Each node stores a segment of a key (not just a single character) +- Nodes can have multiple children, each representing a different branch +- Leaf nodes contain the actual values +- Each node is persisted in OurDB with a unique ID + +```v +struct Node { +mut: + key_segment string // The segment of the key stored at this node + value []u8 // Value stored at this node (empty if not a leaf) + children []NodeRef // References to child nodes + is_leaf bool // Whether this node is a leaf node +} +``` + +### OurDB Integration + +The radix tree uses OurDB as its persistent storage backend: +- Each node is serialized and stored as a record in OurDB +- Node references use OurDB record IDs +- The tree maintains a root node ID for traversal +- Node serialization includes version tracking for format evolution + +### Key Operations + +#### Insertion +1. Traverse the tree following matching prefixes +2. Split nodes when partial matches are found +3. Create new nodes for unmatched segments +4. Update node values and references in OurDB + +#### Search +1. Start from the root node +2. Follow child nodes whose key segments match the search key +3. Return the value if an exact match is found at a leaf node + +#### Deletion +1. Locate the node containing the key +2. Remove the value and leaf status +3. Clean up empty nodes if necessary +4. Update parent references + +## Usage Example + +```v +import freeflowuniverse.herolib.data.radixtree + +// Create a new radix tree +mut tree := radixtree.new(path: '/path/to/storage')! + +// Insert key-value pairs +tree.insert('hello', 'world'.bytes())! +tree.insert('help', 'me'.bytes())! + +// Search for values +value := tree.search('hello')! // Returns 'world' as bytes +println(value.bytestr()) // Prints: world + +// Delete keys +tree.delete('help')!
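+ +// A hypothetical extension of the example above (keys not in the original): +// 'helicopter' shares the prefix 'hel' with the earlier keys, so only the +// diverging suffix gets stored in a new node. +tree.insert('helicopter', 'rotor'.bytes())! +rotor := tree.search('helicopter')! // Returns 'rotor' as bytes +println(rotor.bytestr()) // Prints: rotor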
+``` + +## Implementation Details + +### Node Serialization + +Nodes are serialized in a compact binary format: +``` +[Version(1B)][KeySegment][ValueLength(2B)][Value][ChildrenCount(2B)][Children][IsLeaf(1B)] +``` + +Where each child is stored as: +``` +[KeyPart][NodeID(4B)] +``` + +### Space Optimization + +The radix tree optimizes space usage by: +1. Sharing common prefixes between keys +2. Storing only key segments at each node instead of complete keys +3. Merging nodes with single children when possible +4. Using OurDB's efficient storage and retrieval mechanisms + +### Performance Characteristics + +- Search: O(k) where k is the key length +- Insert: O(k) for new keys, may require node splitting +- Delete: O(k) plus potential node cleanup +- Space: O(n) where n is the total length of all keys + +## Relationship with OurDB + +This radix tree implementation leverages OurDB's features: +- Persistent storage with automatic file management +- Record-based storage with unique IDs +- Data integrity through CRC32 checksums +- Configurable record sizes +- Automatic file size management + +The integration provides: +- Durability: All tree operations are persisted +- Consistency: Tree state is maintained across restarts +- Efficiency: Leverages OurDB's optimized storage +- Scalability: Handles large datasets through OurDB's file management + +## Use Cases + +Radix trees are particularly useful for: +- Prefix-based searching +- IP routing tables +- Dictionary implementations +- Auto-complete systems +- File system paths +- Any application requiring efficient string key operations with persistence diff --git a/lib/data/radixtree/factory_test.v b/lib/data/radixtree/factory_test.v new file mode 100644 index 00000000..dcbdf694 --- /dev/null +++ b/lib/data/radixtree/factory_test.v @@ -0,0 +1,126 @@ +module radixtree + +fn test_basic_operations() ! { + mut rt := new(path: '/tmp/radixtree_test', reset: true)! + + // Test insert and search + rt.insert('test', 'value1'.bytes())! + value1 := rt.search('test')! + assert value1.bytestr() == 'value1' + + // Test updating existing key + rt.insert('test', 'value2'.bytes())! + value2 := rt.search('test')! + assert value2.bytestr() == 'value2' + + // Test non-existent key + if _ := rt.search('nonexistent') { + assert false, 'Expected error for non-existent key' + } + + // Test delete + rt.delete('test')! + mut ok := false + if _ := rt.search('test') { + ok = true + } + assert ok +} + +fn test_prefix_matching() ! { + mut rt := new(path: '/tmp/radixtree_test_prefix')! + + // Insert keys with common prefixes + rt.insert('team', 'value1'.bytes())! + rt.insert('test', 'value2'.bytes())! + rt.insert('testing', 'value3'.bytes())! + + // Verify each key has correct value + value1 := rt.search('team')! + assert value1.bytestr() == 'value1' + + value2 := rt.search('test')! + assert value2.bytestr() == 'value2' + + value3 := rt.search('testing')! + assert value3.bytestr() == 'value3' + + // Delete middle key and verify others still work + rt.delete('test')! + + if _ := rt.search('test') { + assert false, 'Expected error after deletion' + } + + value1_after := rt.search('team')! + assert value1_after.bytestr() == 'value1' + + value3_after := rt.search('testing')! + assert value3_after.bytestr() == 'value3' +} + +fn test_edge_cases() ! { + mut rt := new(path: '/tmp/radixtree_test_edge')! + + // Test empty key + rt.insert('', 'empty'.bytes())! + empty_value := rt.search('')! 
+ assert empty_value.bytestr() == 'empty' + + // Test very long key + long_key := 'a'.repeat(1000) + rt.insert(long_key, 'long'.bytes())! + long_value := rt.search(long_key)! + assert long_value.bytestr() == 'long' + + // Test keys that require node splitting + rt.insert('test', 'value1'.bytes())! + rt.insert('testing', 'value2'.bytes())! + rt.insert('te', 'value3'.bytes())! + + value1 := rt.search('test')! + assert value1.bytestr() == 'value1' + + value2 := rt.search('testing')! + assert value2.bytestr() == 'value2' + + value3 := rt.search('te')! + assert value3.bytestr() == 'value3' +} + +fn test_multiple_operations() ! { + mut rt := new(path: '/tmp/radixtree_test_multiple')! + + // Insert multiple keys + keys := ['abc', 'abcd', 'abcde', 'bcd', 'bcde'] + for i, key in keys { + rt.insert(key, 'value${i + 1}'.bytes())! + } + + // Verify all keys + for i, key in keys { + value := rt.search(key)! + assert value.bytestr() == 'value${i + 1}' + } + + // Delete some keys + rt.delete('abcd')! + rt.delete('bcde')! + + // Verify remaining keys + remaining := ['abc', 'abcde', 'bcd'] + expected := ['value1', 'value3', 'value4'] + + for i, key in remaining { + value := rt.search(key)! + assert value.bytestr() == expected[i] + } + + // Verify deleted keys return error + deleted := ['abcd', 'bcde'] + for key in deleted { + if _ := rt.search(key) { + assert false, 'Expected error for deleted key: ${key}' + } + } +} diff --git a/lib/data/radixtree/radixtree.v b/lib/data/radixtree/radixtree.v new file mode 100644 index 00000000..b5fd0485 --- /dev/null +++ b/lib/data/radixtree/radixtree.v @@ -0,0 +1,306 @@ +module radixtree + +import freeflowuniverse.herolib.data.ourdb + +// Represents a node in the radix tree +struct Node { +mut: + key_segment string // The segment of the key stored at this node + value []u8 // Value stored at this node (empty if not a leaf) + children []NodeRef // References to child nodes + is_leaf bool // Whether this node is a leaf node +} + +// Reference to a node in the database +struct NodeRef { +mut: + key_part string // The key segment for this child + node_id u32 // Database ID of the node +} + +// RadixTree represents a radix tree data structure +pub struct RadixTree { +mut: + db &ourdb.OurDB // Database for persistent storage + root_id u32 // Database ID of the root node +} + +pub struct NewArgs { +pub mut: + path string + reset bool +} + +// Creates a new radix tree with the specified database path +pub fn new(args NewArgs) !&RadixTree { + mut db := ourdb.new( + path: args.path + record_size_max: 1024 * 4 // 4KB max record size + incremental_mode: true + reset: args.reset + )! + + mut root_id := u32(0) + println('Debug: Initializing root node') + if db.get_next_id()! == 0 { + println('Debug: Creating new root node') + root := Node{ + key_segment: '' + value: []u8{} + children: []NodeRef{} + is_leaf: false + } + root_id = db.set(data: serialize_node(root))! + println('Debug: Created root node with ID ${root_id}') + assert root_id == 0 + } else { + println('Debug: Using existing root node') + root_data := db.get(0)! + root_node := deserialize_node(root_data)! + println('Debug: Root node has ${root_node.children.len} children') + } + + return &RadixTree{ + db: &db + root_id: root_id + } +} + +// Inserts a key-value pair into the tree +pub fn (mut rt RadixTree) insert(key string, value []u8) ! { + mut current_id := rt.root_id + mut offset := 0 + + // Handle empty key case + if key.len == 0 { + mut root_node := deserialize_node(rt.db.get(current_id)!)! 
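+ // an empty key is stored directly on the root node rather than in a child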
+ root_node.is_leaf = true + root_node.value = value + rt.db.set(id: current_id, data: serialize_node(root_node))! + return + } + + for offset < key.len { + mut node := deserialize_node(rt.db.get(current_id)!)! + + // Find matching child + mut matched_child := -1 + for i, child in node.children { + if key[offset..].starts_with(child.key_part) { + matched_child = i + break + } + } + + if matched_child == -1 { + // No matching child found, create new leaf node + key_part := key[offset..] + new_node := Node{ + key_segment: key_part + value: value + children: []NodeRef{} + is_leaf: true + } + println('Debug: Creating new leaf node with key_part "${key_part}"') + new_id := rt.db.set(data: serialize_node(new_node))! + println('Debug: Created node ID ${new_id}') + + // Create new child reference and update parent node + println('Debug: Updating parent node ${current_id} to add child reference') + + // Get fresh copy of parent node + mut parent_node := deserialize_node(rt.db.get(current_id)!)! + println('Debug: Parent node initially has ${parent_node.children.len} children') + + // Add new child reference + parent_node.children << NodeRef{ + key_part: key_part + node_id: new_id + } + println('Debug: Added child reference, now has ${parent_node.children.len} children') + + // Update parent node in DB + println('Debug: Serializing parent node with ${parent_node.children.len} children') + parent_data := serialize_node(parent_node) + println('Debug: Parent data size: ${parent_data.len} bytes') + + // First verify we can deserialize the data correctly + println('Debug: Verifying serialization...') + if test_node := deserialize_node(parent_data) { + println('Debug: Serialization test successful - node has ${test_node.children.len} children') + } else { + println('Debug: ERROR - Failed to deserialize test data') + return error('Serialization verification failed') + } + + // Set with explicit ID to update existing node + println('Debug: Writing to DB...') + rt.db.set(id: current_id, data: parent_data)! + + // Verify by reading back and comparing + println('Debug: Reading back for verification...') + verify_data := rt.db.get(current_id)! + verify_node := deserialize_node(verify_data)! + println('Debug: Verification - node has ${verify_node.children.len} children') + + if verify_node.children.len == 0 { + println('Debug: ERROR - Node update verification failed!') + println('Debug: Original node children: ${node.children.len}') + println('Debug: Parent node children: ${parent_node.children.len}') + println('Debug: Verified node children: ${verify_node.children.len}') + println('Debug: Original data size: ${parent_data.len}') + println('Debug: Verified data size: ${verify_data.len}') + println('Debug: Data equal: ${verify_data == parent_data}') + return error('Node update failed - children array is empty') + } + return + } + + child := node.children[matched_child] + common_prefix := get_common_prefix(key[offset..], child.key_part) + + if common_prefix.len < child.key_part.len { + // Split existing node + mut child_node := deserialize_node(rt.db.get(child.node_id)!)! + + // Create new intermediate node + mut new_node := Node{ + key_segment: child.key_part[common_prefix.len..] + value: child_node.value + children: child_node.children + is_leaf: child_node.is_leaf + } + new_id := rt.db.set(data: serialize_node(new_node))! + + // Update current node + node.children[matched_child] = NodeRef{ + key_part: common_prefix + node_id: new_id + } + rt.db.set(id: current_id, data: serialize_node(node))! 
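+ // the child reference now carries only the common prefix; the diverging remainder of the old segment lives in the new intermediate node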
+ } + + if offset + common_prefix.len == key.len { + // Update value at existing node + mut child_node := deserialize_node(rt.db.get(child.node_id)!)! + child_node.value = value + child_node.is_leaf = true + rt.db.set(id: child.node_id, data: serialize_node(child_node))! + return + } + + offset += common_prefix.len + current_id = child.node_id + } +} + +// Searches for a key in the tree +pub fn (mut rt RadixTree) search(key string) ![]u8 { + mut current_id := rt.root_id + mut offset := 0 + + // Handle empty key case + if key.len == 0 { + root_node := deserialize_node(rt.db.get(current_id)!)! + if root_node.is_leaf { + return root_node.value + } + return error('Key not found') + } + + for offset < key.len { + node := deserialize_node(rt.db.get(current_id)!)! + + mut found := false + for child in node.children { + if key[offset..].starts_with(child.key_part) { + if offset + child.key_part.len == key.len { + child_node := deserialize_node(rt.db.get(child.node_id)!)! + if child_node.is_leaf { + return child_node.value + } + } + current_id = child.node_id + offset += child.key_part.len + found = true + break + } + } + + if !found { + return error('Key not found') + } + } + + return error('Key not found') +} + +// Deletes a key from the tree +pub fn (mut rt RadixTree) delete(key string) ! { + mut current_id := rt.root_id + mut offset := 0 + mut path := []NodeRef{} + + // Find the node to delete + for offset < key.len { + node := deserialize_node(rt.db.get(current_id)!)! + + mut found := false + for child in node.children { + if key[offset..].starts_with(child.key_part) { + path << child + current_id = child.node_id + offset += child.key_part.len + found = true + + // Check if we've matched the full key + if offset == key.len { + child_node := deserialize_node(rt.db.get(child.node_id)!)! + if child_node.is_leaf { + found = true + break + } + } + break + } + } + + if !found { + return error('Key not found') + } + } + + if path.len == 0 { + return error('Key not found') + } + + // Remove the leaf node + mut last_node := deserialize_node(rt.db.get(path.last().node_id)!)! + last_node.is_leaf = false + last_node.value = []u8{} + + // If node has no children, remove it from parent + if last_node.children.len == 0 { + if path.len > 1 { + mut parent_node := deserialize_node(rt.db.get(path[path.len - 2].node_id)!)! + for i, child in parent_node.children { + if child.node_id == path.last().node_id { + parent_node.children.delete(i) + break + } + } + rt.db.set(id: path[path.len - 2].node_id, data: serialize_node(parent_node))! + } + } else { + rt.db.set(id: path.last().node_id, data: serialize_node(last_node))! + } +} + +// Helper function to get the common prefix of two strings +fn get_common_prefix(a string, b string) string { + mut i := 0 + for i < a.len && i < b.len && a[i] == b[i] { + i++ + } + return a[..i] +} diff --git a/lib/data/radixtree/radixtree_debug.v b/lib/data/radixtree/radixtree_debug.v new file mode 100644 index 00000000..c00f1ad2 --- /dev/null +++ b/lib/data/radixtree/radixtree_debug.v @@ -0,0 +1,113 @@ +module radixtree + +import freeflowuniverse.herolib.data.ourdb + +// Gets a node from the database by its ID +pub fn (mut rt RadixTree) get_node_by_id(id u32) !Node { + node_data := rt.db.get(id)! + node := deserialize_node(node_data)! + println('Debug: Retrieved node ${id} with ${node.children.len} children') + return node +} + +// Logs the current state of a node +pub fn (mut rt RadixTree) debug_node(id u32, msg string) ! { + node := rt.get_node_by_id(id)! 
+ println('Debug: ${msg}') + println(' Node ID: ${id}') + println(' Key Segment: "${node.key_segment}"') + println(' Is Leaf: ${node.is_leaf}') + println(' Children: ${node.children.len}') + for child in node.children { + println(' - Child ID: ${child.node_id}, Key Part: "${child.key_part}"') + } +} + +// Prints the current state of the database +pub fn (mut rt RadixTree) debug_db() ! { + println('\nDatabase State:') + println('===============') + mut next_id := rt.db.get_next_id()! + for id := u32(0); id < next_id; id++ { + if data := rt.db.get(id) { + if node := deserialize_node(data) { + println('ID ${id}:') + println(' Key Segment: "${node.key_segment}"') + println(' Is Leaf: ${node.is_leaf}') + println(' Children: ${node.children.len}') + for child in node.children { + println(' - Child ID: ${child.node_id}, Key Part: "${child.key_part}"') + } + } else { + println('ID ${id}: Failed to deserialize node') + } + } else { + println('ID ${id}: No data') + } + } +} + +// Prints the tree structure starting from a given node ID +pub fn (mut rt RadixTree) print_tree_from_node(node_id u32, indent string) ! { + node := rt.get_node_by_id(node_id)! + + mut node_info := '${indent}Node(id: ${node_id})' + node_info += '\n${indent}├── key_segment: "${node.key_segment}"' + node_info += '\n${indent}├── is_leaf: ${node.is_leaf}' + if node.is_leaf { + node_info += '\n${indent}├── value: ${node.value.bytestr()}' + } + node_info += '\n${indent}└── children: ${node.children.len}' + if node.children.len > 0 { + node_info += ' [' + for i, child in node.children { + if i > 0 { + node_info += ', ' + } + node_info += '${child.node_id}:${child.key_part}' + } + node_info += ']' + } + println(node_info) + + // Print children recursively with increased indentation + for i, child in node.children { + is_last := i == node.children.len - 1 + child_indent := if is_last { + indent + ' ' + } else { + indent + '│ ' + } + rt.print_tree_from_node(child.node_id, child_indent)! + } +} + +// Prints the entire tree structure starting from root +pub fn (mut rt RadixTree) print_tree() ! { + println('\nRadix Tree Structure:') + println('===================') + rt.print_tree_from_node(rt.root_id, '')! +} + +// Gets detailed information about a specific node +pub fn (mut rt RadixTree) get_node_info(id u32) !string { + node := rt.get_node_by_id(id)! 
+ + mut info := 'Node Details:\n' + info += '=============\n' + info += 'ID: ${id}\n' + info += 'Key Segment: "${node.key_segment}"\n' + info += 'Is Leaf: ${node.is_leaf}\n' + if node.is_leaf { + info += 'Value: ${node.value}\n' + } + info += 'Number of Children: ${node.children.len}\n' + if node.children.len > 0 { + info += '\nChildren:\n' + for child in node.children { + info += '- ID: ${child.node_id}, Key Part: "${child.key_part}"\n' + } + } + + return info +} diff --git a/lib/data/radixtree/serialize.v b/lib/data/radixtree/serialize.v new file mode 100644 index 00000000..736f59c1 --- /dev/null +++ b/lib/data/radixtree/serialize.v @@ -0,0 +1,75 @@ +module radixtree + +import freeflowuniverse.herolib.data.encoder + +const version = u8(1) // Current binary format version + +// Serializes a node to bytes for storage +fn serialize_node(node Node) []u8 { + mut e := encoder.new() + + // Add version byte + e.add_u8(version) + + // Add key segment + e.add_string(node.key_segment) + + // Add value as []u8 + e.add_u16(u16(node.value.len)) + e.data << node.value + + // Add children + e.add_u16(u16(node.children.len)) + for child in node.children { + e.add_string(child.key_part) + e.add_u32(child.node_id) + } + + // Add leaf flag + e.add_u8(if node.is_leaf { u8(1) } else { u8(0) }) + + return e.data +} + +// Deserializes bytes to a node +fn deserialize_node(data []u8) !Node { + mut d := encoder.decoder_new(data) + + // Read and verify version + version_byte := d.get_u8() + if version_byte != version { + return error('Invalid version byte: expected ${version}, got ${version_byte}') + } + + // Read key segment + key_segment := d.get_string() + + // Read value as []u8 + value_len := d.get_u16() + mut value := []u8{len: int(value_len)} + for i in 0 .. int(value_len) { + value[i] = d.get_u8() + } + + // Read children + children_len := d.get_u16() + mut children := []NodeRef{cap: int(children_len)} + for _ in 0 .. children_len { + key_part := d.get_string() + node_id := d.get_u32() + children << NodeRef{ + key_part: key_part + node_id: node_id + } + } + + // Read leaf flag + is_leaf := d.get_u8() == 1 + + return Node{ + key_segment: key_segment + value: value + children: children + is_leaf: is_leaf + } +} diff --git a/lib/data/radixtree/serialize_test.v b/lib/data/radixtree/serialize_test.v new file mode 100644 index 00000000..1ffe0e2f --- /dev/null +++ b/lib/data/radixtree/serialize_test.v @@ -0,0 +1,110 @@ +module radixtree + +fn test_serialize_deserialize() { + // Create a test node with children + node := Node{ + key_segment: 'test' + value: 'hello world'.bytes() + children: [ + NodeRef{ + key_part: 'child1' + node_id: 1 + }, + NodeRef{ + key_part: 'child2' + node_id: 2 + }, + ] + is_leaf: true + } + + // Serialize + data := serialize_node(node) + + // Verify version byte + assert data[0] == version + + // Deserialize + decoded := deserialize_node(data)! 
+ + // Verify all fields match + assert decoded.key_segment == node.key_segment + assert decoded.value == node.value + assert decoded.is_leaf == node.is_leaf + assert decoded.children.len == node.children.len + + // Verify children + assert decoded.children[0].key_part == node.children[0].key_part + assert decoded.children[0].node_id == node.children[0].node_id + assert decoded.children[1].key_part == node.children[1].key_part + assert decoded.children[1].node_id == node.children[1].node_id +} + +fn test_empty_node() { + // Test node with empty values + node := Node{ + key_segment: '' + value: []u8{} + children: []NodeRef{} + is_leaf: false + } + + data := serialize_node(node) + decoded := deserialize_node(data)! + + assert decoded.key_segment == node.key_segment + assert decoded.value == node.value + assert decoded.children == node.children + assert decoded.is_leaf == node.is_leaf +} + +fn test_large_values() { + // Create large test data + mut large_value := []u8{len: 1000, init: u8(index & 0xFF)} + mut children := []NodeRef{cap: 100} + for i in 0 .. 100 { + children << NodeRef{ + key_part: 'child${i}' + node_id: u32(i) + } + } + + node := Node{ + key_segment: 'large_test' + value: large_value + children: children + is_leaf: true + } + + data := serialize_node(node) + decoded := deserialize_node(data)! + + assert decoded.key_segment == node.key_segment + assert decoded.value == node.value + assert decoded.children.len == node.children.len + + // Verify some random children + assert decoded.children[0] == node.children[0] + assert decoded.children[50] == node.children[50] + assert decoded.children[99] == node.children[99] +} + +fn test_invalid_version() { + node := Node{ + key_segment: 'test' + value: []u8{} + children: []NodeRef{} + is_leaf: false + } + + mut data := serialize_node(node) + // Corrupt version byte + data[0] = 255 + + // Should return error for version mismatch + if result := deserialize_node(data) { + assert false, 'Expected error for invalid version byte' + } else { + assert err.msg() == 'Invalid version byte: expected ${version}, got 255' + } +} diff --git a/lib/develop/gittools/factory.v b/lib/develop/gittools/factory.v index c6af0525..47436e6e 100644 --- a/lib/develop/gittools/factory.v +++ b/lib/develop/gittools/factory.v @@ -37,7 +37,7 @@ pub fn new(args_ GitStructureArgsNew) !&GitStructure { ssh_key_name: args.ssh_key_name } - return get(coderoot: args.coderoot,reload:args.reload,cfg:cfg) + return get(coderoot: args.coderoot, reload: args.reload, cfg: cfg) } @[params] @@ -45,7 +45,7 @@ pub struct GitStructureArgGet { pub mut: coderoot string reload bool - cfg ?GitStructureConfig + cfg ?GitStructureConfig } // Retrieve a GitStructure instance based on the given arguments. @@ -55,6 +55,11 @@ pub fn get(args_ GitStructureArgGet) !&GitStructure { args.coderoot = '${os.home_dir()}/code' } + // make sure coderoot exists + if !os.exists(args.coderoot) { + os.mkdir_all(args.coderoot)! + } + rediskey_ := cache_key(args.coderoot) // Return existing instance if already created. @@ -72,7 +77,7 @@ pub fn get(args_ GitStructureArgGet) !&GitStructure { coderoot: pathlib.get_dir(path: args.coderoot, create: true)! } - gs.config()! //will load the config, don't remove + gs.config()! // will load the config, don't remove gs.load(false)! 
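+ // load(false) scans the coderoot and fills gs.repos without resetting the cache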
if gs.repos.keys().len == 0 || args.reload { @@ -83,4 +88,3 @@ return gsinstances[rediskey_] or { panic('bug') } } - diff --git a/lib/develop/gittools/gitstructure.v b/lib/develop/gittools/gitstructure.v index 9c627110..d7635345 100644 --- a/lib/develop/gittools/gitstructure.v +++ b/lib/develop/gittools/gitstructure.v @@ -9,27 +9,25 @@ import json pub struct GitStructureConfig { pub mut: - coderoot string //just to be informative, its not used + coderoot string // just to be informative, it's not used light bool = true // If true, clones only the last history for all branches (clone with only 1 level deep) log bool = true // If true, logs git commands/statements debug bool = true ssh_key_name string } - // GitStructure holds information about repositories within a specific code root. // This structure keeps track of loaded repositories, their configurations, and their status. @[heap] pub struct GitStructure { mut: - config_ ?GitStructureConfig // Configuration settings for the git structure. + config_ ?GitStructureConfig // Configuration settings for the git structure. pub mut: key string // Unique key representing the git structure (default is hash of $home/code). repos map[string]&GitRepo // Map of repositories - coderoot pathlib.Path + coderoot pathlib.Path } - ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// @@ -41,53 +39,31 @@ pub mut: pub fn (mut gitstructure GitStructure) load(reload bool) ! { mut processed_paths := []string{} - if reload{ - gitstructure.repos=map[string]&GitRepo{} + if reload { + gitstructure.repos = map[string]&GitRepo{} } gitstructure.load_recursive(gitstructure.coderoot.path, mut processed_paths)! - if reload{ + if reload { gitstructure.cache_reset()! } - // mut ths := []thread !{} - //need to make sure redis is empty before doing the threads redisclient.reset()! - redisclient.checkempty() + redisclient.checkempty() for _, mut repo in gitstructure.repos { // mut myfunction := fn (mut repo GitRepo) ! { // } - //ths << spawn myfunction(mut repo_) + // ths << spawn myfunction(mut repo_) repo.status_update(reload: reload) or { - msg:="Error in git repo: ${repo.path()}\n${err}" + msg := 'Error in git repo: ${repo.path()}\n${err}' console.print_stderr(msg) return error(msg) } } - - // pp.work_on_items(todo) - //console.print_debug('loaded all threads for git on ${gitstructure.coderoot}') - // for th in ths { - // th.wait()! - // } - - // for x in pp.get_results[SResult]() { - // println('result: ${x.s}') - // } - - // console.print_debug("threads finished") - - //now we need to load them back in our memory because these were done in sub process - // for _, mut r in gitstructure.repos { - // r.cache_get()! - // } - - - // gitstructure.init()! } -// Recursively loads repositories from the provided path, updating their statuses, does not check the status +// Recursively scans the provided path and loads the repositories it finds; statuses are not checked here // // Args: // - path (string): The path to search for repositories.
@@ -136,7 +112,6 @@ fn (mut gitstructure GitStructure) load_recursive(path string, mut processed_pat } } - @[params] pub struct RepoInitParams { ssh_key_name string // name of ssh key to be used in repo @@ -192,76 +167,72 @@ pub fn (mut gitstructure GitStructure) get_working_repo() ?GitRepo { return gitstructure.repo_init_from_path_(curdir.path) or { return none } } - -//key in redis used to store all config info +// key in redis used to store all config info fn cache_key(coderoot string) string { key := md5.hexhash(coderoot) return 'git:${key}' } -//key in redis used to store all config info +// key in redis used to store all config info pub fn (mut self GitStructure) cache_key() string { return cache_key(self.coderoot.path) } -//load from cache +// load from cache pub fn (mut self GitStructure) cache_load() ! { // Retrieve the configuration from Redis. mut redis := redis_get() - keys := redis.keys("${self.cache_key()}:repos")! - self.repos = map[string]&GitRepo{} //reset + keys := redis.keys('${self.cache_key()}:repos')! + self.repos = map[string]&GitRepo{} // reset for key in keys { - data:=redis.get(key)! - mut r:=json.decode(GitRepo,data)! + data := redis.get(key)! + mut r := json.decode(GitRepo, data)! self.repos[key] = &r } } - // Reset all caches and configurations for all Git repositories. pub fn (mut self GitStructure) cache_reset() ! { mut redis := redis_get() - keys := redis.keys("${self.cache_key()}:**")! + keys := redis.keys('${self.cache_key()}:**')! for key in keys { redis.del(key)! } } - // Load config from redis fn (mut self GitStructure) coderoot() !pathlib.Path { - mut coderoot := pathlib.get_dir(path:self.coderoot.path,create:true)! + mut coderoot := pathlib.get_dir(path: self.coderoot.path, create: true)! return coderoot } - ////// CONFIG // Load config from redis pub fn (mut self GitStructure) config() !GitStructureConfig { - mut config := self.config_ or { + mut config := self.config_ or { mut redis := redis_get() - data:=redis.get("${self.cache_key()}:config")! - mut c:= GitStructureConfig{} - if data.len>0{ - c = json.decode(GitStructureConfig,data)! + data := redis.get('${self.cache_key()}:config')! + mut c := GitStructureConfig{} + if data.len > 0 { + c = json.decode(GitStructureConfig, data)! } - c + c } + return config } // Reset the configuration cache for Git structures. pub fn (mut self GitStructure) config_reset() ! { mut redis := redis_get() - redis.del("${self.cache_key()}:config")! + redis.del('${self.cache_key()}:config')! } - -//save to the cache +// save to the cache pub fn (mut self GitStructure) config_save() ! { // Retrieve the configuration from Redis. mut redis := redis_get() datajson := json.encode(self.config) - redis.set("${self.cache_key()}:config", datajson)! + redis.set('${self.cache_key()}:config', datajson)! 
} diff --git a/lib/develop/gittools/gittools_do.v b/lib/develop/gittools/gittools_do.v index d8fa1712..f3798d36 100644 --- a/lib/develop/gittools/gittools_do.v +++ b/lib/develop/gittools/gittools_do.v @@ -20,8 +20,10 @@ pub mut: msg string url string branch string + path string // path to start from recursive bool pull bool + reload bool // means reload the info into the cache script bool = true // run non interactive reset bool = true // means we will lose changes (only relevant for clone, pull) } @@ -37,16 +39,21 @@ pub mut: // msg string // url string // pull bool +// reload bool //means reload the info into the cache // script bool = true // run non interactive // reset bool = true // means we will lose changes (only relevant for clone, pull) //``` pub fn (mut gs GitStructure) do(args_ ReposActionsArgs) !string { mut args := args_ - console.print_debug('git do ${args.cmd}') + // console.print_debug('git do ${args.cmd}') + if args.path == '' { + args.path = os.getwd() + } + + // see if it's one repo we are in, based on the current path if args.repo == '' && args.account == '' && args.provider == '' && args.filter == '' { - curdir := os.getwd() - mut curdiro := pathlib.get_dir(path: curdir, create: false)! + mut curdiro := pathlib.get_dir(path: args.path, create: false)! mut parentpath := curdiro.parent_find('.git') or { pathlib.Path{} } if parentpath.path != '' { r0 := gs.repo_init_from_path_(parentpath.path)! @@ -55,15 +62,33 @@ pub fn (mut gs GitStructure) do(args_ ReposActionsArgs) !string { args.provider = r0.provider } } + // if a url was given, it identifies a single repo + if args.url.len > 0 { + if !(args.repo == '' && args.account == '' && args.provider == '' && args.filter == '') { + return error('when specifying a url you cannot specify repo, account, provider or filter') + } + mut r0 := gs.get_repo(url: args.url)! + args.repo = r0.name + args.account = r0.account + args.provider = r0.provider + } args.cmd = args.cmd.trim_space().to_lower() mut ui := gui.new()! - if args.cmd == 'reload' { - console.print_header(' - reload gitstructure ${gs.config()!.coderoot}') - gs.load(true)! - return '' + mut repos := gs.get_repos( + filter: args.filter + name: args.repo + account: args.account + provider: args.provider + )! + + // reset the status for the repos + if args.reload { + for mut repo in repos { + repo.cache_last_load_clear()! + } } if args.cmd == 'list' { @@ -76,42 +101,7 @@ pub fn (mut gs GitStructure) do(args_ ReposActionsArgs) !string { return '' } - mut repos := gs.get_repos( - filter: args.filter - name: args.repo - account: args.account - provider: args.provider - )! - - if args.url.len > 0 { - mut g := gs.get_repo(url: args.url)! - g.load()! - if args.cmd == 'cd' { - return g.path() - } - if args.reset { - g.remove_changes()! - } - if args.cmd == 'pull' || args.pull { - g.pull()! - } - if args.cmd == 'push' { - if g.need_commit()! { - if args.msg.len == 0 { - return error('please specify message with -m ...') - } - g.commit(args.msg)! - } - g.push()! - } - if args.cmd == 'pull' || args.cmd == 'clone' || args.cmd == 'push' { - gpath := g.path() - console.print_debug('git do ok, on path ${gpath}') - return gpath - } - repos = [g] - } - + // means we are on 1 repo if args.cmd in 'sourcetree,edit'.split(',') { if repos.len == 0 { return error('please specify at least 1 repo for cmd:${args.cmd}') @@ -138,9 +128,9 @@ pub fn (mut gs GitStructure) do(args_ ReposActionsArgs) !string { provider: args.provider )!
- mut need_commit := false - mut need_pull := false - mut need_push := false + mut need_commit0 := false + mut need_pull0 := false + mut need_push0 := false if repos.len == 0 { console.print_header(' - nothing to do.') @@ -149,26 +139,32 @@ pub fn (mut gs GitStructure) do(args_ ReposActionsArgs) !string { // check on repos who needs what for mut g in repos { - g.load()! - // console.print_debug(st) - need_commit = g.need_commit()! || need_commit - if args.cmd == 'push' && need_commit { - need_push = true + if args.cmd == 'push' && g.need_push_or_pull()! { + need_push0 = true + } + + if args.cmd in ['push', 'pull'] && (need_push0 || g.need_push_or_pull()!) { + need_pull0 = true + } + + if args.cmd in ['push', 'pull', 'commit'] && (g.need_commit()!) { + need_commit0 = true } - need_pull = args.cmd in 'pull,push'.split(',') // always do pull when push and pull - need_push = args.cmd == 'push' && (g.need_push_or_pull()! || need_push) } + // console.print_debug(" --- status all repo's\n need_commit0:${need_commit0} \n need_pull0:${need_pull0} \n need_push0:${need_push0}") + // exit(0) + mut ok := false - if need_commit || need_pull || need_push { + if need_commit0 || need_pull0 || need_push0 { mut out := '\n ** NEED TO ' - if need_commit { + if need_commit0 { out += 'COMMIT ' } - if need_pull { + if need_pull0 { out += 'PULL ' } - if need_push { + if need_push0 { out += 'PUSH ' } if args.reset { @@ -178,91 +174,88 @@ pub fn (mut gs GitStructure) do(args_ ReposActionsArgs) !string { if args.script { ok = true } else { - ok = ui.ask_yesno(question: 'Is above ok?')! + if need_commit0 || need_pull0 || need_push0 { + ok = ui.ask_yesno(question: 'Is above ok?')! + } else { + console.print_green('nothing to do') + } + } + + if ok == false { + return error('cannot continue with action, you asked me to stop.\n${args}') + } + + if need_commit0 { + if args.msg.len == 0 && args.script { + return error('message needs to be specified for commit.') + } + if args.msg.len == 0 { + args.msg = ui.ask_question( + question: 'commit message for the actions: ' + )! + } } } + if args.cmd == 'delete' { if args.script { ok = true } else { ok = ui.ask_yesno(question: 'Is it ok to delete above repos? (DANGEROUS)')! } - } - if ok == false { - return error('cannot continue with action, you asked me to stop.\n${args}') - } - - // mut changed := false - - mut ths := []thread !bool{} - for mut g in repos { - ths << spawn fn (mut g GitRepo, args ReposActionsArgs, need_commit bool, need_push bool, shared ui generic.UserInterface) !bool { - redisclient.reset()! - redisclient.checkempty() - mut has_changed := false - need_commit_repo := (g.need_commit()! || need_commit) - && args.cmd in 'commit,pull,push'.split(',') - need_pull_repo := args.cmd in 'pull,push'.split(',') // always do pull when push and pull - need_push_repo := args.cmd in 'push'.split(',') - && (g.need_push_or_pull()! || need_push) - // console.print_debug(" --- git_do ${g.addr.name} ${st.need_commit} ${st.need_pull} ${st.need_push}") - - if need_commit_repo { - mut msg := args.msg - if msg.len == 0 { - if args.script { - return error('message needs to be specified for commit.') - } - - lock ui { - msg = ui.ask_question( - question: 'commit message for repo: ${g.account}/${g.name} ' - )! - } - } - console.print_header(' - commit ${g.account}/${g.name}') - g.commit(msg)! - has_changed = true - } - if need_pull_repo { - if args.reset { - console.print_header(' - remove changes ${g.account}/${g.name}') - g.remove_changes()! 
- } - console.print_header(' - pull ${g.account}/${g.name}') - g.pull()! - has_changed = true - } - if need_push_repo { - console.print_header(' - push ${g.account}/${g.name}') - g.push()! - has_changed = true - } - if args.cmd == 'delete' { - g.delete()! - has_changed = true - } - - return has_changed - }(mut g, args, need_commit, need_push, shared &ui) - } - - for th in ths { - has_changed := th.wait()! - if has_changed { - // console.clear() - console.print_header('\nCompleted required actions.\n') - - gs.repos_print( - filter: args.filter - name: args.repo - account: args.account - provider: args.provider - )! + if ok == false { + return error('cannot continue with action, you asked me to stop.\n${args}') } } + mut has_changed := false + for mut g in repos { + need_push_repo := need_push0 && g.need_push_or_pull()! + need_pull_repo := need_push_repo || (need_pull0 && g.need_push_or_pull()!) + need_commit_repo := need_push_repo || need_pull_repo + || (need_commit0 && g.need_commit()!) + + // console.print_debug(" --- git_do ${g.cache_key()} \n need_commit_repo:${need_commit_repo} \n need_pull_repo:${need_pull_repo} \n need_push_repo:${need_push_repo}") + + if need_commit_repo { + mut msg := args.msg + console.print_header(' - commit ${g.account}/${g.name}') + g.commit(msg)! + has_changed = true + } + if need_pull_repo { + if args.reset { + console.print_header(' - remove changes ${g.account}/${g.name}') + g.remove_changes()! + } + console.print_header(' - pull ${g.account}/${g.name}') + g.pull()! + has_changed = true + } + if need_push_repo { + console.print_header(' - push ${g.account}/${g.name}') + g.push()! + has_changed = true + } + if args.cmd == 'delete' { + g.delete()! + has_changed = true + } + } + + if has_changed { + // console.clear() + console.print_header('\nCompleted required actions.\n') + + gs.repos_print( + filter: args.filter + name: args.repo + account: args.account + provider: args.provider + )! + } + return '' } // end for the commit, pull, push, delete diff --git a/lib/develop/gittools/repos_get.v b/lib/develop/gittools/repos_get.v index 9b4a20e8..9f6dc5e6 100644 --- a/lib/develop/gittools/repos_get.v +++ b/lib/develop/gittools/repos_get.v @@ -8,14 +8,15 @@ import time @[params] pub struct ReposGetArgs { pub mut: - filter string // Optional filter for repository names - name string // Specific repository name to retrieve. - account string // Git account associated with the repository. - provider string // Git provider (e.g., GitHub). - pull bool // Pull the last changes. - reset bool // Reset the changes. - reload bool // Reload the repo into redis cache - url string // Repository URL + filter string // Optional filter for repository names + name string // Specific repository name to retrieve. + account string // Git account associated with the repository. + provider string // Git provider (e.g., GitHub). + pull bool // Pull the last changes. + reset bool // Reset the changes. + status_clean bool // make sure each cached status is set to 0; combined with status_update this results in a reload + status_update bool // make sure each repo gets its status updated + url string // Repository URL } // Retrieves a list of repositories from the git structure that match the provided arguments. @@ -28,9 +29,10 @@ pub mut: // name string // Specific repository name to retrieve. // account string // Git account associated with the repository. // provider string // Git provider (e.g., GitHub). -// pull bool // Pull the last changes. -// reset bool // Reset the changes.
-// reload bool // Reload the repo into redis cache +// pull bool // Pull the last changes. +// reset bool // Reset the changes. +// status_clean bool // make sure each cached status is set to 0; combined with status_update this results in a reload +// status_update bool // make sure each repo gets its status updated // url string // Repository URL, used if cloning is needed. //``` // Returns: @@ -41,13 +43,6 @@ pub fn (mut gitstructure GitStructure) get_repos(args_ ReposGetArgs) ![]&GitRepo mut res := []&GitRepo{} for _, repo in gitstructure.repos { - relpath := repo.get_relative_path()! - - if args.filter != '' && relpath.contains(args.filter) { - res << repo - continue - } - if args.url.len > 0 { // if being mathed from url load repo info git_location := gitstructure.gitlocation_from_url(args.url)! @@ -55,34 +50,26 @@ args.provider = git_location.provider args.name = git_location.name } - if repo_match_check(repo, args) { + + if repo_match_check(repo, args)! { res << repo } } - // operate per repo on thread based on args - mut ths := []thread !{} for mut repo in res { - // check redis cache outside, in threads is problematic - repo.cache_get() or { return error('failed to get repo cache ${err}') } - if time.since(time.unix(repo.last_load)) > 24 * time.hour { - args.reload = true + if args.status_clean { + repo.cache_last_load_clear()! } - ths << spawn fn (mut repo GitRepo, args ReposGetArgs) ! { - redisclient.reset()! - redisclient.checkempty() + if args.status_update { + repo.status_update()! + } + if args.reset { + repo.reset()! + } else { if args.pull { repo.pull()! - } else if args.reset { - repo.reset()! - } else if args.reload { - repo.load()! } - }(mut repo, args) - } - - for th in ths { - th.wait()! + } } return res @@ -138,8 +125,43 @@ pub fn (mut gitstructure GitStructure) get_repo(args_ ReposGetArgs) !&GitRepo { // // Returns: // - bool: True if the repository matches, false otherwise. -fn repo_match_check(repo GitRepo, args ReposGetArgs) bool { - return (args.name.len == 0 || repo.name == args.name) +fn repo_match_check(repo GitRepo, args ReposGetArgs) !bool { + mut r := (args.name.len == 0 || repo.name == args.name) && (args.account.len == 0 || repo.account == args.account) && (args.provider.len == 0 || repo.provider == args.provider) + relpath := repo.get_relative_path()! + if r { + if args.filter != '' && !(relpath.contains(args.filter)) { + return false + } + } + + return r +} + +// Retrieves a single repository path based on the provided arguments (goes inside repo). +// if pull is set, a pull is forced (an error is returned when it fails); if reset is set, local changes are removed +// If the repository does not exist, it will be cloned +// +// Args: +//``` +// ReposGetArgs { +// name string // Specific repository name to retrieve. +// account string // Git account associated with the repository. +// provider string // Git provider (e.g., GitHub). +// pull bool // Pull the last changes. +// reset bool // Reset the changes. +// status_clean bool // reset each cached status (with status_update this forces a reload) +// url string // Repository URL, used if cloning is needed. +//``` +// +// Returns: +// - string: Path on disk of the retrieved or cloned repository. +// +// Raises: +// - Error: If multiple repositories are found with similar names or if cloning fails. +pub fn (mut gitstructure GitStructure) get_path(args_ ReposGetArgs) !string { + mut r := gitstructure.get_repo(args_)! + mut mypath := r.get_path_of_url(args_.url)!
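+ // the url may include a sub-path inside the repo; get_path_of_url maps it to the corresponding location on disk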
+ return mypath } diff --git a/lib/develop/gittools/repos_print.v b/lib/develop/gittools/repos_print.v index e3ebaa2c..bdfd60e6 100644 --- a/lib/develop/gittools/repos_print.v +++ b/lib/develop/gittools/repos_print.v @@ -14,7 +14,7 @@ fn get_repo_status(gr GitRepo) !string { if repo.need_push_or_pull()! { statuses << 'PULL' } - + return statuses.join(', ') } @@ -41,6 +41,7 @@ pub fn (mut gitstructure GitStructure) repos_print(args ReposGetArgs) ! { // Collect repository information based on the provided criteria for _, repo in gitstructure.get_repos(args)! { + // repo.status_update()! repo_data << format_repo_info(repo)! } @@ -54,7 +55,7 @@ pub fn (mut gitstructure GitStructure) repos_print(args ReposGetArgs) ! { // } else { // 'Repositories: ${gitstructure.config()!.coderoot}' // } - header:='Repositories: ${gitstructure.config()!.coderoot}' + header := 'Repositories: ${gitstructure.config()!.coderoot}' console.print_header(header) // Print the repository information in a formatted array diff --git a/lib/develop/gittools/repository.v b/lib/develop/gittools/repository.v index 55ab2c2d..11844d9e 100644 --- a/lib/develop/gittools/repository.v +++ b/lib/develop/gittools/repository.v @@ -18,7 +18,7 @@ pub mut: config GitRepoConfig // Repository-specific configuration last_load int // Epoch timestamp of the last load from reality deploysshkey string // to use with git - has_changes bool + has_changes bool } // this is the status we want, we need to work towards off @@ -86,10 +86,10 @@ pub fn (mut repo GitRepo) commit(msg string) ! { return error('Cannot commit repo: ${repo_path}. Error: ${err}') } console.print_green('Changes committed successfully.') + repo.load()! } else { console.print_debug('No changes to commit.') } - repo.load()! } // Push local changes to the remote repository. @@ -104,7 +104,6 @@ pub fn (mut repo GitRepo) push() ! { repo.load()! } else { console.print_header('Everything is up to date.') - repo.load()! } } @@ -121,9 +120,7 @@ pub fn (mut repo GitRepo) pull(args_ PullCheckoutArgs) ! { repo.checkout()! } - repo.exec('git pull') or { - return error('Cannot pull repo: ${repo.path()}. Error: ${err}') - } + repo.exec('git pull') or { return error('Cannot pull repo: ${repo.path()}. Error: ${err}') } if args_.submodules { repo.update_submodules()! @@ -148,6 +145,7 @@ pub fn (mut repo GitRepo) checkout() ! { if repo.status_wanted.branch.len > 0 { repo.exec('git checkout ${repo.status_wanted.branch}')! } + repo.cache_last_load_clear()! } // Create a new branch in the repository. @@ -155,6 +153,7 @@ pub fn (mut repo GitRepo) branch_create(branchname string) ! { repo.exec('git branch -c ${branchname}') or { return error('Cannot Create branch: ${repo.path()} to ${branchname}\nError: ${err}') } + repo.cache_last_load_clear()! console.print_green('Branch ${branchname} created successfully.') } @@ -196,9 +195,10 @@ pub fn (mut repo GitRepo) tag_exists(tag string) !bool { // Deletes the Git repository pub fn (mut repo GitRepo) delete() ! { repo_path := repo.path() + key := repo.cache_key() repo.cache_delete()! osal.rm(repo_path)! - repo.load()! + repo.gs.repos.delete(key) // Remove from GitStructure's repos map } // Create GitLocation from the path within the Git repository @@ -207,12 +207,19 @@ pub fn (mut gs GitRepo) gitlocation_from_path(path string) !GitLocation { return error('Path must be relative, cannot start with / or ~') } - // TODO: check that path is inside gitrepo - // TODO: get relative path in relation to root of gitrepo - mut git_path := gs.patho()! 
-	if !os.exists(git_path.path) {
-		return error('Path does not exist inside the repository: ${git_path.path}')
+	repo_path := git_path.path
+	abs_path := os.abs_path(path)
+
+	// Check if path is inside the git repo; compare with a trailing separator so a
+	// sibling dir like ${repo_path}2 does not match, and so the slice below stays in range
+	if !abs_path.starts_with(repo_path + '/') {
+		return error('Path ${path} is not inside the git repository at ${repo_path}')
+	}
+
+	// Get relative path in relation to root of gitrepo
+	rel_path := abs_path[repo_path.len + 1..] // +1 to skip the path separator
+	if !os.exists(abs_path) {
+		return error('Path does not exist inside the repository: ${abs_path}')
 	}

 	mut branch_or_tag := gs.status_wanted.branch
@@ -225,7 +232,7 @@ pub fn (mut gs GitRepo) gitlocation_from_path(path string) !GitLocation {
 		account:       gs.account
 		name:          gs.name
 		branch_or_tag: branch_or_tag
-		path:          path // relative path in relation to git repo
+		path:          rel_path // relative path in relation to git repo
 	}
 }

@@ -246,12 +253,18 @@ pub fn (mut repo GitRepo) init() ! {
 		return error('Path does not exist: ${path_string}')
 	}

-	// TODO: check deploy key has been set in repo
-	// if not do git config core.sshCommand "ssh -i /path/to/deploy_key"
+	// Check if deploy key is set in repo config
 	if repo.deploysshkey.len > 0 {
-		repo.set_sshkey(repo.deploysshkey)!
+		git_config := repo.exec('git config --get core.sshCommand') or { '' }
+		if !git_config.contains(repo.deploysshkey) {
+			repo.set_sshkey(repo.deploysshkey)!
+		}
+	}
+
+	// Check that either tag or branch is set on wanted, but not both
+	if repo.status_wanted.tag.len > 0 && repo.status_wanted.branch.len > 0 {
+		return error('Cannot set both tag and branch in wanted status. Choose one or the other.')
 	}
-	// TODO: check tag or branch set on wanted, and not both
 }

 // Set the ssh key on the repo
@@ -263,7 +276,7 @@ fn (mut repo GitRepo) set_sshkey(key_name string) ! {
 	}

 	private_key := key.private_key_path()!
-	_ := 'git config core.sshcommand "ssh -i ~/.ssh/${private_key.path}"'
+	repo.exec('git config core.sshcommand "ssh -i ~/.ssh/${private_key.path}"')!
 	repo.deploysshkey = key_name
 }

@@ -299,7 +312,7 @@ fn (mut repo GitRepo) update_submodules() ! {
 fn (repo GitRepo) exec(cmd_ string) !string {
 	repo_path := repo.path()
 	cmd := 'cd ${repo_path} && ${cmd_}'
-	//console.print_debug(cmd)
+	// console.print_debug(cmd)
 	r := os.execute(cmd)
 	if r.exit_code != 0 {
 		return error('Repo failed to exec cmd: ${cmd}\n${r.output})')
diff --git a/lib/develop/gittools/repository_cache.v b/lib/develop/gittools/repository_cache.v
index 16823716..21405273 100644
--- a/lib/develop/gittools/repository_cache.v
+++ b/lib/develop/gittools/repository_cache.v
@@ -21,9 +21,7 @@ fn (mut repo GitRepo) cache_get() ! {
 	mut repo_json := ''
 	mut redis_client := redis_get()
 	cache_key := repo.cache_key()
-	repo_json = redis_client.get(cache_key) or {
-		return
-	}
+	repo_json = redis_client.get(cache_key) or { return }

 	if repo_json.len > 0 {
 		mut cached := json.decode(GitRepo, repo_json)!
@@ -40,3 +38,10 @@ fn (mut repo GitRepo) cache_delete() ! {
 	// TODO: report v bug, function should work without return as well
 	return
 }
+
+// set last_load back to 0, so the next git status check reloads the repo info
+fn (mut repo GitRepo) cache_last_load_clear() ! {
+	repo.cache_get()!
+	repo.last_load = 0
+	repo.cache_set()!
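+	// the next status_update() will see last_load == 0 and trigger a full load()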
+} diff --git a/lib/develop/gittools/repository_clone.v b/lib/develop/gittools/repository_clone.v index 27796762..4831fb60 100644 --- a/lib/develop/gittools/repository_clone.v +++ b/lib/develop/gittools/repository_clone.v @@ -1,7 +1,6 @@ module gittools import freeflowuniverse.herolib.ui.console - import os @[params] diff --git a/lib/develop/gittools/repository_info.v b/lib/develop/gittools/repository_info.v index c9675921..ad8901cd 100644 --- a/lib/develop/gittools/repository_info.v +++ b/lib/develop/gittools/repository_info.v @@ -35,7 +35,6 @@ pub fn (repo GitRepo) get_changes_staged() ![]string { // Check if there are any unstaged or untracked changes in the repository. pub fn (mut repo GitRepo) detect_changes() !bool { - repo.status_update()! r0 := repo.get_changes_unstaged()! r1 := repo.get_changes_staged()! if r0.len + r1.len > 0 { @@ -58,7 +57,7 @@ pub fn (mut repo GitRepo) need_push_or_pull() !bool { last_local_commit := repo.get_last_local_commit() or { return error('Failed to get last local commit: ${err}') } - //println('commit status: ${repo.name} ${last_local_commit} ${last_remote_commit}') + // println('commit status: ${repo.name} ${last_local_commit} ${last_remote_commit}') return last_local_commit != last_remote_commit } diff --git a/lib/develop/gittools/repository_load.v b/lib/develop/gittools/repository_load.v index 82046ae1..051d57ec 100644 --- a/lib/develop/gittools/repository_load.v +++ b/lib/develop/gittools/repository_load.v @@ -3,92 +3,114 @@ module gittools import time import freeflowuniverse.herolib.ui.console import os + @[params] pub struct StatusUpdateArgs { - reload bool + reload bool } pub fn (mut repo GitRepo) status_update(args StatusUpdateArgs) ! { // Check current time vs last check, if needed (check period) then load - // println("${repo.name} ++") - repo.cache_get()! // Ensure we have the situation from redis - repo.init()! + repo.cache_get() or { return error('Failed to get cache for repo ${repo.name}: ${err}') } // Ensure we have the situation from redis + repo.init() or { return error('Failed to initialize repo ${repo.name}: ${err}') } current_time := int(time.now().unix()) if args.reload || repo.last_load == 0 || current_time - repo.last_load >= repo.config.remote_check_period { - console.print_debug('${repo.name} ${current_time}-${repo.last_load}: ${repo.config.remote_check_period} +++') + // console.print_debug('${repo.name} ${current_time}-${repo.last_load} (${current_time - repo.last_load >= repo.config.remote_check_period}): ${repo.config.remote_check_period} +++') // if true{exit(0)} - repo.load()! - // println("${repo.name} ++++") + repo.load() or { return error('Failed to load repository ${repo.name}: ${err}') } } } // Load repo information // Does not check cache, it is the callers responsibility to check cache and load accordingly. fn (mut repo GitRepo) load() ! { - console.print_debug('load ${repo.cache_key()}') - repo.init()! - if os.exists("${repo.path()}/.git") == false{ - return error("Can't find git in repo ${repo.path()}") + console.print_header('load ${repo.print_key()}') + repo.init() or { return error('Failed to initialize repo during load operation: ${err}') } + + git_path := '${repo.path()}/.git' + if os.exists(git_path) == false { + return error('Repository not found: ${repo.path()} is not a valid git repository (missing .git directory)') } + repo.exec('git fetch --all') or { - return error('Cannot fetch repo: ${repo.path()}. Error: ${err}') + return error('Failed to fetch updates for ${repo.name} at ${repo.path()}: ${err}. 
Please check network connection and repository access.')
 	}
-	repo.load_branches()!
-	repo.load_tags()!
+
+	repo.load_branches() or { return error('Failed to load branches for ${repo.name}: ${err}') }
+
+	repo.load_tags() or { return error('Failed to load tags for ${repo.name}: ${err}') }
+
 	repo.last_load = int(time.now().unix())
-	repo.has_changes = repo.detect_changes()!
-	repo.cache_set()!
+
+	repo.has_changes = repo.detect_changes() or {
+		return error('Failed to detect changes in repository ${repo.name}: ${err}')
+	}
+
+	repo.cache_set() or {
+		return error('Failed to update cache for repository ${repo.name}: ${err}')
+	}
 }

 // Helper to load remote tags
 fn (mut repo GitRepo) load_branches() ! {
-	tags_result := repo.exec("git for-each-ref --format='%(objectname) %(refname:short)' refs/heads refs/remotes/origin")!
+	tags_result := repo.exec("git for-each-ref --format='%(objectname) %(refname:short)' refs/heads refs/remotes/origin") or {
+		return error('Failed to get branch references: ${err}. Command: git for-each-ref')
+	}
 	for line in tags_result.split('\n') {
-		if line.trim_space() != '' {
-			parts := line.split(' ')
-			if parts.len == 2 {
-				commit_hash := parts[0].trim_space()
-				mut name := parts[1].trim_space()
-				if name.contains('_archive') {
-					continue
-				} else if name == 'origin' {
-					repo.status_remote.ref_default = commit_hash
-				} else if name.starts_with('origin') {
-					name = name.all_after('origin/').trim_space()
-					// Update remote tags info
-					repo.status_remote.branches[name] = commit_hash
-				} else {
-					repo.status_local.branches[name] = commit_hash
-				}
+		line_trimmed := line.trim_space()
+		// println(line_trimmed)
+		if line_trimmed != '' {
+			parts := line_trimmed.split(' ')
+			if parts.len < 2 {
+				// console.print_debug('Info: skipping malformed branch/tag line: ${line_trimmed}')
+				continue
+			}
+			commit_hash := parts[0].trim_space()
+			mut name := parts[1].trim_space()
+			if name.contains('_archive') {
+				continue
+			} else if name == 'origin' {
+				repo.status_remote.ref_default = commit_hash
+			} else if name.starts_with('origin') {
+				name = name.all_after('origin/').trim_space()
+				// Update remote tags info
+				repo.status_remote.branches[name] = commit_hash
+			} else {
+				repo.status_local.branches[name] = commit_hash
 			}
 		}
 	}
-	mybranch := repo.exec('git branch --show-current')!.split_into_lines().filter(it.trim_space() != '')
+	mybranch := repo.exec('git branch --show-current') or {
+		return error('Failed to get current branch: ${err}')
+	}.split_into_lines().filter(it.trim_space() != '')
 	if mybranch.len == 1 {
 		repo.status_local.branch = mybranch[0].trim_space()
+	} else {
+		return error('could not determine the current branch for ${repo.name} (detached HEAD or tag checkout?).\n${mybranch}')
 	}
-	// Could be a tag.
-	// else{
-	// 	panic("bug: git branch does not give branchname")
-	// }
 }

 // Helper to load remote tags
 fn (mut repo GitRepo) load_tags() ! {
-	tags_result := repo.exec('git tag --list')!
-
+	// use show-ref so every line is '<hash> refs/tags/<name>', which is what the parsing
+	// below expects ('git tag --list' prints names only); '|| true' because show-ref
+	// exits non-zero when there are no tags
+	tags_result := repo.exec('git show-ref --tags || true') or {
+		return error('Failed to list tags: ${err}. 
Please ensure git is installed and repository is accessible.') + } + //println(tags_result) for line in tags_result.split('\n') { - if line.trim_space() != '' { - parts := line.split(' ') - if parts.len == 2 { - commit_hash := parts[0].trim_space() - tag_name := parts[1].all_after('refs/tags/').trim_space() - - // Update remote tags info - repo.status_remote.tags[tag_name] = commit_hash + line_trimmed := line.trim_space() + if line_trimmed != '' { + parts := line_trimmed.split(' ') + if parts.len < 2 { + //console.print_debug('Skipping malformed tag line: ${line_trimmed}') + continue } + commit_hash := parts[0].trim_space() + tag_name := parts[1].all_after('refs/tags/').trim_space() + + // Update remote tags info + repo.status_remote.tags[tag_name] = commit_hash } } } diff --git a/lib/develop/gittools/repository_utils.v b/lib/develop/gittools/repository_utils.v index d2f9c765..0e895a00 100644 --- a/lib/develop/gittools/repository_utils.v +++ b/lib/develop/gittools/repository_utils.v @@ -13,24 +13,27 @@ pub mut: create bool } -//get the key in redis where json cached info is +// get the key in redis where json cached info is pub fn (mut repo GitRepo) cache_key() string { return '${repo.gs.cache_key()}:${repo.provider}:${repo.account}:${repo.name}' } -//get path where the repo is on the fs +pub fn (mut repo GitRepo) print_key() string { + return '${repo.provider}:${repo.account}:${repo.name}' +} + +// get path where the repo is on the fs pub fn (repo GitRepo) path() string { - mut repo_:=repo - mypath:=repo_.gs.coderoot.path + mut repo_ := repo + mypath := repo_.gs.coderoot.path return '${mypath}/${repo.provider}/${repo.account}/${repo.name}' } -//get herolib path object +// get herolib path object pub fn (repo GitRepo) patho() !pathlib.Path { return pathlib.get_dir(path: repo.path(), create: false)! } - // gets the path of a given url within a repo // ex: 'https://git.ourworld.tf/ourworld_holding/info_ourworld/src/branch/main/books/cocreation/SUMMARY.md' // returns /books/cocreation/SUMMARY.md @@ -64,7 +67,7 @@ pub fn (mut repo GitRepo) get_path_of_url(url string) !string { // Relative path inside the gitstructure, pointing to the repo pub fn (repo GitRepo) get_relative_path() !string { mut mypath := repo.patho()! - mut repo_:=repo + mut repo_ := repo return mypath.path_relative(repo_.gs.coderoot.path) or { panic("couldn't get relative path") } } diff --git a/lib/installers/infra/gitea/gitea_actions.v b/lib/installers/infra/gitea/gitea_actions.v index 854015ff..e9525605 100644 --- a/lib/installers/infra/gitea/gitea_actions.v +++ b/lib/installers/infra/gitea/gitea_actions.v @@ -2,106 +2,61 @@ module gitea import freeflowuniverse.herolib.osal import freeflowuniverse.herolib.core +import freeflowuniverse.herolib.core.texttools import freeflowuniverse.herolib.ui.console -import freeflowuniverse.herolib.installers.base import freeflowuniverse.herolib.installers.ulist -import freeflowuniverse.herolib.installers.db.postgresql as postgres_installer -import freeflowuniverse.herolib.installers.virt.podman as podman_installer import freeflowuniverse.herolib.osal.zinit import os -const postgres_container_name = 'herocontainer_postgresql' - -// checks if a certain version or above is installed fn installed() !bool { - mut podman := podman_installer.get()! - podman.install()! 
-
-
-	// We need to check also if postgres is installed
-	mut result := os.execute('podman healthcheck run ${postgres_container_name}')
-
-	if result.exit_code != 0 {
-		return false
-	}
-
-	result = os.execute('gitea -v')
-
-	if result.exit_code != 0 {
+	res := os.execute('${osal.profile_path_source_and()!} gitea version')
+	if res.exit_code == 0 {
+		r := res.output.split_into_lines().filter(it.trim_space().len > 0)
+		if r.len != 1 {
+			return error("couldn't parse gitea version.\n${res.output}")
+		}
+		if texttools.version(version) > texttools.version(r[0]) {
+			return false
+		}
+	} else {
 		return false
 	}

 	return true
 }

-fn install_postgres(cfg GiteaServer) ! {
-	postgres_heroscript := "
-!!postgresql.configure
-    name: '${cfg.database_name}'
-    user: '${cfg.database_user}'
-    password: '${cfg.database_passwd}'
-    host: '${cfg.database_host}'
-    port: ${cfg.database_port}
-    volume_path:'/var/lib/postgresql/data'
-    container_name: '${postgres_container_name}'
-"
-
-	postgres_installer.play(heroscript: postgres_heroscript)!
-	mut postgres := postgres_installer.get()!
-	postgres.install()!
-	postgres.start()!
-}
-
 fn install() ! {
-	if installed()! {
-		console.print_header('gitea binaraies already installed')
-		return
-	}
 	console.print_header('install gitea')
-	server := get()!
+	baseurl := 'https://github.com/go-gitea/gitea/releases/download/v${version}/gitea-${version}'

-	// make sure we install base on the node
-	base.install()!
-	install_postgres(server)!
-
-	mut download_link := ''
-
-	is_linux_intel := core.is_linux_intel()!
-	is_osx_arm := core.is_osx_arm()!
-
-	if is_linux_intel {
-		download_link = 'https://dl.gitea.com/gitea/${server.version}/gitea-${server.version}-linux-amd64'
+	mut url := ''
+	if core.is_linux_arm()! {
+		// https://github.com/go-gitea/gitea/releases/download/v1.23.2/gitea-1.23.2-linux-arm64.xz
+		url = '${baseurl}-linux-arm64.xz'
+	} else if core.is_linux_intel()! {
+		// https://github.com/go-gitea/gitea/releases/download/v1.23.2/gitea-1.23.2-linux-amd64.xz
+		url = '${baseurl}-linux-amd64.xz'
+	} else if core.is_osx_arm()! {
+		// https://github.com/go-gitea/gitea/releases/download/v1.23.2/gitea-1.23.2-darwin-10.12-arm64.xz
+		url = '${baseurl}-darwin-10.12-arm64.xz'
+	} else if core.is_osx_intel()! {
+		// https://github.com/go-gitea/gitea/releases/download/v1.23.2/gitea-1.23.2-darwin-10.12-amd64.xz
+		url = '${baseurl}-darwin-10.12-amd64.xz'
+	} else {
+		return error('unsupported platform')
 	}

-	if is_osx_arm {
-		download_link = 'https://dl.gitea.com/gitea/${server.version}/gitea-${server.version}-darwin-10.12-amd64'
-	}
+	mut dest := osal.download(
+		url:        url
+		minsize_kb: 9000
+		expand_dir: '/tmp/gitea'
+	)!

-	if download_link.len == 0 {
-		return error('unsupported platform')
-	}
-
-	binary := osal.download(
-		url:  download_link
-		name: 'gitea'
-		dest: '/tmp/gitea'
-	) or { return error('failed to download gitea due to: ${err}') }
-
-	mut res := os.execute('sudo cp ${binary.path} /usr/local/bin/gitea')
-	if res.exit_code != 0 {
-		return error('failed to add gitea to the path due to: ${res.output}')
-	}
-
-	res = os.execute('sudo chmod +x /usr/local/bin/gitea')
-	if res.exit_code != 0 {
-		return error('failed to make gitea executable due to: ${res.output}')
-	}
-
-	// create config file
-	file_content := $tmpl('./templates/app.ini')
-	mut file := os.open_file('/etc/gitea_app.ini', 'w')!
-	file.write(file_content.bytes())!
-
-	console.print_header('gitea installed properly.')
+	mut binpath := dest.file_get('gitea')!
+	osal.cmd_add(
+		cmdname: 'gitea'
+		source:  binpath.path
+	)!
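+	// assumption: osal.cmd_add links the extracted binary into hero's bin dir so a plain
+	// 'gitea' resolves on PATH (the zinit startupcmd below relies on this)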
} fn build() ! { @@ -137,65 +92,80 @@ fn ulist_get() !ulist.UList { fn upload() ! {} fn startupcmd() ![]zinit.ZProcessNewArgs { + mut cfg := get()! mut res := []zinit.ZProcessNewArgs{} - cfg := get()! res << zinit.ZProcessNewArgs{ name: 'gitea' - // cmd: 'GITEA_WORK_DIR=${cfg.path} sudo -u git /var/lib/git/gitea web -c /etc/gitea_app.ini' - cmd: ' - -# Variables -GITEA_USER="${cfg.run_user}" -GITEA_HOME="${cfg.path}" -GITEA_BINARY="/usr/local/bin/gitea" -GITEA_CONFIG="/etc/gitea_app.ini" -GITEA_DATA_PATH="\$GITEA_HOME/data" -GITEA_CUSTOM_PATH="\$GITEA_HOME/custom" -GITEA_LOG_PATH="\$GITEA_HOME/log" - -# Ensure the script is run as root -if [[ \$EUID -ne 0 ]]; then - echo "This script must be run as root." - exit 1 -fi - -echo "Setting up Gitea..." - -# Create Gitea user if it doesn\'t exist -if id -u "\$GITEA_USER" &>/dev/null; then - echo "User \$GITEA_USER already exists." -else - echo "Creating Gitea user..." - if ! sudo adduser --system --shell /bin/bash --group --disabled-password --home "/var/lib/\$GITEA_USER" "\$GITEA_USER"; then - echo "Failed to create user \$GITEA_USER." - exit 1 - fi -fi - -# Create necessary directories -echo "Creating directories..." -mkdir -p "\$GITEA_DATA_PATH" "\$GITEA_CUSTOM_PATH" "\$GITEA_LOG_PATH" -chown -R "\$GITEA_USER:\$GITEA_USER" "\$GITEA_HOME" -chmod -R 750 "\$GITEA_HOME" - -chown "\$GITEA_USER:\$GITEA_USER" "\$GITEA_CONFIG" -chmod 640 "\$GITEA_CONFIG" - -GITEA_WORK_DIR=\$GITEA_HOME sudo -u git gitea web -c \$GITEA_CONFIG -' - workdir: cfg.path - } - res << zinit.ZProcessNewArgs{ - name: 'restart_gitea' - cmd: 'sleep 30 && zinit restart gitea && exit 1' - after: ['gitea'] - oneshot: true - workdir: cfg.path + cmd: 'gitea server' + env: { + 'HOME': os.home_dir() + 'GITEA_CONFIG': cfg.config_path() + } } return res + + + +// mut res := []zinit.ZProcessNewArgs{} +// cfg := get()! +// res << zinit.ZProcessNewArgs{ +// name: 'gitea' +// // cmd: 'GITEA_WORK_DIR=${cfg.path} sudo -u git /var/lib/git/gitea web -c /etc/gitea_app.ini' +// cmd: ' + +// # Variables +// GITEA_USER="${cfg.run_user}" +// GITEA_HOME="${cfg.path}" +// GITEA_BINARY="/usr/local/bin/gitea" +// GITEA_CONFIG="/etc/gitea_app.ini" +// GITEA_DATA_PATH="\$GITEA_HOME/data" +// GITEA_CUSTOM_PATH="\$GITEA_HOME/custom" +// GITEA_LOG_PATH="\$GITEA_HOME/log" + +// # Ensure the script is run as root +// if [[ \$EUID -ne 0 ]]; then +// echo "This script must be run as root." +// exit 1 +// fi + +// echo "Setting up Gitea..." + +// # Create Gitea user if it doesn\'t exist +// if id -u "\$GITEA_USER" &>/dev/null; then +// echo "User \$GITEA_USER already exists." +// else +// echo "Creating Gitea user..." +// if ! sudo adduser --system --shell /bin/bash --group --disabled-password --home "/var/lib/\$GITEA_USER" "\$GITEA_USER"; then +// echo "Failed to create user \$GITEA_USER." +// exit 1 +// fi +// fi + +// # Create necessary directories +// echo "Creating directories..." 
+// mkdir -p "\$GITEA_DATA_PATH" "\$GITEA_CUSTOM_PATH" "\$GITEA_LOG_PATH" +// chown -R "\$GITEA_USER:\$GITEA_USER" "\$GITEA_HOME" +// chmod -R 750 "\$GITEA_HOME" + +// chown "\$GITEA_USER:\$GITEA_USER" "\$GITEA_CONFIG" +// chmod 640 "\$GITEA_CONFIG" + +// GITEA_WORK_DIR=\$GITEA_HOME sudo -u git gitea web -c \$GITEA_CONFIG +// ' +// workdir: cfg.path +// } +// res << zinit.ZProcessNewArgs{ +// name: 'restart_gitea' +// cmd: 'sleep 30 && zinit restart gitea && exit 1' +// after: ['gitea'] +// oneshot: true +// workdir: cfg.path +// } +// return res } fn running() !bool { + //TODO: extend with proper gitea client res := os.execute('curl -fsSL http://localhost:3000 || exit 1') return res.exit_code == 0 } diff --git a/lib/installers/infra/gitea/gitea_factory_.v b/lib/installers/infra/gitea/gitea_factory_.v index 0c0e944b..5254bb21 100644 --- a/lib/installers/infra/gitea/gitea_factory_.v +++ b/lib/installers/infra/gitea/gitea_factory_.v @@ -2,239 +2,289 @@ module gitea import freeflowuniverse.herolib.core.base import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.data.paramsparser + import freeflowuniverse.herolib.sysadmin.startupmanager import freeflowuniverse.herolib.osal.zinit -import freeflowuniverse.herolib.ui.console import time __global ( - gitea_global map[string]&GiteaServer - gitea_default string + gitea_global map[string]&GiteaServer + gitea_default string ) /////////FACTORY @[params] -pub struct ArgsGet { +pub struct ArgsGet{ pub mut: - name string = 'default' + name string } -fn args_get(args_ ArgsGet) ArgsGet { - mut args := args_ - if args.name == '' { - args.name = gitea_default - } - if args.name == '' { - args.name = 'default' - } - return args +fn args_get (args_ ArgsGet) ArgsGet { + mut args:=args_ + if args.name == ""{ + args.name = "default" + } + return args } -pub fn get(args_ ArgsGet) !&GiteaServer { - mut args := args_get(args_) - if args.name !in gitea_global { - if !config_exists() { - if default { - config_save()! - } - } - config_load()! - } - return gitea_global[args.name] or { - println(gitea_global) - panic('failed to get gitea server for ${args.name}') - } +pub fn get(args_ ArgsGet) !&GiteaServer { + mut context:=base.context()! + mut args := args_get(args_) + mut obj := GiteaServer{} + if !(args.name in gitea_global) { + if ! exists(args)!{ + set(obj)! + }else{ + heroscript := context.hero_config_get("gitea",args.name)! + mut obj2:=heroscript_loads(heroscript)! + set_in_mem(obj2)! + } + } + return gitea_global[args.name] or { + println(gitea_global) + //bug if we get here because should be in globals + panic("could not get config for gitea with name, is bug:${args.name}") + } } -fn config_exists(args_ ArgsGet) bool { - mut args := args_get(args_) - mut context := base.context() or { panic('bug') } - return context.hero_config_exists('gitea', args.name) +//register the config for the future +pub fn set(o GiteaServer)! { + set_in_mem(o)! + mut context := base.context()! + heroscript := heroscript_dumps(o)! + context.hero_config_set("gitea", o.name, heroscript)! } -fn config_load(args_ ArgsGet) ! { - mut args := args_get(args_) - mut context := base.context()! - mut heroscript := context.hero_config_get('gitea', args.name)! - play(heroscript: heroscript)! +//does the config exists? +pub fn exists(args_ ArgsGet) !bool { + mut context := base.context()! + mut args := args_get(args_) + return context.hero_config_exists("gitea", args.name) } -fn config_save(args_ ArgsGet) ! 
{ - mut args := args_get(args_) - mut context := base.context()! - context.hero_config_set('gitea', args.name, heroscript_default()!)! +pub fn delete(args_ ArgsGet)! { + mut args := args_get(args_) + mut context:=base.context()! + context.hero_config_delete("gitea",args.name)! + if args.name in gitea_global { + //del gitea_global[args.name] + } } -fn set(o GiteaServer) ! { - mut o2 := obj_init(o)! - gitea_global['default'] = &o2 +//only sets in mem, does not set as config +fn set_in_mem(o GiteaServer)! { + mut o2:=obj_init(o)! + gitea_global[o.name] = &o2 + gitea_default = o.name } + @[params] pub struct PlayArgs { pub mut: - name string = 'default' - heroscript string // if filled in then plbook will be made out of it - plbook ?playbook.PlayBook - reset bool - - start bool - stop bool - restart bool - delete bool - configure bool // make sure there is at least one installed + heroscript string //if filled in then plbook will be made out of it + plbook ?playbook.PlayBook + reset bool } pub fn play(args_ PlayArgs) ! { - mut args := args_ + + mut args:=args_ - if args.heroscript == '' { - args.heroscript = heroscript_default()! - } - mut plbook := args.plbook or { playbook.new(text: args.heroscript)! } + mut plbook := args.plbook or { + playbook.new(text: args.heroscript)! + } + + mut install_actions := plbook.find(filter: 'gitea.configure')! + if install_actions.len > 0 { + for install_action in install_actions { + heroscript:=install_action.heroscript() + mut obj:=heroscript_loads(heroscript)! + set(obj)! + } + } + + mut other_actions := plbook.find(filter: 'gitea.')! + for other_action in other_actions { + if other_action.name in ["destroy","install","build"]{ + mut p := other_action.params + reset:=p.get_default_false("reset") + if other_action.name == "destroy" || reset{ + console.print_debug("install action gitea.destroy") + destroy()! + } + if other_action.name == "install"{ + console.print_debug("install action gitea.install") + install()! + } + } + if other_action.name in ["start","stop","restart"]{ + mut p := other_action.params + name := p.get('name')! + mut gitea_obj:=get(name:name)! + console.print_debug("action object:\n${gitea_obj}") + if other_action.name == "start"{ + console.print_debug("install action gitea.${other_action.name}") + gitea_obj.start()! + } + + if other_action.name == "stop"{ + console.print_debug("install action gitea.${other_action.name}") + gitea_obj.stop()! + } + if other_action.name == "restart"{ + console.print_debug("install action gitea.${other_action.name}") + gitea_obj.restart()! + } + } + } - mut install_actions := plbook.find(filter: 'gitea.configure')! - if install_actions.len > 0 { - for install_action in install_actions { - mut p := install_action.params - cfg := cfg_play(p)! - set(cfg)! - } - } } + //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS /////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager { - // unknown - // screen - // zinit - // tmux - // systemd - match cat { - .zinit { - console.print_debug('startupmanager: zinit') - return startupmanager.get(cat: .zinit)! - } - .systemd { - console.print_debug('startupmanager: systemd') - return startupmanager.get(cat: .systemd)! 
- } - else { - console.print_debug('startupmanager: auto') - return startupmanager.get()! - } - } + // unknown + // screen + // zinit + // tmux + // systemd + match cat{ + .zinit{ + console.print_debug("startupmanager: zinit") + return startupmanager.get(cat:.zinit)! + } + .systemd{ + console.print_debug("startupmanager: systemd") + return startupmanager.get(cat:.systemd)! + }else{ + console.print_debug("startupmanager: auto") + return startupmanager.get()! + } + } } -// load from disk and make sure is properly intialized +//load from disk and make sure is properly intialized pub fn (mut self GiteaServer) reload() ! { - switch(self.name) - self = obj_init(self)! + switch(self.name) + self=obj_init(self)! } pub fn (mut self GiteaServer) start() ! { - switch(self.name) - if self.running()! { - return - } + switch(self.name) + if self.running()!{ + return + } - console.print_header('gitea start') + console.print_header('gitea start') - if !installed()! { - install()! - } + if ! installed()!{ + install()! + } - configure()! + configure()! - start_pre()! + start_pre()! - for zprocess in startupcmd()! { - mut sm := startupmanager_get(zprocess.startuptype)! + for zprocess in startupcmd()!{ + mut sm:=startupmanager_get(zprocess.startuptype)! - console.print_debug('starting gitea with ${zprocess.startuptype}...') + console.print_debug('starting gitea with ${zprocess.startuptype}...') - sm.new(zprocess)! + sm.new(zprocess)! - sm.start(zprocess.name)! - } + sm.start(zprocess.name)! + } - start_post()! + start_post()! + + for _ in 0 .. 50 { + if self.running()! { + return + } + time.sleep(100 * time.millisecond) + } + return error('gitea did not install properly.') - for _ in 0 .. 50 { - if self.running()! { - return - } - time.sleep(100 * time.millisecond) - } - return error('cannot start gitea') } pub fn (mut self GiteaServer) install_start(args InstallArgs) ! { - switch(self.name) - self.install(args)! - self.start()! + switch(self.name) + self.install(args)! + self.start()! } pub fn (mut self GiteaServer) stop() ! { - switch(self.name) - stop_pre()! - for zprocess in startupcmd()! { - mut sm := startupmanager_get(zprocess.startuptype)! - sm.stop(zprocess.name)! - } - stop_post()! + switch(self.name) + stop_pre()! + for zprocess in startupcmd()!{ + mut sm:=startupmanager_get(zprocess.startuptype)! + sm.stop(zprocess.name)! + } + stop_post()! } pub fn (mut self GiteaServer) restart() ! { - switch(self.name) - self.stop()! - self.start()! + switch(self.name) + self.stop()! + self.start()! } pub fn (mut self GiteaServer) running() !bool { - switch(self.name) + switch(self.name) - // walk over the generic processes, if not running return - for zprocess in startupcmd()! { - mut sm := startupmanager_get(zprocess.startuptype)! - r := sm.running(zprocess.name)! - if r == false { - return false - } - } - return running()! + //walk over the generic processes, if not running return + for zprocess in startupcmd()!{ + mut sm:=startupmanager_get(zprocess.startuptype)! + r:=sm.running(zprocess.name)! + if r==false{ + return false + } + } + return running()! } @[params] -pub struct InstallArgs { +pub struct InstallArgs{ pub mut: - reset bool + reset bool } pub fn (mut self GiteaServer) install(args InstallArgs) ! { - switch(self.name) - if args.reset || (!installed()!) { - install()! - } + switch(self.name) + if args.reset || (!installed()!) { + install()! + } } pub fn (mut self GiteaServer) build() ! { - switch(self.name) - build()! + switch(self.name) + build()! } pub fn (mut self GiteaServer) destroy() ! 
{ - switch(self.name) - - self.stop() or {} - destroy()! + switch(self.name) + self.stop() or {} + destroy()! } -// switch instance to be used for gitea + + +//switch instance to be used for gitea pub fn switch(name string) { - gitea_default = name + gitea_default = name +} + + +//helpers + +@[params] +pub struct DefaultConfigArgs{ + instance string = 'default' } diff --git a/lib/installers/infra/gitea/gitea_model.v b/lib/installers/infra/gitea/gitea_model.v index 228ca6dc..dccaebe5 100644 --- a/lib/installers/infra/gitea/gitea_model.v +++ b/lib/installers/infra/gitea/gitea_model.v @@ -1,117 +1,94 @@ module gitea - import freeflowuniverse.herolib.data.paramsparser -import freeflowuniverse.herolib.osal.zinit +import freeflowuniverse.herolib.data.encoderhero import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.osal +import os +import freeflowuniverse.herolib.clients.mailclient +import freeflowuniverse.herolib.clients.postgresql_client +import rand +pub const version = '0.0.0' const singleton = true -const default = true - -// TODO: THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE TO STRUCT BELOW, IS STRUCTURED AS HEROSCRIPT -pub fn heroscript_default() !string { - heroscript := " - !!gitea.configure - name:'gitea' - version:'1.22.6' - run_user: 'git' - path: '/var/lib/git' - passwd: '12345678' - postgresql_name: 'default' - mail_from: 'git@meet.tf' - smtp_addr: 'smtp-relay.brevo.com' - smtp_login: 'admin' - smtp_port: 587 - smtp_passwd: '12345678' - domain: 'meet.tf' - jwt_secret: '' - lfs_jwt_secret: '' - internal_token: '' - secret_key: '' - database_passwd: 'postgres' - database_name: 'postgres' - database_user: 'postgres' - database_host: 'localhost' - database_port: 5432 - " - - return heroscript -} - -// THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED +const default = false +@[heap] pub struct GiteaServer { pub mut: - name string = 'default' - - // reset bool - version string = '1.22.6' - run_user string = 'git' - path string = '/var/lib/git' + name string = 'default' + path string = '${os.home_dir()}/hero/var/gitea' passwd string - mail_from string = 'git@meet.tf' - smtp_addr string = 'smtp-relay.brevo.com' - smtp_login string @[required] - smtp_port int = 587 - smtp_passwd string - domain string @[required] - jwt_secret string + domain string = "git.test.com" + jwt_secret string = rand.hex(12) lfs_jwt_secret string internal_token string secret_key string - - // Database config - database_passwd string = 'postgres' - database_name string = 'postgres' - database_user string = 'postgres' - database_host string = 'localhost' - database_port int = 5432 - - process ?zinit.ZProcess - path_config pathlib.Path + postgresql_client_name string = "default" + mail_client_name string = "default" } -fn cfg_play(p paramsparser.Params) !GiteaServer { - // THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above - mut mycfg := GiteaServer{ - name: p.get_default('name', 'default')! - version: p.get_default('version', '1.22.6')! - run_user: p.get_default('run_user', 'git')! - path: p.get_default('path', '/var/lib/git')! - passwd: p.get('passwd')! - mail_from: p.get_default('mail_from', 'git@meet.tf')! - smtp_addr: p.get_default('smtp_addr', 'smtp-relay.brevo.com')! - smtp_login: p.get('smtp_login')! - smtp_port: p.get_int_default('smtp_port', 587)! - smtp_passwd: p.get('smtp_passwd')! - domain: p.get('domain')! - jwt_secret: p.get('jwt_secret')! 
- lfs_jwt_secret: p.get('lfs_jwt_secret')! - internal_token: p.get('internal_token')! - secret_key: p.get('secret_key')! - // Set database config - database_passwd: p.get_default('database_passwd', 'postgres')! - database_name: p.get_default('database_name', 'postgres')! - database_user: p.get_default('database_user', 'postgres')! - database_host: p.get_default('database_host', 'localhost')! - database_port: p.get_int_default('database_port', 5432)! +pub fn (obj GiteaServer) config_path() string { + return '${obj.path}/config.ini' +} + +//your checking & initialization code if needed +fn obj_init(mycfg_ GiteaServer)!GiteaServer{ + mut mycfg:=mycfg_ + return mycfg +} + +//called before start if done +fn configure() ! { + mut server := get()! + + if !osal.cmd_exists('gitea') { + return error('gitea binary not found in path. Please install gitea first.') + } + // Generate and set any missing secrets + if server.lfs_jwt_secret == '' { + server.lfs_jwt_secret = os.execute_opt('gitea generate secret LFS_JWT_SECRET')!.output.trim_space() + set(server)! + } + if server.internal_token == '' { + server.internal_token = os.execute_opt('gitea generate secret INTERNAL_TOKEN')!.output.trim_space() + set(server)! + } + if server.secret_key == '' { + server.secret_key = os.execute_opt('gitea generate secret SECRET_KEY')!.output.trim_space() + set(server)! } - return mycfg + // Initialize required clients with detailed error handling + mut db_client := postgresql_client.get(name: server.postgresql_client_name) or { + return error('Failed to initialize PostgreSQL client "${server.postgresql_client_name}": ${err}') + } + mut mail_client := mailclient.get(name: server.mail_client_name) or { + return error('Failed to initialize mail client "${server.mail_client_name}": ${err}') + } + + //TODO: check database exists + if !db_client.db_exists('gitea_${server.name}')! { + console.print_header('Creating database gitea_${server.name} for gitea.') + db_client.db_create('gitea_${server.name}')! + } + + db_client.dbname = 'gitea_${server.name}' + + mut mycode := $tmpl('templates/app.ini') + mut path := pathlib.get_file(path: server.config_path(), create: true)! + path.write(mycode)! + console.print_debug(mycode) } -fn obj_init(obj_ GiteaServer) !GiteaServer { - // never call get here, only thing we can do here is work on object itself - mut obj := obj_ - return obj +/////////////NORMALLY NO NEED TO TOUCH + +pub fn heroscript_dumps(obj GiteaServer) !string { + return encoderhero.encode[GiteaServer ](obj)! } -// called before start if done -fn configure() ! { - // mut installer := get()! - - // mut mycode := $tmpl('templates/atemplate.yaml') - // mut path := pathlib.get_file(path: cfg.configpath, create: true)! - // path.write(mycode)! - // console.print_debug(mycode) +pub fn heroscript_loads(heroscript string) !GiteaServer { + mut obj := encoderhero.decode[GiteaServer](heroscript)! + return obj } diff --git a/lib/installers/infra/gitea/readme.md b/lib/installers/infra/gitea/readme.md index 23f1646f..9dfb7153 100644 --- a/lib/installers/infra/gitea/readme.md +++ b/lib/installers/infra/gitea/readme.md @@ -6,31 +6,24 @@ To get started ```vlang +import freeflowuniverse.herolib.installers.infra.gitea as gitea_installer -import freeflowuniverse.herolib.installers.something. gitea - -mut installer:= gitea.get()! +//if you want to configure using heroscript +gitea_installer.play(heroscript:' + !!gitea.configure name:test + passwd:'something' + domain: 'docs.info.com' + ')! +mut installer:= gitea_installer.get(name:'test')! 
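+// play() above saved the 'test' instance config as heroscript; get(name:'test')
+// loads that same instance back (see gitea_factory_.v)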
 installer.start()!
-
-
-```
-
-## example heroscript
-
-
-```hero
-!!gitea.install
-    homedir: '/home/user/gitea'
-    username: 'admin'
-    password: 'secretpassword'
-    title: 'Some Title'
-    host: 'localhost'
-    port: 8888
-
 ```
+
+this will look for a configured mail client and a PostgreSQL client, both on instance name "default"; change the names in the heroscript if needed:
+
+- postgresql_client_name = "default"
+- mail_client_name = "default"
\ No newline at end of file
diff --git a/lib/installers/infra/gitea/templates/app.ini b/lib/installers/infra/gitea/templates/app.ini
index c5bfed3a..acbf5ba8 100644
--- a/lib/installers/infra/gitea/templates/app.ini
+++ b/lib/installers/infra/gitea/templates/app.ini
@@ -1,6 +1,6 @@
 APP_NAME = ${server.name}
 RUN_MODE = prod
-RUN_USER = ${server.run_user}
+
 WORK_PATH = ${server.path}

 [repository]
@@ -26,12 +26,12 @@ LFS_JWT_SECRET = ${server.lfs_jwt_secret}
 OFFLINE_MODE = false

 [database]
-PATH = ${server.path}/gitea.db
+PATH = /tmp/gitea.db
 DB_TYPE = postgres
-HOST = ${server.database_host}:${server.database_port}
-NAME = ${server.database_name}
-USER = ${server.database_user}
-PASSWD = ${server.database_passwd}
+HOST = ${db_client.host}:${db_client.port}
+NAME = ${db_client.dbname}
+USER = ${db_client.user}
+PASSWD = ${db_client.password}
 LOG_SQL = false
 SCHEMA =
 SSL_MODE = disable
@@ -80,12 +80,12 @@ PATH = ${server.path}/lfs

 [mailer]
 ENABLED = true
-FROM = ${server.mail_from}
+FROM = ${mail_client.mail_from}
 ; PROTOCOL = smtps
-SMTP_ADDR = ${server.smtp_addr}
-SMTP_PORT = ${server.smtp_port}
-USER = ${server.smtp_login}
-PASSWD = ${server.smtp_passwd}
+SMTP_ADDR = ${mail_client.mail_server}
+SMTP_PORT = ${mail_client.mail_port}
+USER = ${mail_client.mail_username}
+PASSWD = ${mail_client.mail_password}

 [openid]
 ENABLE_OPENID_SIGNIN = true
@@ -105,4 +105,3 @@ JWT_SECRET = ${server.jwt_secret}

 [actions]
 ENABLED=true
-
diff --git a/lib/installers/infra/livekit/livekit_model.v b/lib/installers/infra/livekit/livekit_model.v
index 2ff96b36..497cadf6 100644
--- a/lib/installers/infra/livekit/livekit_model.v
+++ b/lib/installers/infra/livekit/livekit_model.v
@@ -7,7 +7,6 @@ pub const version = '1.7.2'
 const singleton = false
 const default = true

-// TODO: THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE TO STRUCT BELOW, IS STRUCTURED AS HEROSCRIPT
 pub fn heroscript_default() !string {
	heroscript := "
 !!livekit.configure
diff --git a/lib/installers/infra/screen/.heroscript b/lib/installers/infra/screen/.heroscript
new file mode 100644
index 00000000..5b8239b8
--- /dev/null
+++ b/lib/installers/infra/screen/.heroscript
@@ -0,0 +1,13 @@
+
+!!hero_code.generate_installer
+    name:'screen'
+    classname:'Screen'
+    singleton:0
+    templates:0
+    default:1
+    title:''
+    supported_platforms:''
+    reset:0
+    startupmanager:0
+    hasconfig:0
+    build:0
\ No newline at end of file
diff --git a/lib/installers/infra/screen/readme.md b/lib/installers/infra/screen/readme.md
new file mode 100644
index 00000000..f8480f1f
--- /dev/null
+++ b/lib/installers/infra/screen/readme.md
@@ -0,0 +1,44 @@
+# screen
+
+
+
+To get started
+
+```vlang
+
+
+import freeflowuniverse.herolib.installers.infra.screen as screen_installer
+
+heroscript := "
+!!screen.configure name:'test'
+    password: '1234'
+    port: 7701
+
+!!screen.start name:'test' reset:1
+"
+
+screen_installer.play(heroscript: heroscript)!
+
+// or we can work with the default instance and force a (re)install
+// mut installer := screen_installer.get()!
+// installer.install(reset: true)!
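+// note: this installer is generated with startupmanager:0 (see .heroscript above),
+// so the generated Screen object only exposes install()/destroy()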
+
+
+
+
+```
+
+## example heroscript
+
+```hero
+!!screen.configure
+    homedir: '/home/user/screen'
+    username: 'admin'
+    password: 'secretpassword'
+    title: 'Some Title'
+    host: 'localhost'
+    port: 8888
+
+```
+
+
diff --git a/lib/installers/infra/screen/screen_actions.v b/lib/installers/infra/screen/screen_actions.v
new file mode 100644
index 00000000..616be036
--- /dev/null
+++ b/lib/installers/infra/screen/screen_actions.v
@@ -0,0 +1,63 @@
+module screen
+
+import freeflowuniverse.herolib.core
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.installers.ulist
+import os
+
+//////////////////// following actions are not specific to instance of the object
+
+// checks if a certain version or above is installed
+fn installed() !bool {
+	res := os.execute('screen --version')
+	if res.exit_code != 0 {
+		return false
+	}
+
+	return true
+}
+
+// get the Upload List of the files
+fn ulist_get() !ulist.UList {
+	// optionally build a UList which is all paths which are result of building, is then used e.g. in upload
+	return ulist.UList{}
+}
+
+// uploads to S3 server if configured
+fn upload() ! {
+}
+
+fn install() ! {
+	console.print_header('install screen')
+
+	if core.is_ubuntu()! {
+		res := os.execute('sudo apt install screen -y')
+		if res.exit_code != 0 {
+			return error('failed to install screen: ${res.output}')
+		}
+	} else if core.is_osx()! {
+		// note: homebrew refuses to run under sudo, so call it directly
+		res := os.execute('brew install screen')
+		if res.exit_code != 0 {
+			return error('failed to install screen: ${res.output}')
+		}
+	} else {
+		return error('unsupported platform: ${core.platform()!}')
+	}
+}
+
+fn destroy() ! {
+	console.print_header('uninstall screen')
+	if core.is_ubuntu()! {
+		res := os.execute('sudo apt remove screen -y')
+		if res.exit_code != 0 {
+			return error('failed to uninstall screen: ${res.output}')
+		}
+	} else if core.is_osx()! {
+		res := os.execute('brew uninstall screen')
+		if res.exit_code != 0 {
+			return error('failed to uninstall screen: ${res.output}')
+		}
+	} else {
+		return error('unsupported platform: ${core.platform()!}')
+	}
+}
diff --git a/lib/installers/infra/screen/screen_factory_.v b/lib/installers/infra/screen/screen_factory_.v
new file mode 100644
index 00000000..fff3aaa7
--- /dev/null
+++ b/lib/installers/infra/screen/screen_factory_.v
@@ -0,0 +1,71 @@
+module screen
+
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.sysadmin.startupmanager
+import freeflowuniverse.herolib.osal.zinit
+
+__global (
+	screen_global  map[string]&Screen
+	screen_default string
+)
+
+/////////FACTORY
+
+@[params]
+pub struct ArgsGet {
+pub mut:
+	name string
+}
+
+pub fn get(args_ ArgsGet) !&Screen {
+	return &Screen{}
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
+	// unknown
+	// screen
+	// zinit
+	// tmux
+	// systemd
+	match cat {
+		.zinit {
+			console.print_debug('startupmanager: zinit')
+			return startupmanager.get(cat: .zinit)!
+		}
+		.systemd {
+			console.print_debug('startupmanager: systemd')
+			return startupmanager.get(cat: .systemd)!
+		}
+		else {
+			console.print_debug('startupmanager: auto')
+			return startupmanager.get()!
+ } + } +} + +@[params] +pub struct InstallArgs { +pub mut: + reset bool +} + +pub fn (mut self Screen) install(args InstallArgs) ! { + switch(self.name) + if args.reset || (!installed()!) { + install()! + } +} + +pub fn (mut self Screen) destroy() ! { + switch(self.name) + destroy()! +} + +// switch instance to be used for screen +pub fn switch(name string) { + screen_default = name +} diff --git a/lib/installers/infra/screen/screen_model.v b/lib/installers/infra/screen/screen_model.v new file mode 100644 index 00000000..2c727f13 --- /dev/null +++ b/lib/installers/infra/screen/screen_model.v @@ -0,0 +1,22 @@ +module screen + +const singleton = false +const default = true + +// THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED +@[heap] +pub struct Screen { +pub mut: + name string = 'default' +} + +fn obj_init(obj_ Screen) !Screen { + // never call get here, only thing we can do here is work on object itself + mut obj := obj_ + return obj +} + +// called before start if done +fn configure() ! { + // mut installer := get()! +} diff --git a/lib/installers/lang/golang/golang_actions.v b/lib/installers/lang/golang/golang_actions.v index 68ee4164..2e1af597 100644 --- a/lib/installers/lang/golang/golang_actions.v +++ b/lib/installers/lang/golang/golang_actions.v @@ -51,7 +51,7 @@ fn install_() ! { expand_dir := '/tmp/golang' // the downloader is cool, it will check the download succeeds and also check the minimum size - dest := osal.download( + _ := osal.download( url: url minsize_kb: 40000 expand_dir: expand_dir diff --git a/lib/installers/lang/rust/rust_actions.v b/lib/installers/lang/rust/rust_actions.v index 24ddda1a..cc19be4b 100644 --- a/lib/installers/lang/rust/rust_actions.v +++ b/lib/installers/lang/rust/rust_actions.v @@ -43,8 +43,6 @@ fn upload_() ! { fn install_() ! { console.print_header('install rust') - version := '1.83.0' - base.install()! pl := core.platform()! diff --git a/lib/installers/sysadmintools/garage_s3/garage_s3_actions.v b/lib/installers/sysadmintools/garage_s3/garage_s3_actions.v index 1665465e..3e7a69e2 100644 --- a/lib/installers/sysadmintools/garage_s3/garage_s3_actions.v +++ b/lib/installers/sysadmintools/garage_s3/garage_s3_actions.v @@ -30,7 +30,7 @@ fn installed() !bool { fn install() ! { console.print_header('install garage_s3') - mut installer := get()! + // mut installer := get()! // THIS IS EXAMPLE CODEAND NEEDS TO BE CHANGED mut url := '' if core.is_linux_arm()! { @@ -75,7 +75,7 @@ fn startupcmd() ![]zinit.ZProcessNewArgs { } fn running_() !bool { - mut installer := get()! + _ := get()! 
// THIS IS EXAMPLE CODEAND NEEDS TO BE CHANGED // this checks health of garage_s3 // curl http://localhost:3333/api/v1/s --oauth2-bearer 1234 works diff --git a/lib/installers/virt/docker/docker_factory_.v b/lib/installers/virt/docker/docker_factory_.v index 58e91414..8fef7e11 100644 --- a/lib/installers/virt/docker/docker_factory_.v +++ b/lib/installers/virt/docker/docker_factory_.v @@ -1,4 +1,3 @@ - module docker import freeflowuniverse.herolib.ui.console diff --git a/lib/installers/web/bun/.heroscript b/lib/installers/web/bun/.heroscript new file mode 100644 index 00000000..6c6f976e --- /dev/null +++ b/lib/installers/web/bun/.heroscript @@ -0,0 +1,13 @@ + +!!hero_code.generate_installer + name:'bun' + classname:'Bun' + singleton:1 + templates:0 + default:1 + title:'' + supported_platforms:'' + reset:0 + startupmanager:0 + hasconfig:0 + build:0 \ No newline at end of file diff --git a/lib/installers/web/bun/bun_actions.v b/lib/installers/web/bun/bun_actions.v new file mode 100644 index 00000000..6d88911a --- /dev/null +++ b/lib/installers/web/bun/bun_actions.v @@ -0,0 +1,66 @@ +module bun + +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.installers.ulist +import os + +//////////////////// following actions are not specific to instance of the object + +// checks if a certain version or above is installed +fn installed() !bool { + checkcmd := '${osal.profile_path_source_and()!} bun -version' + res := os.execute(checkcmd) + if res.exit_code != 0 { + println(res) + println(checkcmd) + return false + } + r := res.output.split_into_lines().filter(it.trim_space().len > 0) + if r.len != 1 { + return error("couldn't parse bun version.\n${res.output}") + } + println(' ${texttools.version(version)} <= ${texttools.version(r[0])}') + if texttools.version(version) <= texttools.version(r[0]) { + return true + } + return false +} + +// get the Upload List of the files +fn ulist_get() !ulist.UList { + // optionally build a UList which is all paths which are result of building, is then used e.g. in upload + return ulist.UList{} +} + +// uploads to S3 server if configured +fn upload() ! { + // installers.upload( + // cmdname: 'bun' + // source: '${gitpath}/target/x86_64-unknown-linux-musl/release/bun' + // )! +} + +fn install() ! { + console.print_header('install bun') + osal.exec(cmd: 'curl -fsSL https://bun.sh/install | bash')! +} + +fn destroy() ! { + // osal.process_kill_recursive(name:'bun')! + + osal.cmd_delete('bun')! + + osal.package_remove(' + bun + ')! + + // will remove all paths where bun is found + osal.profile_path_add_remove(paths2delete: 'bun')! + + osal.rm(' + ~/.bun + ')! 
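+	// note: ~/.bun is the default install root used by the bun.sh install script (assumption based on upstream defaults)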
+}
diff --git a/lib/installers/web/bun/bun_factory_.v b/lib/installers/web/bun/bun_factory_.v
new file mode 100644
index 00000000..96095c9a
--- /dev/null
+++ b/lib/installers/web/bun/bun_factory_.v
@@ -0,0 +1,74 @@
+module bun
+
+import freeflowuniverse.herolib.core.base
+import freeflowuniverse.herolib.core.playbook
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.sysadmin.startupmanager
+import freeflowuniverse.herolib.osal.zinit
+import time
+
+__global (
+	bun_global  map[string]&Bun
+	bun_default string
+)
+
+/////////FACTORY
+
+@[params]
+pub struct ArgsGet {
+pub mut:
+	name string
+}
+
+pub fn get(args_ ArgsGet) !&Bun {
+	return &Bun{}
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////# LIVE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
+	// unknown
+	// screen
+	// zinit
+	// tmux
+	// systemd
+	match cat {
+		.zinit {
+			console.print_debug('startupmanager: zinit')
+			return startupmanager.get(cat: .zinit)!
+		}
+		.systemd {
+			console.print_debug('startupmanager: systemd')
+			return startupmanager.get(cat: .systemd)!
+		}
+		else {
+			console.print_debug('startupmanager: auto')
+			return startupmanager.get()!
+		}
+	}
+}
+
+@[params]
+pub struct InstallArgs {
+pub mut:
+	reset bool
+}
+
+pub fn (mut self Bun) install(args InstallArgs) ! {
+	switch(self.name)
+	if args.reset || (!installed()!) {
+		install()!
+	}
+}
+
+pub fn (mut self Bun) destroy() ! {
+	switch(self.name)
+	destroy()!
+}
+
+// switch instance to be used for bun
+pub fn switch(name string) {
+	bun_default = name
+}
diff --git a/lib/installers/web/bun/bun_model.v b/lib/installers/web/bun/bun_model.v
new file mode 100644
index 00000000..99cb0cc3
--- /dev/null
+++ b/lib/installers/web/bun/bun_model.v
@@ -0,0 +1,26 @@
+module bun
+
+import freeflowuniverse.herolib.data.paramsparser
+import os
+
+pub const version = '1.2.2'
+const singleton = true
+const default = true
+
+// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
+@[heap]
+pub struct Bun {
+pub mut:
+	name string = 'default'
+}
+
+fn obj_init(obj_ Bun) !Bun {
+	// never call get here, only thing we can do here is work on object itself
+	mut obj := obj_
+	return obj
+}
+
+// called before start if done
+fn configure() ! {
+	// mut installer := get()!
+}
diff --git a/lib/installers/web/bun/readme.md b/lib/installers/web/bun/readme.md
new file mode 100644
index 00000000..e756650f
--- /dev/null
+++ b/lib/installers/web/bun/readme.md
@@ -0,0 +1,44 @@
+# bun
+
+
+
+To get started
+
+```vlang
+
+
+import freeflowuniverse.herolib.installers.web.bun as bun_installer
+
+heroscript := "
+!!bun.configure name:'test'
+    password: '1234'
+    port: 7701
+
+!!bun.start name:'test' reset:1
+"
+
+bun_installer.play(heroscript: heroscript)!
+
+// or we can work with the default instance and force a (re)install
+// mut installer := bun_installer.get()!
+// installer.install(reset: true)!
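+// note: bun is a singleton installer (singleton = true in bun_model.v),
+// so get() always returns the 'default' instance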
+
+
+
+
+```
+
+## example heroscript
+
+```hero
+!!bun.configure
+    homedir: '/home/user/bun'
+    username: 'admin'
+    password: 'secretpassword'
+    title: 'Some Title'
+    host: 'localhost'
+    port: 8888
+
+```
+
+
diff --git a/lib/lang/python/readme.md b/lib/lang/python/readme.md
index 444ecb98..b3a71a6d 100644
--- a/lib/lang/python/readme.md
+++ b/lib/lang/python/readme.md
@@ -20,7 +20,7 @@ source ~/hero/python/default/bin/activate

 ```v

-#!/usr/bin/env -S v -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -gc none -cc tcc -d use_openssl -enable-globals run

 import freeflowuniverse.herolib.lang.python

diff --git a/lib/osal/cmds.v b/lib/osal/cmds.v
index b27871f7..78d36fc0 100644
--- a/lib/osal/cmds.v
+++ b/lib/osal/cmds.v
@@ -109,7 +109,13 @@ pub fn profile_path_source() !string {
 	}
 	pp := profile_path()!
 	if os.exists(pp) {
-		return 'source ${pp}'
+		// validate with the POSIX '.' builtin: os.execute runs /bin/sh, where 'source' may not exist
+		res := os.execute('. ${pp}')
+		if res.exit_code != 0 {
+			console.print_stderr('WARNING: your profile is corrupt: ${pp}')
+			return error('profile corrupt')
+		} else {
+			return 'source ${pp}'
+		}
 	}
 	return ''
 }
@@ -117,14 +123,8 @@ pub fn profile_path_source() !string {
 // return source $path &&
 // or empty if it doesn't exist
 pub fn profile_path_source_and() !string {
-	if core.hostname() or { '' } == 'rescue' {
-		return ''
-	}
-	pp := profile_path()!
-	if os.exists(pp) {
-		return '. ${pp} &&'
-	}
-	return ''
+	p := profile_path_source() or { return '' }
+	return '${p} && '
 }

 fn profile_paths_get(content string) []string {
diff --git a/lib/osal/downloader.v b/lib/osal/downloader.v
index cfe4e0cd..465f5f01 100644
--- a/lib/osal/downloader.v
+++ b/lib/osal/downloader.v
@@ -26,6 +26,11 @@ pub mut:
 pub fn download(args_ DownloadArgs) !pathlib.Path {
 	mut args := args_

+	args.dest = args.dest.trim(' ').trim_right('/')
+	args.expand_dir = args.expand_dir.trim(' ').trim_right('/')
+	args.expand_file = args.expand_file.replace('//', '/')
+	args.dest = args.dest.replace('//', '/')
+
 	console.print_header('download: ${args.url}')
 	if args.name == '' {
 		if args.dest != '' {
@@ -38,7 +43,7 @@ pub fn download(args_ DownloadArgs) !pathlib.Path {
 			args.name = lastname
 		}
 		if args.name == '' {
-			return error('cannot find name for download')
+			return error('cannot find name for download of:\n${args_}')
 		}
 	}

@@ -69,8 +74,30 @@ pub fn download(args_ DownloadArgs) !pathlib.Path {

 	if args.reset {
-		mut dest_delete := pathlib.get_file(path: args.dest + '_', check: false)!
-		dest_delete.delete()!
+		// Clean up all related files when resetting
+		if os.exists(args.dest) {
+			if os.is_dir(args.dest) {
+				os.rmdir_all(args.dest) or {}
+			} else {
+				os.rm(args.dest) or {}
+			}
+		}
+		if os.exists(args.dest + '_') {
+			if os.is_dir(args.dest + '_') {
+				os.rmdir_all(args.dest + '_') or {}
+			} else {
+				os.rm(args.dest + '_') or {}
+			}
+		}
+		if os.exists(args.dest + '.meta') {
+			if os.is_dir(args.dest + '.meta') {
+				os.rmdir_all(args.dest + '.meta') or {}
+			} else {
+				os.rm(args.dest + '.meta') or {}
+			}
+		}
+		// Recreate meta file after cleanup
+		meta = pathlib.get_file(path: args.dest + '.meta', create: true)!
 	}

 	meta.write(args.url.trim_space())!
@@ -89,8 +116,15 @@ pub fn download(args_ DownloadArgs) !pathlib.Path {
 	if todownload {
 		mut dest0 := pathlib.get_file(path: args.dest + '_')!
+ // Clean up any existing temporary file/directory before download + if os.exists(dest0.path) { + if os.is_dir(dest0.path) { + os.rmdir_all(dest0.path) or {} + } else { + os.rm(dest0.path) or {} + } + } cmd := ' - rm -f ${dest0.path} cd /tmp curl -L \'${args.url}\' -o ${dest0.path} ' @@ -121,15 +155,26 @@ pub fn download(args_ DownloadArgs) !pathlib.Path { dest.check() } if args.expand_dir.len > 0 { + // Clean up directory if it exists if os.exists(args.expand_dir) { - os.rmdir_all(args.expand_dir)! + os.rmdir_all(args.expand_dir) or { + return error('Failed to remove existing directory ${args.expand_dir}: ${err}') + } } - return dest.expand(args.expand_dir)! } if args.expand_file.len > 0 { + // Clean up file/directory if it exists if os.exists(args.expand_file) { - os.rm(args.expand_file)! + if os.is_dir(args.expand_file) { + os.rmdir_all(args.expand_file) or { + return error('Failed to remove existing directory ${args.expand_file}: ${err}') + } + } else { + os.rm(args.expand_file) or { + return error('Failed to remove existing file ${args.expand_file}: ${err}') + } + } } return dest.expand(args.expand_file)! } diff --git a/lib/osal/hostsfile/README.md b/lib/osal/hostsfile/README.md index f42ba55e..4a712fba 100644 --- a/lib/osal/hostsfile/README.md +++ b/lib/osal/hostsfile/README.md @@ -17,7 +17,7 @@ This module provides functionality to manage the system's hosts file (`/etc/host Create a file `example.vsh`: ```v -#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run import freeflowuniverse.herolib.osal.hostsfile import os diff --git a/lib/osal/notifier/notifier.v b/lib/osal/notifier/notifier.v index 6ce7af6a..197b10ed 100644 --- a/lib/osal/notifier/notifier.v +++ b/lib/osal/notifier/notifier.v @@ -1,6 +1,5 @@ module notifier -import os.notify import os import time @@ -13,30 +12,34 @@ pub enum NotifyEvent { } // NotifyCallback is the function signature for event callbacks -pub type NotifyCallback = fn (event NotifyEvent, path string) +pub type NotifyCallback = fn (event NotifyEvent, path string, args map[string]string) // WatchEntry represents a watched path and its associated callback struct WatchEntry { pub mut: path string callback ?NotifyCallback - fd int + pid int } -// Notifier manages file system notifications +// Notifier manages file system notifications using fswatch pub struct Notifier { pub mut: name string - watcher notify.FdNotifier watch_list []WatchEntry is_watching bool + args map[string]string } // new creates a new Notifier instance pub fn new(name string) !&Notifier { + // Check if fswatch is installed + if !os.exists_in_system_path('fswatch') { + return error('fswatch is not installed. Please install it first (brew install fswatch).') + } + return &Notifier{ name: name - watcher: notify.new()! watch_list: []WatchEntry{} is_watching: false } @@ -48,16 +51,10 @@ pub fn (mut n Notifier) add_watch(path string, callback NotifyCallback) ! { return error('Path does not exist: ${path}') } - mut f := os.open(path)! - fd := f.fd - f.close() - - n.watcher.add(fd, .write | .read, .edge_trigger)! - n.watch_list << WatchEntry{ path: path callback: callback - fd: fd + pid: 0 } println('Added watch for: ${path}') @@ -67,7 +64,9 @@ pub fn (mut n Notifier) add_watch(path string, callback NotifyCallback) ! { pub fn (mut n Notifier) remove_watch(path string) ! 
{ for i, entry in n.watch_list { if entry.path == path { - n.watcher.remove(entry.fd) or { return err } + if entry.pid > 0 { + os.system('kill ${entry.pid}') + } n.watch_list.delete(i) println('Removed watch for: ${path}') return @@ -86,31 +85,67 @@ pub fn (mut n Notifier) start() ! { } n.is_watching = true - go n.watch_loop() + + if n.watch_list.len > 1 { + return error('only support watchers with len 1 for now') + } + + // Start a watcher for each path + for mut entry in n.watch_list { + // spawn n.watch_path(mut entry) + n.watch_path(mut entry) + } } // stop stops watching for events pub fn (mut n Notifier) stop() { n.is_watching = false -} - -fn (mut n Notifier) watch_loop() { - for n.is_watching { - event := n.watcher.wait(time.Duration(time.hour * 1)) - println(event) - panic('implement') - // for entry in n.watch_list { - // if event.fd == entry.fd { - // mut notify_event := NotifyEvent.modify - // if event.kind == .create { - // notify_event = .create - // } else if event.kind == .write { - // notify_event = .write - // } - // if entry.callback != none { - // entry.callback(notify_event, entry.path) - // } - // } - // } + // Kill all fswatch processes + for entry in n.watch_list { + if entry.pid > 0 { + os.system('kill ${entry.pid}') + } } } + +fn (mut n Notifier) watch_path(mut entry WatchEntry) { + // Start fswatch process + mut p := os.new_process('/opt/homebrew/bin/fswatch') + p.set_args(['-x', '--event-flags', entry.path]) + p.set_redirect_stdio() + p.run() + + entry.pid = p.pid + + for n.is_watching { + line := p.stdout_read() + if line.len > 0 { + parts := line.split(' ') + if parts.len >= 2 { + path := parts[0] + flags := parts[1] + + mut event := NotifyEvent.modify // Default to modify + + // Parse fswatch event flags + // See: https://emcrisostomo.github.io/fswatch/doc/1.17.1/fswatch.html#Event-Flags + if flags.contains('Created') { + event = .create + } else if flags.contains('Removed') { + event = .delete + } else if flags.contains('Renamed') { + event = .rename + } else if flags.contains('Updated') || flags.contains('Modified') { + event = .modify + } + + if cb := entry.callback { + cb(event, path, n.args) + } + } + } + time.sleep(100 * time.millisecond) + } + + p.close() +} diff --git a/lib/osal/notifier/notifier_test.v b/lib/osal/notifier/notifier_test.v deleted file mode 100644 index ea1698eb..00000000 --- a/lib/osal/notifier/notifier_test.v +++ /dev/null @@ -1,136 +0,0 @@ -module notifier - -import time -import os - -const test_file = 'test_watch.txt' -const test_file2 = 'test_watch2.txt' - -fn testsuite_begin() { - if os.exists(test_file) { - os.rm(test_file) or {} - } -} - -fn testsuite_end() { - if os.exists(test_file) { - os.rm(test_file) or {} - } -} - -fn test_notifier() { - mut event_received := false - mut last_event := NotifyEvent.create - - on_file_change := fn [mut event_received, mut last_event] (event NotifyEvent, path string) { - event_received = true - last_event = event - } - - // Create notifier - mut n := new('test_watcher')! - - // Create test file - os.write_file(test_file, 'initial content')! - - // Add watch - n.add_watch(test_file, on_file_change)! - - // Start watching - n.start()! - - // Test file modification - time.sleep(100 * time.millisecond) - os.write_file(test_file, 'modified content')! - time.sleep(500 * time.millisecond) - - assert event_received == true - assert last_event == .modify - - // Test file deletion - event_received = false - os.rm(test_file)! 
- time.sleep(500 * time.millisecond) - - assert event_received == true - assert last_event == .delete - - // Stop watching - n.stop() -} - -fn test_multiple_watches() { - mut events_count := 0 - - on_any_change := fn [mut events_count] (event NotifyEvent, path string) { - events_count++ - } - - // Create notifier - mut n := new('multi_watcher')! - - // Create test files - os.write_file(test_file, 'file1')! - os.write_file(test_file2, 'file2')! - - // Add watches - n.add_watch(test_file, on_any_change)! - n.add_watch(test_file2, on_any_change)! - - // Start watching - n.start()! - - // Modify both files - time.sleep(100 * time.millisecond) - os.write_file(test_file, 'file1 modified')! - os.write_file(test_file2, 'file2 modified')! - time.sleep(500 * time.millisecond) - - assert events_count == 2 - - // Cleanup - n.stop() - os.rm(test_file)! - os.rm(test_file2)! -} - -fn test_remove_watch() { - mut events_count := 0 - - on_change := fn [mut events_count] (event NotifyEvent, path string) { - events_count++ - } - - // Create notifier - mut n := new('remove_test')! - - // Create test file - os.write_file(test_file, 'content')! - - // Add watch - n.add_watch(test_file, on_change)! - - // Start watching - n.start()! - - // Modify file - time.sleep(100 * time.millisecond) - os.write_file(test_file, 'modified')! - time.sleep(500 * time.millisecond) - - assert events_count == 1 - - // Remove watch - n.remove_watch(test_file)! - - // Modify file again - os.write_file(test_file, 'modified again')! - time.sleep(500 * time.millisecond) - - // Should still be 1 since watch was removed - assert events_count == 1 - - // Cleanup - n.stop() - os.rm(test_file)! -} diff --git a/lib/osal/notifier/readme.md b/lib/osal/notifier/readme.md index d3951891..79405cba 100644 --- a/lib/osal/notifier/readme.md +++ b/lib/osal/notifier/readme.md @@ -1,21 +1,24 @@ -# Notifier Module +# Notifier -The Notifier module provides a simple and efficient way to monitor file system changes in V programs. It wraps the OS-level file system notification mechanisms and provides a clean API for watching files and directories. +A file system notification system for V that provides real-time monitoring of file system events using `fswatch`. + +## Dependencies + +- `fswatch`: Must be installed on your system. The notifier will check for its presence and return an error if not found. ## Features -- Watch multiple files/paths simultaneously -- Event-based callbacks for file changes -- Support for different types of events (create, modify, delete) -- Clean API for adding and removing watches +- Monitor file system events (create, modify, delete, rename) +- Multiple watch paths support +- Customizable event callbacks +- Clean start/stop functionality -## Usage - -### Basic Example +## Usage Example ```v import freeflowuniverse.herolib.osal.notifier +// Define callback function for file events fn on_file_change(event notifier.NotifyEvent, path string) { match event { .create { println('File created: ${path}') } @@ -26,111 +29,18 @@ fn on_file_change(event notifier.NotifyEvent, path string) { } fn main() { - // Create a new notifier + // Create a new notifier instance mut n := notifier.new('my_watcher')! - // Add a file to watch - n.add_watch('path/to/file.txt', on_file_change)! + // Add a path to watch + n.add_watch('/path/to/watch', on_file_change)! // Start watching n.start()! - // Keep the program running - for {} -} -``` + // ... your application logic ... 
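+	// note: NotifyCallback now takes three parameters, so callbacks can
+	// receive the notifier's args map:
+	// fn (event NotifyEvent, path string, args map[string]string)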
-### Advanced Usage - -```v -import freeflowuniverse.herolib.osal.notifier - -fn main() { - mut n := notifier.new('config_watcher')! - - // Watch multiple files - n.add_watch('config.json', on_config_change)! - n.add_watch('data.txt', on_data_change)! - - // Start watching - n.start()! - - // ... do other work ... - // Stop watching when done n.stop() } - -fn on_config_change(event notifier.NotifyEvent, path string) { - if event == .modify { - println('Config file changed, reloading...') - // Reload configuration - } -} - -fn on_data_change(event notifier.NotifyEvent, path string) { - println('Data file changed: ${event}') -} ``` - -## API Reference - -### Structs - -#### Notifier -```v -pub struct Notifier { -pub mut: - name string - is_watching bool -} -``` - -### Functions - -#### new -```v -pub fn new(name string) !&Notifier -``` -Creates a new Notifier instance with the given name. - -#### add_watch -```v -pub fn (mut n Notifier) add_watch(path string, callback NotifyCallback) ! -``` -Adds a path to watch with an associated callback function. - -#### remove_watch -```v -pub fn (mut n Notifier) remove_watch(path string) ! -``` -Removes a watched path. - -#### start -```v -pub fn (mut n Notifier) start() ! -``` -Begins watching for file system events. - -#### stop -```v -pub fn (mut n Notifier) stop() -``` -Stops watching for events. - -## Error Handling - -The module uses V's error handling system. Most functions return a `!` type, indicating they can fail. Always handle potential errors appropriately: - -```v -n := notifier.new('watcher') or { - println('Failed to create notifier: ${err}') - return -} -``` - -## Notes - -- The notifier uses OS-level file system notification mechanisms for efficiency -- Callbacks are executed in a separate thread to avoid blocking -- Always call `stop()` when you're done watching to clean up resources diff --git a/lib/osal/package_test.v b/lib/osal/package_test.v index f635abaa..540996fe 100644 --- a/lib/osal/package_test.v +++ b/lib/osal/package_test.v @@ -13,24 +13,23 @@ fn test_package_management() { } } - // First ensure wget is not installed - package_remove('wget') or {} + is_wget_installed := cmd_exists('wget') - // Verify wget is not installed - assert !cmd_exists('wget') + if is_wget_installed { + // Clean up - remove wget + package_remove('wget') or { assert false, 'Failed to remove wget: ${err}' } + assert !cmd_exists('wget') + // Reinstalling wget as it was previously installed + package_install('wget') or { assert false, 'Failed to install wget: ${err}' } + assert cmd_exists('wget') + return + } - // Update package list - package_refresh() or { assert false, 'Failed to refresh package list: ${err}' } - - // Install wget + // Intstall wget and verify it is installed package_install('wget') or { assert false, 'Failed to install wget: ${err}' } - - // Verify wget is now installed assert cmd_exists('wget') // Clean up - remove wget package_remove('wget') or { assert false, 'Failed to remove wget: ${err}' } - - // Verify wget is removed assert !cmd_exists('wget') } diff --git a/lib/osal/screen/readme.md b/lib/osal/screen/readme.md index 28cd2048..6cca674e 100644 --- a/lib/osal/screen/readme.md +++ b/lib/osal/screen/readme.md @@ -11,17 +11,16 @@ Create a file `screen_example.vsh`: import freeflowuniverse.herolib.osal.screen -// Create a new screen session with hardcoded parameters -mut s := screen.Screen{ - name: 'test_session' - cmd: '/bin/bash' // Default shell -} +// Create a new screen factory +mut sf := screen.new()! 
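+// the factory is the entry point: sessions are created with sf.add and removed with sf.kill (see below)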
-// Check if screen is running -is_running := s.is_running() or { - println('Error checking screen status: ${err}') - return -} +// Add a new screen session +mut s := sf.add( + name: 'myscreen' + cmd: '/bin/bash' // optional, defaults to /bin/bash + start: true // optional, defaults to true + attach: false // optional, defaults to false +)! // Get session status status := s.status() or { @@ -40,6 +39,12 @@ s.attach() or { println('Error attaching: ${err}') return } + +// Kill the screen when done +sf.kill('myscreen') or { + println('Error killing screen: ${err}') + return +} ``` ## Basic Screen Commands diff --git a/lib/osal/screen/screen.v b/lib/osal/screen/screen.v index f91e98ad..d2a59f73 100644 --- a/lib/osal/screen/screen.v +++ b/lib/osal/screen/screen.v @@ -7,8 +7,8 @@ import os import time @[heap] -struct Screen { -mut: +pub struct Screen { +pub mut: cmd string name string pid int @@ -16,7 +16,7 @@ mut: // factory ?&ScreensFactory @[skip; str: skip] } -enum ScreenState { +pub enum ScreenState { unknown detached } @@ -31,17 +31,25 @@ pub enum ScreenStatus { pub fn (self Screen) status() !ScreenStatus { // Command to list screen sessions cmd := 'screen -ls' - response := osal.execute_silent(cmd)! + + ls_response := os.execute(cmd) + if ls_response.exit_code != 0 { + if ls_response.output.contains('No Sockets found') { + return .inactive + } + + return error('failed to list screen sessions: ${ls_response.output}') + } // Check if the screen session exists by looking for the session name in the output - if !response.contains(self.name) { + if !ls_response.output.contains(self.name) { return .inactive } // Command to send a dummy command to the screen session and check response - cmd_check := "screen -S ${self.name} -X eval \"stuff \\\"\\003\\\"; sleep 0.1; stuff \\\"ps\\n\\\"\"" + cmd_check := "screen -S ${self.name} -X stuff $'\003' && sleep 0.1 && screen -S ${self.name} -X stuff $'ps\n'" osal.execute_silent(cmd_check)! - + time.sleep(100 * time.millisecond) // Command to check if there is an active process in the screen session cmd_ps := 'screen -S ${self.name} -X hardcopy -h /tmp/screen_output; cat /tmp/screen_output' ps_response := osal.execute_silent(cmd_ps)! diff --git a/lib/osal/screen/screen_test.v b/lib/osal/screen/screen_test.v index f56cd77a..18278a8f 100644 --- a/lib/osal/screen/screen_test.v +++ b/lib/osal/screen/screen_test.v @@ -29,11 +29,6 @@ pub fn testsuite_begin() ! { cleanup_test_screens()! } -// Cleanup after all tests -pub fn testsuite_end() ! { - cleanup_test_screens()! -} - fn cleanup_test_screens() ! { mut screen_factory := new(reset: false)! screen_factory.scan()! @@ -80,6 +75,9 @@ fn create_and_verify_screen(mut screen_factory ScreensFactory, name string, cmd // Test screen creation and basic status pub fn test_screen_creation() ! { + defer { + cleanup_test_screens() or { panic('failed to cleanup test screens: ${err}') } + } mut screen_factory := new(reset: false)! mut screen := create_and_verify_screen(mut &screen_factory, test_screen_name, '/bin/bash')! @@ -90,6 +88,9 @@ pub fn test_screen_creation() ! { // Test command sending functionality pub fn test_screen_cmd_send() ! { + defer { + cleanup_test_screens() or { panic('failed to cleanup test screens: ${err}') } + } mut screen_factory := new(reset: false)! mut screen := create_and_verify_screen(mut &screen_factory, test_screen_name, '/bin/bash')! @@ -106,6 +107,9 @@ pub fn test_screen_cmd_send() ! { // Test error cases pub fn test_screen_errors() ! 
{ + defer { + cleanup_test_screens() or { panic('failed to cleanup test screens: ${err}') } + } mut screen_factory := new(reset: false)! // Test invalid screen name @@ -127,6 +131,9 @@ pub fn test_screen_errors() ! { // Test multiple screens pub fn test_multiple_screens() ! { + defer { + cleanup_test_screens() or { panic('failed to cleanup test screens: ${err}') } + } mut screen_factory := new(reset: false)! screen1_name := '${test_screen_name}_1' diff --git a/lib/osal/tmux/testdata/tmux_window_test.v b/lib/osal/tmux/testdata/tmux_window_test.v deleted file mode 100644 index 686d742e..00000000 --- a/lib/osal/tmux/testdata/tmux_window_test.v +++ /dev/null @@ -1,67 +0,0 @@ -module tmux - -import freeflowuniverse.herolib.osal -import freeflowuniverse.herolib.installers.tmux -import freeflowuniverse.herolib.ui.console - -// uses single tmux instance for all tests -__global ( - tmux Tmux -) - -fn init() { - tmux = get_remote('185.69.166.152')! - - // reset tmux for tests - if tmux.is_running() { - tmux.stop() or { panic('Cannot stop tmux') } - } -} - -fn testsuite_end() { - if tmux.is_running() { - tmux.stop()! - } -} - -fn test_window_new() { - tmux.start() or { panic("can't start tmux: ${err}") } - - // test window new with only name arg - window_args := WindowArgs{ - name: 'TestWindow' - } - - assert !tmux.sessions.keys().contains('main') - - mut window := tmux.window_new(window_args) or { panic("Can't create new window: ${err}") } - assert tmux.sessions.keys().contains('main') - window.delete() or { panic('Cant delete window') } -} - -// // tests creating duplicate windows -// fn test_window_new0() { - -// -// installer := tmux.get_install( - -// mut tmux := Tmux { -// node: node_ssh -// } - -// window_args := WindowArgs { -// name: 'TestWindow0' -// } - -// // console.print_debug(tmux) -// mut window := tmux.window_new(window_args) or { -// panic("Can't create new window: $err") -// } -// assert tmux.sessions.keys().contains('main') -// mut window_dup := tmux.window_new(window_args) or { -// panic("Can't create new window: $err") -// } -// console.print_debug(node_ssh.exec('tmux ls') or { panic("fail:$err")}) -// window.delete() or { panic("Cant delete window") } -// // console.print_debug(tmux) -// } diff --git a/lib/osal/tmux/tmux.v b/lib/osal/tmux/tmux.v index 7dbb3686..3c34e9e6 100644 --- a/lib/osal/tmux/tmux.v +++ b/lib/osal/tmux/tmux.v @@ -23,20 +23,20 @@ pub fn new(args TmuxNewArgs) !Tmux { mut t := Tmux{ sessionid: args.sessionid } - t.load()! + // t.load()! t.scan()! return t } -// loads tmux session, populate the object -pub fn (mut tmux Tmux) load() ! { - isrunning := tmux.is_running()! - if !isrunning { - tmux.start()! - } - // console.print_debug("SCAN") - tmux.scan()! -} +// // loads tmux session, populate the object +// pub fn (mut tmux Tmux) load() ! { +// // isrunning := tmux.is_running()! +// // if !isrunning { +// // tmux.start()! +// // } +// // console.print_debug("SCAN") +// tmux.scan()! +// } pub fn (mut t Tmux) stop() ! 
{ $if debug { @@ -91,19 +91,18 @@ pub fn (mut t Tmux) windows_get() []&Window { // checks whether tmux server is running pub fn (mut t Tmux) is_running() !bool { - res := osal.exec(cmd: 'tmux info', stdout: false, name: 'tmux_info', raise_error: false) or { - panic('bug') - } - if res.error.contains('no server running') { - // console.print_debug(" TMUX NOT RUNNING") - return false - } - if res.error.contains('no current client') { - return true - } - if res.exit_code > 0 { - return error('could not execute tmux info.\n${res}') + res := os.execute('tmux info') + if res.exit_code != 0 { + if res.output.contains('no server running') { + // console.print_debug(" TMUX NOT RUNNING") + return false + } + if res.output.contains('no current client') { + return true + } + return error('could not execute tmux info.\n${res.output}') } + return true } diff --git a/lib/osal/tmux/tmux_scan.v b/lib/osal/tmux/tmux_scan.v index f385262d..bbe2ae33 100644 --- a/lib/osal/tmux/tmux_scan.v +++ b/lib/osal/tmux/tmux_scan.v @@ -58,6 +58,9 @@ pub fn (mut t Tmux) scan() ! { cmd_list_session := "tmux list-sessions -F '#{session_name}'" exec_list := osal.exec(cmd: cmd_list_session, stdout: false, name: 'tmux_list') or { + if err.msg().contains('no server running') { + return + } return error('could not execute list sessions.\n${err}') } @@ -80,7 +83,7 @@ pub fn (mut t Tmux) scan() ! { } console.print_debug(t) - + println('t: ${t}') // mut done := map[string]bool{} cmd := "tmux list-panes -a -F '#{session_name}|#{window_name}|#{window_id}|#{pane_active}|#{pane_id}|#{pane_pid}|#{pane_start_command}'" out := osal.execute_silent(cmd) or { return error("Can't execute ${cmd} \n${err}") } diff --git a/lib/osal/tmux/testdata/tmux_session_test.v b/lib/osal/tmux/tmux_session_test.v similarity index 95% rename from lib/osal/tmux/testdata/tmux_session_test.v rename to lib/osal/tmux/tmux_session_test.v index 5be65c1d..e4822c41 100644 --- a/lib/osal/tmux/testdata/tmux_session_test.v +++ b/lib/osal/tmux/tmux_session_test.v @@ -1,7 +1,7 @@ module tmux import freeflowuniverse.herolib.osal -import freeflowuniverse.herolib.installers.tmux +// import freeflowuniverse.herolib.installers.tmux // fn testsuite_end() { @@ -26,13 +26,13 @@ fn test_session_create() { mut s := Session{ tmux: &tmux - windows: map[string]&Window{} + windows: []&Window{} name: 'testsession' } mut s2 := Session{ tmux: &tmux - windows: map[string]&Window{} + windows: []&Window{} name: 'testsession2' } diff --git a/lib/osal/tmux/tmux_window.v b/lib/osal/tmux/tmux_window.v index ad60d4c0..de047078 100644 --- a/lib/osal/tmux/tmux_window.v +++ b/lib/osal/tmux/tmux_window.v @@ -191,7 +191,7 @@ pub fn (mut w Window) stop() ! 
{
 		stdout: false
 		name:   'tmux_kill-window'
 		// die: false
-	) or { return error("Can't kill window with id:${w.id}") }
+	) or { return error("Can't kill window with id:${w.id}: ${err}") }
 	w.pid = 0
 	w.active = false
 }

diff --git a/lib/osal/tmux/tmux_window_test.v b/lib/osal/tmux/tmux_window_test.v
new file mode 100644
index 00000000..5bb228d0
--- /dev/null
+++ b/lib/osal/tmux/tmux_window_test.v
@@ -0,0 +1,67 @@
+module tmux
+
+import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.ui.console
+import time
+
+// each test uses its own tmux instance; the old shared __global was removed
+
+fn testsuite_begin() {
+	mut tmux_ := new() or { panic('Cannot create tmux: ${err}') }
+
+	// reset tmux for tests
+	is_running := tmux_.is_running() or { panic('cannot check if tmux is running: ${err}') }
+	if is_running {
+		tmux_.stop() or { panic('Cannot stop tmux: ${err}') }
+	}
+}
+
+fn testsuite_end() {
+	mut tmux_ := new() or { panic('Cannot create tmux: ${err}') }
+	is_running := tmux_.is_running() or { panic('cannot check if tmux is running: ${err}') }
+	if is_running {
+		tmux_.stop() or { panic('Cannot stop tmux: ${err}') }
+	}
+}
+
+fn test_window_new() ! {
+	mut tmux_ := new()!
+
+	// test window new with only name arg
+	window_args := WindowArgs{
+		name: 'TestWindow'
+	}
+
+	assert tmux_.sessions.filter(it.name == 'main').len == 0
+
+	mut window := tmux_.window_new(window_args)!
+	assert tmux_.sessions.filter(it.name == 'main').len > 0
+	// time.sleep(1000 * time.millisecond)
+	window.stop()!
+}
+
+// tests creating duplicate windows; kept commented out, it relies on a
+// remote-node setup (node_ssh) that is not available in this module
+// fn test_window_new0() {
+// 	installer := tmux.get_install(
+//
+// 	mut tmux := Tmux {
+// 		node: node_ssh
+// 	}
+//
+// 	window_args := WindowArgs {
+// 		name: 'TestWindow0'
+// 	}
+//
+// 	// console.print_debug(tmux)
+// 	mut window := tmux.window_new(window_args) or {
+// 		panic("Can't create new window: $err")
+// 	}
+// 	assert tmux.sessions.keys().contains('main')
+// 	mut window_dup := tmux.window_new(window_args) or {
+// 		panic("Can't create new window: $err")
+// 	}
+// 	console.print_debug(node_ssh.exec('tmux ls') or { panic("fail:$err")})
+// 	window.delete() or { panic("Cant delete window") }
+// 	// console.print_debug(tmux)
+// }

diff --git a/lib/osal/zinit/rpc_test.v b/lib/osal/zinit/rpc_test.v
index 003a953d..bd9c6bcd 100644
--- a/lib/osal/zinit/rpc_test.v
+++ b/lib/osal/zinit/rpc_test.v
@@ -2,13 +2,38 @@ module zinit
 
 import os
 import time
+import freeflowuniverse.herolib.core
+import freeflowuniverse.herolib.osal
 
 fn test_zinit() {
+	if !core.is_linux()! {
+		// zinit is only supported on linux
+		return
+	}
+
+	// TODO: use the zinit installer to install zinit
+	// this is a workaround since we can't import the zinit installer due to a circular dependency
+	zinit_version := os.execute('zinit --version')
+	if zinit_version.exit_code != 0 {
+		release_url := 'https://github.com/threefoldtech/zinit/releases/download/v0.2.14/zinit'
+
+		mut dest := osal.download(
+			url:        release_url
+			minsize_kb: 2000
+			reset:      true
+			dest:       '/tmp/zinit'
+		)!
+
+		chmod_cmd := os.execute('chmod +x /tmp/zinit')
+		assert chmod_cmd.exit_code == 0, 'failed to chmod +x /tmp/zinit: ${chmod_cmd.output}'
+	}
+
+	this_dir := os.dir(@FILE)
 	// you need to have zinit in your path to run this test
-	spawn os.execute('zinit -s herolib/osal/zinit/zinit/zinit.sock init -c herolib/osal/zinit/zinit')
+	spawn os.execute('/tmp/zinit -s ${this_dir}/zinit/zinit.sock init -c ${this_dir}/zinit')
 	time.sleep(time.second)
 
-	client := new_rpc_client('herolib/osal/zinit/zinit/zinit.sock')
+	client := new_rpc_client(socket_path: '${this_dir}/zinit/zinit.sock')
 	mut ls := client.list()!
mut want_ls := { @@ -57,4 +82,7 @@ fn test_zinit() { time.sleep(time.millisecond * 10) st = client.status('service_1')! assert st.state.contains('SIGTERM') + + // Remove the socet file + os.rm('${this_dir}/zinit/zinit.sock')! } diff --git a/lib/security/authentication/authenticator.v b/lib/security/authentication/authenticator.v index f6f4335b..845af260 100644 --- a/lib/security/authentication/authenticator.v +++ b/lib/security/authentication/authenticator.v @@ -14,7 +14,7 @@ import log pub struct Authenticator { secret string mut: - config SmtpConfig @[required] + config SmtpConfig @[required] backend IBackend // Backend for authenticator } @@ -56,7 +56,7 @@ pub fn new(config AuthenticatorConfig) !Authenticator { // password: config.smtp.password // )! backend: config.backend - secret: config.secret + secret: config.secret } } @@ -92,24 +92,24 @@ pub fn (mut auth Authenticator) send_verification_mail(config SendMailConfig) ! // create auth session auth_code := rand.bytes(64) or { panic(err) } auth.backend.create_auth_session( - email: config.email + email: config.email auth_code: auth_code.hex() - timeout: time.now().add_seconds(180) + timeout: time.now().add_seconds(180) )! link := 'Click to authenticate' mail := smtp.Mail{ - to: config.email - from: config.mail.from - subject: config.mail.subject + to: config.email + from: config.mail.from + subject: config.mail.subject body_type: .html - body: '${config.mail.body}\n${link}' + body: '${config.mail.body}\n${link}' } mut client := smtp.new_client( - server: auth.config.server - from: auth.config.from - port: auth.config.port + server: auth.config.server + from: auth.config.from + port: auth.config.port username: auth.config.username password: auth.config.password )! @@ -128,17 +128,17 @@ pub fn (mut auth Authenticator) send_login_link(config SendMailConfig) ! { encoded_signature := base64.url_encode(signature.bytestr().bytes()) link := 'Click to login' mail := smtp.Mail{ - to: config.email - from: config.mail.from - subject: config.mail.subject + to: config.email + from: config.mail.from + subject: config.mail.subject body_type: .html - body: '${config.mail.body}\n${link}' + body: '${config.mail.body}\n${link}' } mut client := smtp.new_client( - server: auth.config.server - from: auth.config.from - port: auth.config.port + server: auth.config.server + from: auth.config.from + port: auth.config.port username: auth.config.username password: auth.config.password )! @@ -222,7 +222,7 @@ pub fn (mut auth Authenticator) authenticate(email string, cypher string) ! { } pub struct AwaitAuthParams { - email string @[required] + email string @[required] timeout time.Duration = 3 * time.minute } diff --git a/lib/security/authentication/backend_database.v b/lib/security/authentication/backend_database.v index b133b7dc..7cb84cf0 100644 --- a/lib/security/authentication/backend_database.v +++ b/lib/security/authentication/backend_database.v @@ -8,7 +8,7 @@ import time @[noinit] struct DatabaseBackend { mut: - db sqlite.DB + db sqlite.DB } @[params] diff --git a/lib/security/authentication/client.v b/lib/security/authentication/client.v index 2e38f2a4..cce8ef9f 100644 --- a/lib/security/authentication/client.v +++ b/lib/security/authentication/client.v @@ -33,8 +33,8 @@ fn (client EmailClient) post_request(params PostParams) !http.Response { // verify_email posts an email verification req to the email auth controller pub fn (client EmailClient) email_authentication(params SendMailConfig) ! 
{ client.post_request( - url: '${client.url}/email_authentication' - data: json.encode(params) + url: '${client.url}/email_authentication' + data: json.encode(params) timeout: 180 * time.second )! } @@ -42,8 +42,8 @@ pub fn (client EmailClient) email_authentication(params SendMailConfig) ! { // verify_email posts an email verification req to the email auth controller pub fn (client EmailClient) is_verified(address string) !bool { resp := client.post_request( - url: '${client.url}/is_verified' - data: json.encode(address) + url: '${client.url}/is_verified' + data: json.encode(address) timeout: 180 * time.second )! return resp.body == 'true' @@ -52,7 +52,7 @@ pub fn (client EmailClient) is_verified(address string) !bool { // send_verification_email posts an email verification req to the email auth controller pub fn (client EmailClient) send_verification_email(params SendMailConfig) ! { client.post_request( - url: '${client.url}/send_verification_mail' + url: '${client.url}/send_verification_mail' data: json.encode(params) ) or { return error(err.msg()) } } @@ -61,7 +61,7 @@ pub fn (client EmailClient) send_verification_email(params SendMailConfig) ! { pub fn (c EmailClient) authenticate(address string, cypher string) !AttemptResult { resp := http.post('${c.url}/authenticate', json.encode(AuthAttempt{ address: address - cypher: cypher + cypher: cypher }))! result := json.decode(AttemptResult, resp.body)! return result diff --git a/lib/security/authentication/controller_test.v b/lib/security/authentication/controller_test.v index 4394274a..cdf8a740 100644 --- a/lib/security/authentication/controller_test.v +++ b/lib/security/authentication/controller_test.v @@ -15,9 +15,9 @@ fn test_new_controller() { } client := smtp.Client{ - server: 'smtp-relay.brevo.com' - from: 'verify@authenticator.io' - port: 587 + server: 'smtp-relay.brevo.com' + from: 'verify@authenticator.io' + port: 587 username: env.value('BREVO_SMTP_USERNAME').string() password: env.value('BREVO_SMTP_PASSWORD').string() } diff --git a/lib/security/authentication/email_authentication.v b/lib/security/authentication/email_authentication.v index 0544d996..924efa3c 100644 --- a/lib/security/authentication/email_authentication.v +++ b/lib/security/authentication/email_authentication.v @@ -5,28 +5,30 @@ import crypto.hmac import crypto.sha256 import encoding.hex import encoding.base64 -import freeflowuniverse.herolib.clients.mailclient {MailClient} +import freeflowuniverse.herolib.clients.mailclient { MailClient } pub struct StatelessAuthenticator { pub: secret string pub mut: - mail_client MailClient + mail_client MailClient } - pub fn new_stateless_authenticator(authenticator StatelessAuthenticator) !StatelessAuthenticator { +pub fn new_stateless_authenticator(authenticator StatelessAuthenticator) !StatelessAuthenticator { // TODO: do some checks - return StatelessAuthenticator {...authenticator} + return StatelessAuthenticator{ + ...authenticator + } } pub struct AuthenticationMail { RedirectURLs pub: - to string // email address being authentcated - from string = 'email_authenticator@herolib.tf' - subject string = 'Verify your email' - body string = 'Please verify your email by clicking the link below' - callback string // callback url of authentication link + to string // email address being authentcated + from string = 'email_authenticator@herolib.tf' + subject string = 'Verify your email' + body string = 'Please verify your email by clicking the link below' + callback string // callback url of authentication link success_url 
string // where the user will be redirected upon successful authentication failure_url string // where the user will be redirected upon failed authentication } @@ -35,14 +37,14 @@ pub fn (mut a StatelessAuthenticator) send_authentication_mail(mail Authenticati link := a.new_authentication_link(mail.to, mail.callback, mail.RedirectURLs)! button := 'Verify Email' - // send email with link in body - a.mail_client.send( - to: mail.to - from: mail.from - subject: mail.subject - body_type: .html - body: $tmpl('./templates/mail.html') - ) or { return error('Error resolving email address $err') } + // send email with link in body + a.mail_client.send( + to: mail.to + from: mail.from + subject: mail.subject + body_type: .html + body: $tmpl('./templates/mail.html') + ) or { return error('Error resolving email address ${err}') } } @[params] @@ -59,48 +61,39 @@ fn (a StatelessAuthenticator) new_authentication_link(email string, callback str // sign email address and expiration of authentication link expiration := time.now().add(5 * time.minute) - data := '${email}.${expiration}' // data to be signed + data := '${email}.${expiration}' // data to be signed // QUESTION? should success url also be signed for security? - signature := hmac.new( - hex.decode(a.secret)!, - data.bytes(), - sha256.sum, - sha256.block_size - ) - encoded_signature := base64.url_encode(signature.bytestr().bytes()) + signature := hmac.new(hex.decode(a.secret)!, data.bytes(), sha256.sum, sha256.block_size) + encoded_signature := base64.url_encode(signature.bytestr().bytes()) mut queries := '' if urls.success_url != '' { encoded_url := base64.url_encode(urls.success_url.bytes()) queries += '?success_url=${encoded_url}' } - return "${callback}/${email}/${expiration.unix()}/${encoded_signature}${queries}" + return '${callback}/${email}/${expiration.unix()}/${encoded_signature}${queries}' } pub struct AuthenticationAttempt { pub: - email string - expiration time.Time - signature string + email string + expiration time.Time + signature string } // sends mail with login link -pub fn (auth StatelessAuthenticator) authenticate(attempt AuthenticationAttempt) ! { - if time.now() > attempt.expiration { - return error('link expired') - } +pub fn (auth StatelessAuthenticator) authenticate(attempt AuthenticationAttempt) ! 
{ + if time.now() > attempt.expiration { + return error('link expired') + } - data := '${attempt.email}.${attempt.expiration}' // data to be signed - signature_mirror := hmac.new( - hex.decode(auth.secret) or {panic(err)}, - data.bytes(), - sha256.sum, - sha256.block_size - ).bytestr().bytes() + data := '${attempt.email}.${attempt.expiration}' // data to be signed + signature_mirror := hmac.new(hex.decode(auth.secret) or { panic(err) }, data.bytes(), + sha256.sum, sha256.block_size).bytestr().bytes() - decoded_signature := base64.url_decode(attempt.signature) + decoded_signature := base64.url_decode(attempt.signature) - if !hmac.equal(decoded_signature, signature_mirror) { - return error('signature mismatch') - } + if !hmac.equal(decoded_signature, signature_mirror) { + return error('signature mismatch') + } } diff --git a/lib/security/jwt/jwt.v b/lib/security/jwt/jwt.v index 9f70712b..126209dd 100644 --- a/lib/security/jwt/jwt.v +++ b/lib/security/jwt/jwt.v @@ -39,7 +39,7 @@ pub: // DOESN'T handle data encryption, sensitive data should be encrypted pub fn create_token(payload_ JwtPayload) JsonWebToken { return JsonWebToken{ - JwtHeader: JwtHeader{'HS256', 'JWT'} + JwtHeader: JwtHeader{'HS256', 'JWT'} JwtPayload: JwtPayload{ ...payload_ iat: time.now() @@ -92,7 +92,7 @@ pub fn (token SignedJWT) decode() !JsonWebToken { payload_json := base64.url_decode(payload_urlencoded).bytestr() payload := json.decode(JwtPayload, payload_json) or { panic('Decoding payload: ${err}') } return JsonWebToken{ - JwtHeader: header + JwtHeader: header JwtPayload: payload } } diff --git a/lib/threefold/grid4/cloudslices/model.v b/lib/threefold/grid4/cloudslices/model.v index 54824040..ab8f31e7 100644 --- a/lib/threefold/grid4/cloudslices/model.v +++ b/lib/threefold/grid4/cloudslices/model.v @@ -18,7 +18,7 @@ pub mut: ssd string url string reputation int - uptime int + uptime int // 0..100 continent string country string passmark int diff --git a/lib/threefold/grid4/farmingsimulator/factory.v b/lib/threefold/grid4/farmingsimulator/factory.v index 4a53e8c8..5b91b4f1 100644 --- a/lib/threefold/grid4/farmingsimulator/factory.v +++ b/lib/threefold/grid4/farmingsimulator/factory.v @@ -48,10 +48,9 @@ pub fn new(args_ SimulatorArgs) !Simulator { if args.git_url.len > 0 { mut gs := gittools.new()! mut repo := gs.get_repo( - url: args.git_url - pull: args.git_pull - reset: args.git_reset - reload: false + url: args.git_url + pull: args.git_pull + reset: args.git_reset )! args.path = repo.path() diff --git a/lib/threefold/grid4/gridsimulator/factory.v b/lib/threefold/grid4/gridsimulator/factory.v index 5ac238e9..01aa5a6a 100644 --- a/lib/threefold/grid4/gridsimulator/factory.v +++ b/lib/threefold/grid4/gridsimulator/factory.v @@ -53,10 +53,9 @@ pub fn new(args_ SimulatorArgs) !Simulator { if args.git_url.len > 0 { mut gs := gittools.new()! mut repo := gs.get_repo( - url: args.git_url - pull: args.git_pull - reset: args.git_reset - reload: false + url: args.git_url + pull: args.git_pull + reset: args.git_reset )! 
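+		// the simulator then reads its data from the cloned repo's working directory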
args.path = repo.path() diff --git a/lib/virt/docker/docker_container.v b/lib/virt/docker/docker_container.v index f3bf908c..34a69a75 100644 --- a/lib/virt/docker/docker_container.v +++ b/lib/virt/docker/docker_container.v @@ -37,21 +37,6 @@ pub mut: command string } -@[params] -pub struct DockerContainerCreateArgs { - name string - hostname string - forwarded_ports []string // ["80:9000/tcp", "1000, 10000/udp"] - mounted_volumes []string // ["/root:/root", ] - env map[string]string // map of environment variables that will be passed to the container - privileged bool - remove_when_done bool = true // remove the container when it shuts down -pub mut: - image_repo string - image_tag string - command string = '/bin/bash' -} - // create/start container (first need to get a dockercontainer before we can start) pub fn (mut container DockerContainer) start() ! { exec(cmd: 'docker start ${container.id}')! diff --git a/lib/virt/docker/docker_container_create.v b/lib/virt/docker/docker_container_create.v index 4ce9732f..1274ec0e 100644 --- a/lib/virt/docker/docker_container_create.v +++ b/lib/virt/docker/docker_container_create.v @@ -3,48 +3,144 @@ module docker import freeflowuniverse.herolib.osal { exec } import freeflowuniverse.herolib.virt.utils +@[params] +pub struct DockerContainerCreateArgs { +pub mut: + name string + hostname string + forwarded_ports []string // ["80:9000/tcp", "1000, 10000/udp"] + mounted_volumes []string // ["/root:/root", ] + env map[string]string // map of environment variables that will be passed to the container + privileged bool + remove_when_done bool = true // remove the container when it shuts down + image_repo string + image_tag string + command string +} + pub fn (mut e DockerEngine) container_create(args DockerContainerCreateArgs) !&DockerContainer { + // Validate required parameters + if args.name.trim_space() == '' { + return error('Container name cannot be empty') + } + + // Set default hostname if not provided + mut hostname := args.hostname + if hostname.trim_space() == '' { + hostname = args.name.replace('_', '-') + } + mut ports := '' mut mounts := '' mut env := '' mut command := args.command + // Build environment variables string with proper spacing for var, value in args.env { - env += '-e ${var}="${value}"' + if env != '' { + env += ' ' + } + env += '-e "${var}=${value}"' } + // Build ports string for port in args.forwarded_ports { - ports = ports + '-p ${port} ' + if ports != '' { + ports += ' ' + } + ports += '-p ${port}' } + // Build mounts string for mount in args.mounted_volumes { - mounts += '-v ${mount} ' + if mounts != '' { + mounts += ' ' + } + mounts += '-v ${mount}' } - mut image := '${args.image_repo}' + // Build image string + mut image := args.image_repo if args.image_tag != '' { - image = image + ':${args.image_tag}' + image += ':${args.image_tag}' + } else { + // Check if image exists with 'local' tag first + mut local_check := exec(cmd: 'docker images ${args.image_repo}:local -q', debug: true)! + if local_check.output != '' { + image += ':local' + } else { + // Default to latest if no tag specified + image += ':latest' + } } + // Set default image and command for threefold if image == 'threefold' || image == 'threefold:latest' || image == '' { image = 'threefoldtech/grid3_ubuntu_dev' command = '/usr/local/bin/boot.sh' } + // Verify image exists locally + mut image_check := exec(cmd: 'docker images ${image} -q')! + if image_check.output == '' { + return error('Docker image not found: ${image}. 
Please ensure the image exists locally or can be pulled from a registry.') + } + privileged := if args.privileged { '--privileged' } else { '' } - // if forwarded ports passed in the args not containing mapping tp ssh (22) create one + // Add SSH port if not present if !utils.contains_ssh_port(args.forwarded_ports) { - // find random free port in the node - mut port := e.get_free_port() or { panic('No free port.') } + mut port := e.get_free_port() or { return error('No free port available for SSH') } + if ports != '' { + ports += ' ' + } ports += '-p ${port}:22/tcp' } - exec( - cmd: 'docker run --hostname ${args.hostname} ${privileged} --sysctl net.ipv6.conf.all.disable_ipv6=0 --name ${args.name} ${ports} ${env} ${mounts} -d -t ${image} ${command}' - )! - // Have to reload the containers as container_get works from memory - e.containers_load()! - mut container := e.container_get(name: args.name)! + // Construct docker run command with proper spacing and escaping + mut mycmd := 'docker run' + if hostname != '' { + mycmd += ' --hostname "${hostname}"' + } + if privileged != '' { + mycmd += ' ${privileged}' + } + mycmd += ' --sysctl net.ipv6.conf.all.disable_ipv6=0' + mycmd += ' --name "${args.name}"' + if ports != '' { + mycmd += ' ${ports}' + } + if env != '' { + mycmd += ' ${env}' + } + if mounts != '' { + mycmd += ' ${mounts}' + } + mycmd += ' -d -t ${image}' + if command != '' { + mycmd += ' ${command}' + } + // Execute docker run command + exec(cmd: mycmd) or { + return error('Failed to create Docker container: +Command: ${mycmd} +Error: ${err} +Possible causes: +- Invalid image name or tag +- Container name already in use +- Port conflicts +- Insufficient permissions +Please check the error message and try again.') + } + + // Verify container was created successfully + e.containers_load() or { + return error('Container created but failed to reload container list: ${err}') + } + + mut container := e.container_get(name: args.name) or { + return error('Container created but not found in container list. This may indicate the container failed to start properly. Check container logs with: docker logs ${args.name}') + } + return container } diff --git a/lib/virt/docker/docker_recipe_env.v b/lib/virt/docker/docker_recipe_env.v index d8cccc7b..3804864b 100644 --- a/lib/virt/docker/docker_recipe_env.v +++ b/lib/virt/docker/docker_recipe_env.v @@ -25,5 +25,5 @@ pub fn (mut i EnvItem) check() ! { } pub fn (mut i EnvItem) render() !string { - return "ENV ${i.name}='${i.value}'" + return 'ENV ${i.name}="${i.value}"' } diff --git a/lib/virt/docker/docker_recipe_package.v b/lib/virt/docker/docker_recipe_package.v index 9db55e4b..e36920e6 100644 --- a/lib/virt/docker/docker_recipe_package.v +++ b/lib/virt/docker/docker_recipe_package.v @@ -79,12 +79,13 @@ pub fn (mut b DockerBuilderRecipe) add_package(args PackageArgs) ! { for packagenamecompare in package.names { if packagenamecompare == packagename { // we found a double - return error('Cannot add the package again, there is a double. ${packagename} \n${b}') + continue } } } } } + // console.print_debug(package) if package.names.len == 0 { return error('could not find package names.\n ${b}\nARGS:\n${args}') diff --git a/lib/virt/docker/docker_recipe_snippets.v b/lib/virt/docker/docker_recipe_snippets.v index 2101e07e..711da8d2 100644 --- a/lib/virt/docker/docker_recipe_snippets.v +++ b/lib/virt/docker/docker_recipe_snippets.v @@ -60,30 +60,42 @@ pub fn (mut r DockerBuilderRecipe) add_vbuilder() ! 
{ // add ssh server and init scripts (note: zinit needs to be installed) pub fn (mut r DockerBuilderRecipe) add_sshserver() ! { - r.add_package(name: 'openssh-server')! + r.add_package(name: 'openssh-server, bash')! r.add_zinit_cmd( name: 'sshd-setup' oneshot: true exec: " + rm -rf /etc/ssh + mkdir -p /etc/ssh mkdir -p /run/sshd - ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa - ssh-keygen -f /etc/ssh/ssh_host_dsa_key -N '' -t dsa - ssh-keygen -f /etc/ssh/ssh_host_ecdsa_key -N '' -t ecdsa -b 521 ssh-keygen -f /etc/ssh/ssh_host_ed25519_key -N '' -t ed25519 + cat > /etc/ssh/sshd_config << 'EOF' +HostKey /etc/ssh/ssh_host_ed25519_key +PermitRootLogin prohibit-password +PasswordAuthentication no +ChallengeResponseAuthentication no +UsePAM no +X11Forwarding no +AllowTcpForwarding yes +AllowAgentForwarding yes +EOF " )! r.add_zinit_cmd( - name: 'ssh-keys' - after: 'sshd-setup' - exec: ' + name: 'ssh-keys' + after: 'sshd-setup' + oneshot: true + exec: ' if [ ! -d /root/.ssh ]; then mkdir -m 700 /root/.ssh fi - echo \$SSH_KEY >> /root/.ssh/authorized_keys - chmod 600 /root/.ssh/authorized_keys + if [ ! -z "\$SSH_KEY" ]; then + echo \$SSH_KEY >> /root/.ssh/authorized_keys + chmod 600 /root/.ssh/authorized_keys + fi ' )! diff --git a/lib/web/docusaurus/cfg/footer.json b/lib/web/docusaurus/cfg/footer.json new file mode 100644 index 00000000..cbef73ad --- /dev/null +++ b/lib/web/docusaurus/cfg/footer.json @@ -0,0 +1,40 @@ +{ + "style": "dark", + "links": [ + { + "title": "Docs", + "items": [ + { + "label": "Introduction", + "to": "/docs" + }, + { + "label": "TFGrid V4 Docs", + "href": "https://docs.threefold.io/" + } + ] + }, + { + "title": "Community", + "items": [ + { + "label": "Telegram", + "href": "https://t.me/threefold" + }, + { + "label": "X", + "href": "https://x.com/threefold_io" + } + ] + }, + { + "title": "Links", + "items": [ + { + "label": "ThreeFold.io", + "href": "https://threefold.io" + } + ] + } + ] +} diff --git a/lib/web/docusaurus/cfg/main.json b/lib/web/docusaurus/cfg/main.json new file mode 100644 index 00000000..8d823a4b --- /dev/null +++ b/lib/web/docusaurus/cfg/main.json @@ -0,0 +1,16 @@ +{ + "title": "Internet Geek", + "tagline": "Internet Geek", + "favicon": "img/favicon.png", + "url": "https://friends.threefold.info", + "url_home": "docs/", + "baseUrl": "/kristof/", + "image": "img/tf_graph.png", + "metadata": { + "description": "ThreeFold is laying the foundation for a geo aware Web 4, the next generation of the Internet.", + "image": "https://threefold.info/kristof/img/tf_graph.png", + "title": "ThreeFold Technology Vision" + }, + "buildDest":"root@info.ourworld.tf:/root/hero/www/info", + "buildDestDev":"root@info.ourworld.tf:/root/hero/www/infodev" +} diff --git a/lib/web/docusaurus/cfg/navbar.json b/lib/web/docusaurus/cfg/navbar.json new file mode 100644 index 00000000..76a13545 --- /dev/null +++ b/lib/web/docusaurus/cfg/navbar.json @@ -0,0 +1,15 @@ +{ + "title": "Kristof = Chief Executive Geek", + "items": [ + { + "href": "https://threefold.info/kristof/", + "label": "ThreeFold Technology", + "position": "right" + }, + { + "href": "https://threefold.io", + "label": "ThreeFold.io", + "position": "right" + } + ] +} diff --git a/lib/web/docusaurus/clean.v b/lib/web/docusaurus/clean.v new file mode 100644 index 00000000..b0d7674e --- /dev/null +++ b/lib/web/docusaurus/clean.v @@ -0,0 +1,95 @@ +module docusaurus + +import os +import strings + +pub fn (mut site DocSite) clean(args ErrorArgs) ! 
{ + toclean := ' + /node_modules + + babel.config.js + + # Production + /build + + # Generated files + .docusaurus + .cache-loader + + # Misc + .DS_Store + .env.local + .env.development.local + .env.test.local + .env.production.local + + npm-debug.log* + yarn-debug.log* + yarn-error.log* + bun.lockb + bun.lock + + yarn.lock + + build.sh + build_dev.sh + build-dev.sh + develop.sh + install.sh + + package.json + package-lock.json + pnpm-lock.yaml + + sidebars.ts + + tsconfig.json + ' + + mut sb := strings.new_builder(200) + for line in toclean.split_into_lines() { + clean_line := line.trim_space() + if clean_line == '' || clean_line.starts_with('#') { + continue + } + + // Remove leading slash if present to make path relative + path_to_clean := if clean_line.starts_with('/') { + clean_line[1..] + } else { + clean_line + } + + full_path := os.join_path(site.path_src.path, path_to_clean) + + // Handle glob patterns (files ending with *) + if path_to_clean.ends_with('*') { + base_pattern := path_to_clean#[..-1] // Remove the * at the end + base_dir := os.dir(full_path) + if os.exists(base_dir) { + files := os.ls(base_dir) or { + sb.writeln('Failed to list directory ${base_dir}: ${err}') + continue + } + for file in files { + if file.starts_with(base_pattern) { + file_path := os.join_path(base_dir, file) + os.rm(file_path) or { sb.writeln('Failed to remove ${file_path}: ${err}') } + } + } + } + continue + } + + // Handle regular files and directories + if os.exists(full_path) { + if os.is_dir(full_path) { + os.rmdir_all(full_path) or { + sb.writeln('Failed to remove directory ${full_path}: ${err}') + } + } else { + os.rm(full_path) or { sb.writeln('Failed to remove file ${full_path}: ${err}') } + } + } + } +} diff --git a/lib/web/docusaurus/config.v b/lib/web/docusaurus/config.v new file mode 100644 index 00000000..011c58c2 --- /dev/null +++ b/lib/web/docusaurus/config.v @@ -0,0 +1,95 @@ +module docusaurus + +import json +import os + +// Footer config structures +pub struct FooterItem { +pub mut: + label string + to string + href string +} + +pub struct FooterLink { +pub mut: + title string + items []FooterItem +} + +pub struct Footer { +pub mut: + style string + links []FooterLink +} + +// Main config structure +pub struct MainMetadata { +pub mut: + description string + image string + title string +} + +pub struct Main { +pub mut: + name string + title string + tagline string + favicon string + url string + url_home string + base_url string @[json: 'baseUrl'] + image string + metadata MainMetadata + build_dest string @[json: 'buildDest'] + build_dest_dev string @[json: 'buildDestDev'] +} + +// Navbar config structures +pub struct NavbarItem { +pub mut: + href string + label string + position string +} + +pub struct Navbar { +pub mut: + title string + items []NavbarItem +} + +// Combined config structure +pub struct Config { +pub mut: + footer Footer + main Main + navbar Navbar +} + +// load_config loads all configuration from the specified directory +pub fn load_config(cfg_dir string) !Config { + // Ensure the config directory exists + if !os.exists(cfg_dir) { + return error('Config directory ${cfg_dir} does not exist') + } + + // Load and parse footer config + footer_content := os.read_file(os.join_path(cfg_dir, 'footer.json'))! + footer := json.decode(Footer, footer_content)! + + // Load and parse main config + main_content := os.read_file(os.join_path(cfg_dir, 'main.json'))! + main := json.decode(Main, main_content)! 
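+	// main.json also carries the rsync deploy targets (buildDest/buildDestDev) used by the generated build.sh and build_dev.sh scripts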
+ + // Load and parse navbar config + navbar_content := os.read_file(os.join_path(cfg_dir, 'navbar.json'))! + navbar := json.decode(Navbar, navbar_content)! + + return Config{ + footer: footer + main: main + navbar: navbar + } +} diff --git a/lib/web/docusaurus/dsite.v b/lib/web/docusaurus/dsite.v new file mode 100644 index 00000000..91ea5a60 --- /dev/null +++ b/lib/web/docusaurus/dsite.v @@ -0,0 +1,298 @@ +module docusaurus + +import freeflowuniverse.herolib.osal.screen +import os +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.core.base +import freeflowuniverse.herolib.develop.gittools +import json +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.ui.console + +@[heap] +pub struct DocSite { +pub mut: + name string + url string + path_src pathlib.Path + path_build pathlib.Path + // path_publish pathlib.Path + args DSiteNewArgs + errors []SiteError + config Config +} + +@[params] +pub struct DSiteNewArgs { +pub mut: + name string + nameshort string + path string + url string + // publish_path string + build_path string + production bool + watch_changes bool = true + update bool +} + +pub fn (mut f DocusaurusFactory) build_dev(args_ DSiteNewArgs) !&DocSite { + mut s:=f.add(args_)! + s.generate()! + osal.exec( + cmd: ' + cd ${s.path_build.path} + bash build_dev.sh + ' + retry: 0 + )! + return s +} + +pub fn (mut f DocusaurusFactory) build(args_ DSiteNewArgs) !&DocSite { + mut s:=f.add(args_)! + s.generate()! + osal.exec( + cmd: ' + cd ${s.path_build.path} + bash build.sh + ' + retry: 0 + )! + return s +} + +pub fn (mut f DocusaurusFactory) dev(args_ DSiteNewArgs) !&DocSite { + mut s:=f.add(args_)! + + s.clean()! + s.generate()! + + // Create screen session for docusaurus development server + mut screen_name := 'docusaurus' + mut sf := screen.new()! + + // Add and start a new screen session + mut scr := sf.add( + name: screen_name + cmd: '/bin/bash' + start: true + attach: false + reset: true + )! + + // Send commands to the screen session + scr.cmd_send('cd ${s.path_build.path}')! + scr.cmd_send('bash develop.sh')! + + // Print instructions for user + console.print_header(' Docusaurus Development Server') + console.print_item('Development server is running in a screen session.') + console.print_item('To view the server output:') + console.print_item(' 1. Attach to screen: screen -r ${screen_name}') + console.print_item(' 2. To detach from screen: Press Ctrl+A then D') + console.print_item(' 3. To list all screens: screen -ls') + console.print_item('The site content is on::') + console.print_item(' 1. location of documents: ${s.path_src.path}/docs') + if osal.cmd_exists("code"){ + console.print_item(' 2. We opened above dir in vscode.') + osal.exec(cmd:'code ${s.path_src.path}/docs')! + } + + + // Start the watcher in a separate thread + //mut tf:=spawn watch_docs(docs_path, s.path_src.path, s.path_build.path) + //tf.wait()! + println("\n") + + if args_.watch_changes { + docs_path := '${s.path_src.path}/docs' + watch_docs(docs_path, s.path_src.path, s.path_build.path)! 
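+		// watch_docs runs in the foreground and returns when the watcher stops; the commented-out spawn above shows how it could run in a background thread instead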
+ } + + + return s +} + + +///////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////// + + +pub fn (mut f DocusaurusFactory) add(args_ DSiteNewArgs) !&DocSite { + console.print_header(' Docusaurus: ${args_.name}') + mut args := args_ + + if args.build_path.len == 0 { + args.build_path = '${f.path_build.path}' + } + // if args.publish_path.len == 0 { + // args.publish_path = '${f.path_publish.path}/${args.name}' + + + if args.url.len>0{ + + mut gs := gittools.new()! + args.path = gs.get_path(url: args.url)! + + } + + if args.path.len==0{ + return error("Can't get path from docusaurus site, its not specified.") + + } + + mut gs := gittools.new()! + mut r := gs.get_repo(url: 'https://github.com/freeflowuniverse/docusaurus_template.git',pull:args.update)! + mut template_path := r.patho()! + + // First ensure cfg directory exists in src, if not copy from template + if !os.exists("${args.path}/cfg") { + mut template_cfg := template_path.dir_get("cfg")! + template_cfg.copy(dest:"${args.path}/cfg")! + } + + if !os.exists("${args.path}/docs") { + mut template_cfg := template_path.dir_get("docs")! + template_cfg.copy(dest:"${args.path}/docs")! + } + + + mut myconfig:=load_config("${args.path}/cfg")! + + if myconfig.main.name.len==0{ + myconfig.main.name = myconfig.main.base_url.trim_space().trim("/").trim_space() + } + + + if args.name == '' { + args.name = myconfig.main.name + } + + if args.nameshort.len == 0 { + args.nameshort = args.name + } + args.nameshort = texttools.name_fix(args.nameshort) + + + + mut ds := DocSite{ + name: args.name + url: args.url + path_src: pathlib.get_dir(path: args.path, create: false)! + path_build: f.path_build + // path_publish: pathlib.get_dir(path: args.publish_path, create: true)! + args: args + config:myconfig + } + + f.sites << &ds + + return &ds +} + +@[params] +pub struct ErrorArgs { +pub mut: + path string + msg string + cat ErrorCat +} + +pub fn (mut site DocSite) error(args ErrorArgs) { + // path2 := pathlib.get(args.path) + e := SiteError{ + path: args.path + msg: args.msg + cat: args.cat + } + site.errors << e + console.print_stderr(args.msg) +} + +pub fn (mut site DocSite) generate() ! { + console.print_header(' site generate: ${site.name} on ${site.path_build.path}') + site.template_install()! + // osal.exec( + // cmd: ' + // cd ${site.path_build.path} + // #Docusaurus build --dest-dir ${site.path_publish.path} + // ' + // retry: 0 + // )! + + + // Now copy all directories that exist in src to build + for item in ["src","static","cfg"]{ + if os.exists("${site.path_src.path}/${item}"){ + mut aa:= site.path_src.dir_get(item)! + aa.copy(dest:"${site.path_build.path}/${item}")! + } + } + for item in ["docs"]{ + if os.exists("${site.path_src.path}/${item}"){ + mut aa:= site.path_src.dir_get(item)! + aa.copy(dest:"${site.path_build.path}/${item}",delete:true)! + } + } + +} + +fn (mut site DocSite) template_install() ! { + mut gs := gittools.new()! + + mut r := gs.get_repo(url: 'https://github.com/freeflowuniverse/docusaurus_template.git')! + mut template_path := r.patho()! + + //always start from template first + for item in ["src","static","cfg"]{ + mut aa:= template_path.dir_get(item)! + aa.copy(dest:"${site.path_build.path}/${item}",delete:true)! 
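+		// assumption: delete:true removes stale files at the destination first, so the copy acts as a mirror of the template dir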
+	}
+
+	for item in ['package.json', 'sidebars.ts', 'tsconfig.json', 'docusaurus.config.ts'] {
+		src_path := os.join_path(template_path.path, item)
+		dest_path := os.join_path(site.path_build.path, item)
+		os.cp(src_path, dest_path) or { return error('Failed to copy ${item} to build path: ${err}') }
+	}
+
+	for item in ['.gitignore'] {
+		src_path := os.join_path(template_path.path, item)
+		dest_path := os.join_path(site.path_src.path, item)
+		os.cp(src_path, dest_path) or { return error('Failed to copy ${item} to source path: ${err}') }
+	}
+
+	cfg := site.config
+
+	develop := $tmpl('templates/develop.sh')
+	build := $tmpl('templates/build.sh')
+	build_dev := $tmpl('templates/build_dev.sh')
+
+	mut develop_ := site.path_build.file_get_new('develop.sh')!
+	develop_.template_write(develop, true)!
+	develop_.chmod(0o700)!
+
+	mut build_ := site.path_build.file_get_new('build.sh')!
+	build_.template_write(build, true)!
+	build_.chmod(0o700)!
+
+	mut build_dev_ := site.path_build.file_get_new('build_dev.sh')!
+	build_dev_.template_write(build_dev, true)!
+	build_dev_.chmod(0o700)!
+
+	mut develop2_ := site.path_src.file_get_new('develop.sh')!
+	develop2_.template_write(develop, true)!
+	develop2_.chmod(0o700)!
+
+	mut build2_ := site.path_src.file_get_new('build.sh')!
+	build2_.template_write(build, true)!
+	build2_.chmod(0o700)!
+
+	mut build_dev2_ := site.path_src.file_get_new('build_dev.sh')!
+	build_dev2_.template_write(build_dev, true)!
+	build_dev2_.chmod(0o700)!
+}
diff --git a/lib/web/docusaurus/factory.v b/lib/web/docusaurus/factory.v
new file mode 100644
index 00000000..785438ac
--- /dev/null
+++ b/lib/web/docusaurus/factory.v
@@ -0,0 +1,45 @@
+module docusaurus
+
+import os
+// import freeflowuniverse.herolib.data.doctree.collection
+import freeflowuniverse.herolib.core.pathlib
+// import freeflowuniverse.herolib.ui.console
+// import freeflowuniverse.herolib.core.base
+import freeflowuniverse.herolib.develop.gittools
+
+@[heap]
+pub struct DocusaurusFactory {
+pub mut:
+	sites      []&DocSite @[skip; str: skip]
+	path_build pathlib.Path
+	// path_publish pathlib.Path
+	args DocusaurusArgs
+}
+
+@[params]
+pub struct DocusaurusArgs {
+pub mut:
+	// publish_path string
+	build_path string
+	production bool
+	update     bool
+}
+
+pub fn new(args_ DocusaurusArgs) !&DocusaurusFactory {
+	mut args := args_
+	if args.build_path == '' {
+		args.build_path = '${os.home_dir()}/hero/var/docusaurus'
+	}
+	// if args.publish_path == '' {
+	// 	args.publish_path = '${os.home_dir()}/hero/var/docusaurus/publish'
+	// }
+	mut ds := &DocusaurusFactory{
+		args:       args // keep the defaulted copy, not the raw args_
+		path_build: pathlib.get_dir(path: args.build_path, create: true)!
+		// path_publish: pathlib.get_dir(path: args.publish_path, create: true)!
+	}
+
+	ds.template_install(args.update)!
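+	// the build dir now holds the template files plus the installed bun dependencies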
+
+	return ds
+}
diff --git a/lib/web/docusaurus/model.v b/lib/web/docusaurus/model.v
new file mode 100644
index 00000000..bcfd1a27
--- /dev/null
+++ b/lib/web/docusaurus/model.v
@@ -0,0 +1,24 @@
+module docusaurus
+
+pub struct SiteError {
+	Error
+pub mut:
+	path string
+	msg  string
+	cat  ErrorCat
+}
+
+pub enum ErrorCat {
+	unknown
+	image_double
+	file_double
+	file_not_found
+	image_not_found
+	page_double
+	page_not_found
+	sidebar
+	circular_import
+	def
+	summary
+	include
+}
diff --git a/lib/web/docusaurus/template.v b/lib/web/docusaurus/template.v
new file mode 100644
index 00000000..262f3f9f
--- /dev/null
+++ b/lib/web/docusaurus/template.v
@@ -0,0 +1,28 @@
+module docusaurus
+
+import freeflowuniverse.herolib.develop.gittools
+import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.installers.web.bun
+
+fn (mut site DocusaurusFactory) template_install(update bool) ! {
+	mut gs := gittools.new()!
+
+	mut r := gs.get_repo(url: 'https://github.com/freeflowuniverse/docusaurus_template.git', pull: update)!
+	mut template_path := r.patho()!
+
+	for item in ['package.json', 'sidebars.ts', 'tsconfig.json'] {
+		mut aa := template_path.file_get(item)!
+		aa.copy(dest: '${site.path_build.path}/${item}')!
+	}
+
+	// install bun
+	mut installer := bun.get()!
+	installer.install()!
+
+	osal.exec(
+		cmd: '
+		cd ${site.path_build.path}
+		bun install
+		'
+	)!
+}
diff --git a/lib/web/docusaurus/templates/build.sh b/lib/web/docusaurus/templates/build.sh
new file mode 100755
index 00000000..1ffe4a63
--- /dev/null
+++ b/lib/web/docusaurus/templates/build.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -e
+
+script_dir="??(cd "??(dirname "??{BASH_SOURCE[0]}")" && pwd)"
+cd "??{script_dir}"
+
+echo "Docs directory: ??script_dir"
+
+cd ${site.path_build.path}
+
+export PATH=/tmp/docusaurus_build/node_modules/.bin:??PATH
+
+rm -rf ${site.path_build.path}/build/
+
+bun docusaurus build
+
+rsync -rv --delete ${site.path_build.path}/build/ ${cfg.main.build_dest.trim_right("/")}/${cfg.main.name.trim_right("/")}/
diff --git a/lib/web/docusaurus/templates/build_dev.sh b/lib/web/docusaurus/templates/build_dev.sh
new file mode 100755
index 00000000..8baa2596
--- /dev/null
+++ b/lib/web/docusaurus/templates/build_dev.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -e
+
+script_dir="??(cd "??(dirname "??{BASH_SOURCE[0]}")" && pwd)"
+cd "??{script_dir}"
+
+
+echo "Docs directory: ??script_dir"
+
+cd ${site.path_build.path}
+
+export PATH=/tmp/docusaurus_build/node_modules/.bin:??PATH
+
+rm -rf ${site.path_build.path}/build/
+
+bun docusaurus build
+
+rsync -rv --delete ${site.path_build.path}/build/ ${cfg.main.build_dest_dev.trim_right("/")}/${cfg.main.name.trim_right("/")}/
diff --git a/lib/web/docusaurus/templates/develop.sh b/lib/web/docusaurus/templates/develop.sh
new file mode 100755
index 00000000..3f476f26
--- /dev/null
+++ b/lib/web/docusaurus/templates/develop.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -e
+
+script_dir="??(cd "??(dirname "??{BASH_SOURCE[0]}")" && pwd)"
+cd "??{script_dir}"
+
+echo "Docs directory: ??script_dir"
+
+cd ${site.path_build.path}
+
+export PATH=/tmp/docusaurus_build/node_modules/.bin:??PATH
+
+bun run start -p 3100
diff --git a/lib/web/docusaurus/watcher.v b/lib/web/docusaurus/watcher.v
new file mode 100644
index 00000000..6b0b8aae
--- /dev/null
+++ b/lib/web/docusaurus/watcher.v
@@ -0,0 +1,96 @@
+module docusaurus
+
+import freeflowuniverse.herolib.osal.notifier
+import os
+
+fn watch_docs(docs_path string, path_src string, path_build string) ! {
+	mut n := notifier.new('docsite_watcher') or {
+		eprintln('Failed to create watcher: ${err}')
+		return
+	}
+
+	n.args['path_src'] = path_src
+	n.args['path_build'] = path_build
+
+	// Add watch with captured args
+	n.add_watch(docs_path, fn (event notifier.NotifyEvent, path string, args map[string]string) {
+		handle_file_change(event, path, args) or { eprintln('Error handling file change: ${err}') }
+	})!
+
+	n.start()!
+}
+
+// handle_file_change processes file system events
+fn handle_file_change(event notifier.NotifyEvent, path string, args map[string]string) ! {
+	file_base := os.base(path)
+	is_dir := os.is_dir(path)
+
+	// Skip files starting with #
+	if file_base.starts_with('#') {
+		return
+	}
+
+	// For files (not directories), check extensions
+	if !is_dir {
+		ext := os.file_ext(path).to_lower()
+		if ext !in ['.md', '.png', '.jpeg', '.jpg'] {
+			return
+		}
+	}
+
+	// Get relative path from docs directory
+	rel_path := path.replace('${args['path_src']}/docs/', '')
+	dest_path := '${args['path_build']}/docs/${rel_path}'
+
+	match event {
+		.create, .modify {
+			if is_dir {
+				// For directories, just ensure they exist
+				os.mkdir_all(dest_path) or {
+					return error('Failed to create directory ${dest_path}: ${err}')
+				}
+				println('Created directory: ${rel_path}')
+			} else {
+				// For files, ensure parent directory exists and copy
+				os.mkdir_all(os.dir(dest_path)) or {
+					return error('Failed to create directory ${os.dir(dest_path)}: ${err}')
+				}
+				os.cp(path, dest_path) or {
+					return error('Failed to copy ${path} to ${dest_path}: ${err}')
+				}
+				println('Updated: ${rel_path}')
+			}
+		}
+		.delete {
+			if os.exists(dest_path) {
+				if is_dir {
+					os.rmdir_all(dest_path) or {
+						return error('Failed to delete directory ${dest_path}: ${err}')
+					}
+					println('Deleted directory: ${rel_path}')
+				} else {
+					os.rm(dest_path) or { return error('Failed to delete ${dest_path}: ${err}') }
+					println('Deleted: ${rel_path}')
+				}
+			}
+		}
+		.rename {
+			// For rename events, fswatch provides the new path in the event
+			// The old path is already removed, so we just need to handle the new path
+			if is_dir {
+				os.mkdir_all(dest_path) or {
+					return error('Failed to create directory ${dest_path}: ${err}')
+				}
+				println('Renamed directory to: ${rel_path}')
+			} else {
+				os.mkdir_all(os.dir(dest_path)) or {
+					return error('Failed to create directory ${os.dir(dest_path)}: ${err}')
+				}
+				os.cp(path, dest_path) or {
+					return error('Failed to copy ${path} to ${dest_path}: ${err}')
+				}
+				println('Renamed to: ${rel_path}')
+			}
+		}
+	}
+}
diff --git a/manual/best_practices/scripts/scripts.md b/manual/best_practices/scripts/scripts.md
index e76c3fd9..eed603ea 100644
--- a/manual/best_practices/scripts/scripts.md
+++ b/manual/best_practices/scripts/scripts.md
@@ -6,7 +6,7 @@ example would be
 
 ```go
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 fn sh(cmd string) {
 	println('❯ ${cmd}')
@@ -33,7 +33,7 @@ $if !linux {
 ## argument parsing
 
 ```v
-#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
 
 import os
 import flag
diff --git a/manual/best_practices/scripts/shebang.md b/manual/best_practices/scripts/shebang.md
index b9e918b0..718aded9 100644
--- a/manual/best_practices/scripts/shebang.md
+++ b/manual/best_practices/scripts/shebang.md
@@ -5,7 +5,7 @@ is the first line of a
 script, your os will use that one to get started. for V we use
 
 ```bash
-#!/usr/bin/env -S v -n -w -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
 ```
 
 - -w no warnings
diff --git a/test_basic.vsh b/test_basic.vsh
index d0e5345c..74865b8c 100755
--- a/test_basic.vsh
+++ b/test_basic.vsh
@@ -1,4 +1,4 @@
-#!/usr/bin/env -S v -cc gcc -n -w -gc none -no-retry-compilation -d use_openssl -enable-globals run
+#!/usr/bin/env -S v -cc gcc -n -w -gc none -d use_openssl -enable-globals run
 
 import os
 import flag
@@ -30,6 +30,12 @@ fn load_test_cache() TestCache {
 		}
 	}
 }
+fn in_github_actions() bool {
+	// GITHUB_ACTIONS is set by the GitHub Actions runner
+	return 'GITHUB_ACTIONS' in os.environ()
+}
+
+
 // Save the test cache to JSON file
 fn save_test_cache(cache TestCache) {
 	json_str := json.encode_pretty(cache)
@@ -72,7 +78,7 @@ fn get_cache_key(path string, base_dir string) string {
 }
 
 // Check if a file should be ignored or marked as error based on its path
-fn process_test_file(path string, base_dir string, test_files_ignore []string, test_files_error []string, mut cache TestCache, mut tests_in_error []string) ! {
+fn process_test_file(path string, base_dir string, test_files_ignore []string, test_files_error []string, mut cache TestCache) ! {
 	// Get normalized paths
 	norm_path, rel_path := get_normalized_paths(path, base_dir)
 
@@ -85,28 +91,21 @@ fn process_test_file(path string, base_dir string, test_files_ignore []string, t
 	// Check if any ignore pattern matches the path
 	for pattern in test_files_ignore {
-		if pattern.trim_space() != '' && rel_path.contains(pattern) {
-			should_ignore = true
-			break
+		println('Check ignore test: ${pattern} -- ${rel_path} ::: ${rel_path.contains(pattern.trim_space())}')
+		if pattern.trim_space() != '' && rel_path.contains(pattern.trim_space()) {
+			println('Ignoring test: ${rel_path}')
+			return
 		}
 	}
 
 	// Check if any error pattern matches the path
 	for pattern in test_files_error {
-		if pattern.trim_space() != '' && rel_path.contains(pattern) {
-			is_error = true
-			break
-		}
-	}
-
-	if !should_ignore && !is_error {
-		dotest(norm_path, base_dir, mut cache)!
-	} else {
-		println('Ignoring test: ${rel_path}')
-		if !should_ignore {
-			tests_in_error << rel_path
+		if pattern.trim_space() != '' && rel_path.contains(pattern.trim_space()) {
+			println('Ignoring test because it is marked as an error: ${rel_path}')
+			return
 		}
 	}
+	dotest(norm_path, base_dir, mut cache)!
 }
 
 fn dotest(path string, base_dir string, mut cache TestCache) ! {
@@ -119,7 +118,7 @@ fn dotest(path string, base_dir string, mut cache TestCache) ! {
 		return
 	}
 
-	cmd := 'v -stats -enable-globals -n -w -gc none -no-retry-compilation test ${norm_path}'
+	cmd := 'v -stats -enable-globals -n -w -gc none test ${norm_path}'
 	println(cmd)
 	result := os.execute(cmd)
 	eprintln(result)
@@ -173,23 +172,25 @@ lib/develop
 '
 
 // the following tests have no prio and can be ignored
-tests_ignore := '
+mut tests_ignore := '
 notifier_test.v
 clients/meilisearch
 clients/zdb
 clients/openai
 systemd_process_test.v
-
-// We should fix that one
+data/graphdb
+data/radixtree
 clients/livekit
 '
+
+
+if in_github_actions() {
+	println('**** WE ARE IN GITHUB ACTIONS')
+	tests_ignore += '\nosal/tmux\n'
+}
+
 tests_error := '
-net_test.v
-osal/package_test.v
-rpc_test.v
-screen_test.v
-tmux_session_test.v
 tmux_window_test.v
 tmux_test.v
 startupmanager_test.v
@@ -213,12 +214,13 @@ test_files := tests.split('\n').filter(it.trim_space() != '')
 test_files_ignore := tests_ignore.split('\n').filter(it.trim_space() != '')
 test_files_error := tests_error.split('\n').filter(it.trim_space() != '')
 
-mut tests_in_error := []string{}
-
 // Load test cache
 mut cache := load_test_cache()
 println('Test cache loaded from ${cache_file}')
 
+println('tests to ignore')
+println(tests_ignore)
+
 // Run each test with proper v command flags
 for test in test_files {
 	if test.trim_space() == '' || test.trim_space().starts_with('//')
@@ -237,21 +239,12 @@ for test in test_files {
 		// If directory, run tests for each .v file in it recursively
 		files := os.walk_ext(full_path, '.v')
 		for file in files {
-			process_test_file(file, norm_dir_of_script, test_files_ignore, test_files_error, mut
-				cache, mut tests_in_error)!
+			process_test_file(file, norm_dir_of_script, test_files_ignore, test_files_error, mut cache)!
 		}
 	} else if os.is_file(full_path) {
-		process_test_file(full_path, norm_dir_of_script, test_files_ignore, test_files_error, mut
-			cache, mut tests_in_error)!
+		process_test_file(full_path, norm_dir_of_script, test_files_ignore, test_files_error, mut cache)!
 	}
 }
 
 println('All (non skipped) tests ok')
-if tests_in_error.len > 0 {
-	println('\n\033[31mTests that need to be fixed (not executed):')
-	for test in tests_in_error {
-		println(' ${test}')
-	}
-	println('\033[0m')
-}
diff --git a/workflows/hero_build_linux.yml b/workflows/hero_build_linux.yml
new file mode 100644
index 00000000..d1325cc2
--- /dev/null
+++ b/workflows/hero_build_linux.yml
@@ -0,0 +1,97 @@
+name: Build Hero on Linux & Run tests
+
+permissions:
+  contents: write
+
+on:
+  push:
+  workflow_dispatch:
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        include:
+          - target: x86_64-unknown-linux-musl
+            os: ubuntu-latest
+            short-name: linux-i64
+          # - target: aarch64-unknown-linux-musl
+          #   os: ubuntu-latest
+          #   short-name: linux-arm64
+          # - target: aarch64-apple-darwin
+          #   os: macos-latest
+          #   short-name: macos-arm64
+          # - target: x86_64-apple-darwin
+          #   os: macos-13
+          #   short-name: macos-i64
+    runs-on: ${{ matrix.os }}
+    steps:
+      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
+      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!"
+      - run: echo "🔎 The name of your branch is ${{ github.ref_name }} and your repository is ${{ github.repository }}."
+
+      - name: Check out repository code
+        uses: actions/checkout@v3
+
+      - name: Setup Vlang
+        run: |
+          git clone --depth=1 https://github.com/vlang/v
+          cd v
+          make
+          sudo ./v symlink
+          cd ..
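+          # 'v symlink' above links the v binary into the PATH for the later steps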
+ + - name: Setup Herolib + run: | + mkdir -p ~/.vmodules/freeflowuniverse + ln -s $GITHUB_WORKSPACE/lib ~/.vmodules/freeflowuniverse/herolib + + echo "Installing secp256k1..." + # Install build dependencies + sudo apt-get install -y build-essential wget autoconf libtool + + # Download and extract secp256k1 + cd /tmp + wget https://github.com/bitcoin-core/secp256k1/archive/refs/tags/v0.3.2.tar.gz + tar -xvf v0.3.2.tar.gz + + # Build and install + cd secp256k1-0.3.2/ + ./autogen.sh + ./configure + make -j 5 + sudo make install + + # Cleanup + rm -rf secp256k1-0.3.2 v0.3.2.tar.gz + + echo "secp256k1 installation complete!" + + - name: Install and Start Redis + run: | + # Import Redis GPG key + curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg + # Add Redis repository + echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list + # Install Redis + sudo apt-get update + sudo apt-get install -y redis + + # Start Redis + redis-server --daemonize yes + + # Print versions + redis-cli --version + redis-server --version + + - name: Build Hero + run: | + v -cg -enable-globals -w -n cli/hero.v + + - name: Do all the basic tests + run: | + ./test_basic.vsh + env: + LIVEKIT_API_KEY: ${{secrets.LIVEKIT_API_KEY}} + LIVEKIT_API_SECRET: ${{secrets.LIVEKIT_API_SECRET}} + LIVEKIT_URL: ${{secrets.LIVEKIT_URL}} diff --git a/.github/workflows/build_and_test.yml b/workflows/hero_build_macos.yml similarity index 89% rename from .github/workflows/build_and_test.yml rename to workflows/hero_build_macos.yml index 07b56a08..4fa917e5 100644 --- a/.github/workflows/build_and_test.yml +++ b/workflows/hero_build_macos.yml @@ -3,10 +3,6 @@ name: Build Hero & Run tests permissions: contents: write -on: - push: - workflow_dispatch: - jobs: build: strategy: @@ -38,7 +34,7 @@ jobs: - name: Build Hero run: | - v -w -cg -gc none -no-retry-compilation -d use_openssl -enable-globals cli/hero.v + v -w -cg -gc none -d use_openssl -enable-globals cli/hero.v - name: Do all the basic tests run: | diff --git a/.github/workflows/release.yml b/workflows/release.yml similarity index 95% rename from .github/workflows/release.yml rename to workflows/release.yml index d50a4678..5fab971a 100644 --- a/.github/workflows/release.yml +++ b/workflows/release.yml @@ -36,7 +36,7 @@ jobs: - name: Build Hero run: | - v -w -cg -gc none -no-retry-compilation -d use_openssl -enable-globals cli/hero.v -o cli/hero-${{ matrix.target }} + v -w -cg -gc none -d use_openssl -enable-globals cli/hero.v -o cli/hero-${{ matrix.target }} - name: Upload uses: actions/upload-artifact@v4