Compare commits

...

267 Commits

Author SHA1 Message Date
e34ba394b9 Merge branch 'development' into development_heroprompt_v2
* development: (182 commits)
  ...
  ...
  fix ci
  ...
  fix: Ignore regex_convert_test.v test
  refactor: Replace codewalker with pathlib and filemap
  test: Ignore virt/heropods/network_test.v in CI
  feat: implement container keep-alive feature
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  test: Add comprehensive heropods network and container tests
  ...
  ...
  codewalker
  ...

# Conflicts:
#	lib/develop/heroprompt/heroprompt_workspace.v
2025-11-25 18:54:14 +01:00
b3e673b38f ... 2025-11-25 18:53:18 +01:00
c94be548bf Merge branch 'development_nile_installers' into development
* development_nile_installers:
  fix ci
  feat: Add reset functionality to startup commands
  feat(cryptpad): Refactor installer configuration logic
  feat: Add PostgreSQL support for Gitea installer
  feat: Add Gitea Kubernetes installer
  feat(cryptpad): Refactor installer for dynamic configuration
2025-11-25 18:40:44 +01:00
8472d20609 Merge branch 'development_heropods' into development
* development_heropods: (21 commits)
  test: Ignore virt/heropods/network_test.v in CI
  feat: implement container keep-alive feature
  test: Add comprehensive heropods network and container tests
  refactor: Refactor Mycelium configuration and dependencies
  feat: Add Mycelium IPv6 overlay networking
  test: Replace hero binary checks with network test
  feat: Add iptables FORWARD rules for bridge
  Revert "feat: Add `pods` command for container management"
  feat: Add `pods` command for container management
  chore: Enable execution of cmd_run
  feat: Add `run` command for Heroscript execution
  feat: Separate initialization and configuration
  refactor: Remove hero binary installation from rootfs
  refactor: Integrate logger and refactor network operations
  feat: Implement container networking and improve lifecycle
  feat: Auto-install hero binary in containers
  feat: Add container management actions for heropods
  feat: Add heropods library to plbook
  refactor: Rename heropods variable and method
  refactor: Rename container factory to heropods
  ...
2025-11-25 18:40:41 +01:00
27279f8959 Merge branch 'development_gitea_installer' into development_nile_installers
* development_gitea_installer:
  feat: Add PostgreSQL support for Gitea installer
  feat: Add Gitea Kubernetes installer
2025-11-25 18:39:42 +01:00
7d4dc2496c Merge branch 'development_cryptpad' into development_nile_installers
* development_cryptpad:
  feat(cryptpad): Refactor installer configuration logic
  feat(cryptpad): Refactor installer for dynamic configuration
2025-11-25 18:39:27 +01:00
3547f04a79 Merge branch 'development_heropods' into development_nile_installers
* development_heropods: (21 commits)
  test: Ignore virt/heropods/network_test.v in CI
  feat: implement container keep-alive feature
  test: Add comprehensive heropods network and container tests
  refactor: Refactor Mycelium configuration and dependencies
  feat: Add Mycelium IPv6 overlay networking
  test: Replace hero binary checks with network test
  feat: Add iptables FORWARD rules for bridge
  Revert "feat: Add `pods` command for container management"
  feat: Add `pods` command for container management
  chore: Enable execution of cmd_run
  feat: Add `run` command for Heroscript execution
  feat: Separate initialization and configuration
  refactor: Remove hero binary installation from rootfs
  refactor: Integrate logger and refactor network operations
  feat: Implement container networking and improve lifecycle
  feat: Auto-install hero binary in containers
  feat: Add container management actions for heropods
  feat: Add heropods library to plbook
  refactor: Rename heropods variable and method
  refactor: Rename container factory to heropods
  ...
2025-11-25 18:39:08 +01:00
253e26aec6 Merge branch 'development' into development_nile_installers
* development: (27 commits)
  ...
  ...
  fix: Ignore regex_convert_test.v test
  refactor: Replace codewalker with pathlib and filemap
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  codewalker
  fix: Iterate over product requirement documents directly
  ...
  ...
  ...
  ...
  ...
2025-11-25 18:38:53 +01:00
a96d6e8aaa Merge branch 'development' of github.com:incubaid/herolib into development
* 'development' of github.com:incubaid/herolib:
  fix: Ignore regex_convert_test.v test
  refactor: Replace codewalker with pathlib and filemap
2025-11-25 18:38:27 +01:00
9fe669c5b8 ... 2025-11-25 18:38:21 +01:00
Timur Gordon
7e9bc1c41e fix ci 2025-11-25 14:55:00 +01:00
Timur Gordon
ee64a9fc58 Merge pull request #205 from Incubaid/development_fix_zinit
fix: Zinit client error when creating service
2025-11-25 14:41:51 +01:00
769c88adc8 ... 2025-11-25 14:08:52 +01:00
Mahmoud-Emad
520769a63e fix: Ignore regex_convert_test.v test 2025-11-25 14:55:18 +02:00
Mahmoud-Emad
1399d53748 refactor: Replace codewalker with pathlib and filemap
- Use pathlib for directory listing and filtering
- Use filemap for building file trees from selected directories
- Update build_file_map to use pathlib for recursive file listing
- Handle filemap building for standalone files and selected directories
2025-11-25 14:48:12 +02:00
Mahmoud-Emad
9f75a454fa test: Ignore virt/heropods/network_test.v in CI
- Add virt/heropods/network_test.v to ignored tests
- Ignore test requiring root for network bridge operations
2025-11-25 12:43:13 +00:00
Mahmoud-Emad
9a5973d366 feat: implement container keep-alive feature
- Add `keep_alive` parameter to `container_start`
- Implement logic to restart containers with `tail -f /dev/null` after successful entrypoint exit
- Update `podman_pull_and_export` to also extract image metadata
- Enhance `create_crun_config` to use extracted image metadata (ENTRYPOINT, CMD, ENV)
- Refactor test suite to use `keep_alive: true` for Alpine containers
2025-11-25 13:59:45 +02:00
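
A minimal sketch of the keep-alive flow this commit describes. `run_entrypoint` and `restart_with_cmd` are hypothetical stand-ins for the real herolib methods; only the `keep_alive` parameter and the `tail -f /dev/null` restart come from the commit message:

```v
// Sketch only: HeroPods, run_entrypoint and restart_with_cmd are
// stand-ins for the real herolib API.
struct HeroPods {}

fn (hp HeroPods) run_entrypoint(name string) ! {
}

fn (hp HeroPods) restart_with_cmd(name string, cmd string) ! {
}

@[params]
pub struct ContainerStartArgs {
pub mut:
	name       string
	keep_alive bool
}

fn (hp HeroPods) container_start(args ContainerStartArgs) ! {
	hp.run_entrypoint(args.name)!
	if args.keep_alive {
		// keep the container up with a no-op process after a clean exit
		hp.restart_with_cmd(args.name, 'tail -f /dev/null')!
	}
}
```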
fc41d3c62c ... 2025-11-25 06:13:56 +01:00
74146177e3 Merge branch 'development' of github.com:incubaid/herolib into development 2025-11-25 06:10:42 +01:00
c755821e34 ... 2025-11-25 06:10:17 +01:00
50a770c3ca ... 2025-11-25 06:03:37 +01:00
22dfcf4afa ... 2025-11-25 06:01:26 +01:00
b09e3ec0e1 ... 2025-11-25 05:51:55 +01:00
de7e1abcba ... 2025-11-25 05:44:58 +01:00
03d9e97008 ... 2025-11-25 05:23:17 +01:00
43eb15be7a ... 2025-11-25 05:13:02 +01:00
Mahmoud-Emad
76876049be test: Add comprehensive heropods network and container tests
- Add wait_for_process_ready to container start
- Reduce sigterm and stop check timeouts
- Update default container base directory
- Introduce new heropods test suite with multiple tests
- Add tests for initialization and custom network config
- Add tests for Docker image pull and container creation
- Add tests for container lifecycle (start, stop, delete)
- Add tests for container command execution
- Add tests for network IP allocation
- Add tests for IPv4 connectivity
- Add tests for container deletion and IP cleanup
- Add tests for bridge network setup and NAT rules
- Add tests for IP pool management
- Add tests for custom bridge configuration
2025-11-24 14:02:36 +02:00
803828e808 ... 2025-11-24 07:09:54 +01:00
9343772bc5 ... 2025-11-24 06:08:05 +01:00
d282a5dc95 codewalker 2025-11-24 05:48:13 +01:00
Mahmoud-Emad
dd7baa59b0 Merge branch 'development' into development_heropods 2025-11-23 13:06:50 +02:00
Mahmoud-Emad
69264adc3d fix: Iterate over product requirement documents directly
- Iterate over PRD objects instead of just IDs
- Pass PRD ID to delete function correctly
2025-11-23 12:13:25 +02:00
Mahmoud-Emad
3f943de9ed Merge branch 'development_nile_installers' into development_fix_zinit 2025-11-23 11:08:31 +02:00
Mahmoud-Emad
feffc09f73 Merge branch 'development' into development_nile_installers 2025-11-23 11:06:51 +02:00
Mahmoud-Emad
f11e0c689a Merge branch 'development' into development_nile_installers 2025-11-23 11:06:33 +02:00
Mahmoud-Emad
7c9f7c7568 Merge branch 'development_fix_zinit' of github.com:incubaid/herolib into development_fix_zinit 2025-11-23 11:01:59 +02:00
Mahmoud-Emad
dcd5af4d5f feat: Add reset functionality to startup commands
- Add `reset` boolean parameter to `StartArgs` struct
- Pass `reset` parameter to `startupcmd` calls
- Update service creation logic to handle `reset` flag
- Modify `install_start` and `restart` to pass `reset` parameter
2025-11-23 11:01:47 +02:00
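
A hedged sketch of the `reset` wiring described above; the struct and function names follow the commit message, while the zinit plumbing is elided:

```v
@[params]
pub struct StartArgs {
pub mut:
	name  string = 'default'
	reset bool // when true, remove and re-create the service before starting
}

// Stand-in for the real zinit-backed service creation.
fn startupcmd(args StartArgs) ! {
	if args.reset {
		println('resetting service ${args.name} before start')
	}
	println('starting service ${args.name}')
}
```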
4402cba8ac ... 2025-11-23 08:29:37 +01:00
01639853ce ... 2025-11-23 07:18:45 +01:00
0a25fc95b5 ... 2025-11-23 06:38:12 +01:00
9b5301f2c3 ... 2025-11-23 05:52:28 +01:00
2998a6e806 ... 2025-11-23 05:03:32 +01:00
0916ff07f8 ... 2025-11-23 05:01:31 +01:00
679108eb9e ... 2025-11-23 04:46:40 +01:00
1d4770aca5 ... 2025-11-23 04:43:08 +01:00
61a3677883 ... 2025-11-23 04:22:25 +01:00
27d2723023 .. 2025-11-22 18:32:19 +01:00
3d8effeac7 ... 2025-11-22 11:58:46 +02:00
a080fa8330 Merge branch 'development_zosbuilder' into development 2025-11-22 05:39:44 +02:00
d1584e929e Merge branch 'development' into development_fix_zinit
* development:
  ....
2025-11-22 05:37:21 +02:00
8733bc3fa8 Merge branch 'development' of github.com:incubaid/herolib into development
* 'development' of github.com:incubaid/herolib:
  refactor: Simplify default server retrieval
  test: Clear test database before running test
  test: Remove risks from PRD tests
  refactor: Pass rclone name as keyword argument
  test: Update test and script URL
  Refactor the herolib repo:
  test: Make page_exists call explicit
  test: Update image link assertion
  refactor: Extract heroscript path handling logic
  refactor: Keep file extensions when getting files
  refactor: Update image assertion syntax
  feat: Validate single input method for hero run
  feat: add cmd_run for heroscript execution
2025-11-22 05:36:12 +02:00
3ecb8c1130 .... 2025-11-22 05:36:04 +02:00
Mahmoud-Emad
26528a889d refactor: Update get calls and clean up debug
- Add `create: true` to get calls
- Remove commented-out print_backtrace
- Remove debug print for socket closure
2025-11-20 14:00:29 +02:00
Timur Gordon
41e3d2afe4 Merge branch 'development_nile_installers' into development_fix_zinit 2025-11-20 07:56:48 +01:00
Mahmoud-Emad
8daca7328d refactor: Improve service startup and management logic
- Add `create: true` to service `get` calls
- Update `running_check` to use `curl` for HTTP status code
- Ensure redis addresses have `redis://` prefix
- Clean up and re-create zinit services before starting
- Remove redundant `service_monitor` call in `startupmanager.start`
2025-11-19 17:08:49 +02:00
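
Two of the behaviours above are easy to sketch in isolation, under assumed helper names: the `redis://` prefix normalization and the curl-based HTTP status check.

```v
import os

// ensure a redis address carries the redis:// scheme (assumed helper name)
fn normalize_redis_addr(addr string) string {
	if addr.starts_with('redis://') {
		return addr
	}
	return 'redis://${addr}'
}

// curl-based health check: fetch only the HTTP status code, treat 200 as up
fn running_check(url string) bool {
	res := os.execute('curl -s -o /dev/null -w "%{http_code}" ${url}')
	return res.exit_code == 0 && res.output.trim_space() == '200'
}
```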
Timur Gordon
86da2cd435 chore: remove test installer and update template escape sequence
- Remove tester installer from playcmds factory
- Update template to use ^^ escape for @[params] annotation
- Format various model and actions files
2025-11-19 15:51:20 +01:00
Timur Gordon
a5c4b8f6f8 refactor: merge action handling blocks in play() functions
Merge the two separate if blocks for handling actions into a single block
since they both use the same logic for getting the name parameter with
get_default('name', 'default').

Changes:
- Combine destroy/install/build and start/stop/restart/lifecycle blocks
- All actions now use consistent name parameter handling
- Reduces code duplication in play() functions

Updated files:
- All 5 horus installer factory files
- Generator template objname_factory_.vtemplate
2025-11-19 15:44:50 +01:00
Timur Gordon
856a6202ee fix: escape @ symbol in template for InstallArgs annotation
Use @@ instead of @ in template to properly output @[params] in generated code.
V templates require double @@ to escape the @ symbol.
2025-11-19 15:17:31 +01:00
Timur Gordon
a40e172457 refactor: update installer generator templates to instance-based API
Update generator templates to produce installers following the new pattern:

Actions template (objname_actions.vtemplate):
- Convert all functions to methods on the config struct
- startupcmd() -> (self &Struct) startupcmd()
- running() -> (self &Struct) running_check()
- start_pre/post, stop_pre/post -> methods on struct
- installed(), install(), build(), destroy() -> methods on struct
- Add InstallArgs struct with reset parameter
- Remove get()! calls, use self instead

Factory template (objname_factory_.vtemplate):
- Update play() to get name parameter for all actions
- Call instance methods instead of module-level functions
- Add support for start_pre, start_post, stop_pre, stop_post actions
- Update start(), stop(), running() to use self.method() calls
- Remove duplicate InstallArgs and wrapper methods
- Use self.running_check() instead of running()

All newly generated installers will now follow the consistent
instance-based pattern with proper lifecycle hook support.
2025-11-19 15:09:24 +01:00
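
An illustrative reduction of the instance-based pattern these templates now emit. The struct name and the `reset` field are taken from the commit message; the rest is assumed:

```v
pub struct Coordinator {
pub mut:
	name string = 'default'
}

@[params]
pub struct InstallArgs {
pub mut:
	reset bool
}

// module-level functions become methods on the config struct
fn (self &Coordinator) running_check() !bool {
	return true
}

fn (mut self Coordinator) install(args InstallArgs) ! {
	if args.reset {
		println('forcing a clean rebuild of ${self.name}')
	}
	println('installing ${self.name}')
}
```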
Mahmoud-Emad
012a59b3d8 Merge branch 'development_nile_installers' into development_fix_zinit 2025-11-19 16:04:30 +02:00
Mahmoud-Emad
6334036b79 Merge branch 'development' into development_fix_zinit 2025-11-19 15:18:23 +02:00
Mahmoud-Emad
df462174e5 refactor: Refactor Mycelium configuration and dependencies
- Flatten MyceliumConfig struct into HeroPods
- Remove Mycelium installer and service management logic
- Update Mycelium initialization to check for prerequisites only
- Adjust peers configuration to be comma-separated string
2025-11-19 15:17:39 +02:00
Mahmoud-Emad
1452d65f48 feat: Add Mycelium IPv6 overlay networking
- Integrate Mycelium IPv6 overlay networking
- Add Mycelium configuration options to HeroPods
- Enable Mycelium setup and cleanup for containers
- Include Mycelium examples and documentation
2025-11-19 14:23:06 +02:00
Timur Gordon
fcb178156b rename some installers, fix installer service startup w/ zinit 2025-11-19 11:42:55 +01:00
Timur Gordon
28313ad22f refactor: convert horus installers to instance-based API
- Convert all module-level functions to methods on config structs
- Add InstallArgs struct with reset parameter to actions files
- Update factory play() functions to call instance methods with name parameter
- Remove duplicate InstallArgs and wrapper methods from factory files
- Add support for start_pre, start_post, stop_pre, stop_post as callable actions
- Rename running() to running_check() to avoid conflicts
- All lifecycle methods (install, destroy, build, start, stop, etc.) now accept optional name parameter

Affected installers:
- coordinator
- supervisor
- herorunner
- osirisrunner
- salrunner

This provides a cleaner, more consistent API where all installer actions
can be called on specific configuration instances from heroscript files.
2025-11-19 11:03:36 +01:00
Mahmoud-Emad
9986dca758 test: Replace hero binary checks with network test
- Test network connectivity using wget
- Removed checks for hero binary existence and version
2025-11-19 11:03:17 +02:00
Mahmoud-Emad
9af9ab40b5 feat: Add iptables FORWARD rules for bridge
- Allow traffic from bridge to external interface
- Allow established traffic from external to bridge
- Allow traffic between containers on same bridge
2025-11-19 10:50:09 +02:00
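
The three rules in this commit map naturally onto three iptables invocations. A sketch, with bridge and interface names as placeholders and shelling out via `os.execute` rather than herolib's own exec helpers:

```v
import os

fn add_forward_rules(bridge string, ext_if string) ! {
	rules := [
		// bridge -> external interface
		'iptables -A FORWARD -i ${bridge} -o ${ext_if} -j ACCEPT',
		// established traffic back from external -> bridge
		'iptables -A FORWARD -i ${ext_if} -o ${bridge} -m state --state RELATED,ESTABLISHED -j ACCEPT',
		// container-to-container traffic on the same bridge
		'iptables -A FORWARD -i ${bridge} -o ${bridge} -j ACCEPT',
	]
	for rule in rules {
		res := os.execute(rule)
		if res.exit_code != 0 {
			return error('iptables rule failed: ${rule}: ${res.output}')
		}
	}
}
```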
Mahmoud-Emad
4b5a9741a0 Revert "feat: Add pods command for container management"
This reverts commit 11c3ea9ca5.
2025-11-18 16:26:49 +02:00
Mahmoud-Emad
11c3ea9ca5 feat: Add pods command for container management
- Implement `hero pods` CLI command
- Add subcommands for ps, images, create, start, stop, rm, exec, inspect
- Add flags for container creation and removal
2025-11-18 16:09:20 +02:00
Mahmoud-Emad
e7a38e555b Merge branch 'development' into development_heropods 2025-11-18 12:41:37 +02:00
Mahmoud-Emad
adb012e9cf refactor: Simplify default server retrieval
- Remove unused logic for default server lookup
- Consolidate server retrieval for Cryptpad and ElementChat
- Update default server assignment logic
2025-11-18 12:37:26 +02:00
Mahmoud-Emad
8ca7985753 refactor: Update default coordinator name
- Change default `ArgsGet.name` to 'default'
- Remove logic for overriding `args.name` with `coordinator_default`
- Set `coordinator_default` directly to `args.name`
2025-11-18 12:34:47 +02:00
Mahmoud-Emad
82378961db refactor: Remove unused default name logic
- Remove conditional logic for 'default' name
- Simplify 'get' function argument handling
2025-11-18 12:33:14 +02:00
Mahmoud-Emad
f9a2ebf24b chore: Refactor coordinator configuration and status reporting
- Update default coordinator name to 'coordinator'
- Improve status reporting by using dedicated variables
- Adjust `zinit.get` call to use `create: true`
- Set `zinit_default` based on `args.name` when 'default' is provided
- Update `coordinatorServer.name` default to 'coordinator'
- Make 'coordinator' the default for `ArgsGet.name`
- Use `coordinator_default` for `ArgsGet.name` if set
- Adjust `CoordinatorServer.binary_path` default
- Update `zinit.get` to use `create: true`
- Log socket closure for debugging
- Remove unused import `incubaid.herolib.core.texttools`
2025-11-18 11:50:52 +02:00
Mahmoud-Emad
9dc33e3ce9 Merge branch 'development' into development_nile_installers 2025-11-18 10:02:23 +02:00
Mahmoud-Emad
ae4997d80a Merge branch 'development' into development_heropods 2025-11-18 09:55:33 +02:00
Timur Gordon
b26c1f74e3 Merge branch 'development_nile_installers' of github.com:Incubaid/herolib into development_nile_installers 2025-11-17 16:02:44 +01:00
Timur Gordon
bf6dec48f1 add other horus installers, create examples, test startup 2025-11-17 15:57:40 +01:00
Mahmoud-Emad
3a05dc8ae0 test: Clear test database before running test
- Flush the Redis database for a clean test state
2025-11-17 15:58:01 +02:00
Mahmoud-Emad
5559bd4f2f test: Remove risks from PRD tests
- Remove assertions for risks in PRD tests
- Remove risk map initialization and population
- Update PRD encoding/decoding test
2025-11-17 15:40:14 +02:00
Mahmoud-Emad
9d79408931 refactor: Pass rclone name as keyword argument
- Update rclone.new call to use named argument
2025-11-17 15:28:47 +02:00
Mahmoud-Emad
f5c2b306b8 test: Update test and script URL
- Remove frontmatter test due to disabled parsing
- Update install script URL
2025-11-17 15:23:08 +02:00
Mahmoud-Emad
49868a18e1 Refactor the herolib repo:
- Removed the unused files
- Updated the README
- Added all needed scripts in /scripts dir
- Update script paths in CI configuration
- Update script paths in Go code
- Move installation scripts to scripts directory
- Change script path from ./install_v.sh to ./scripts/install_v.sh
2025-11-17 15:11:55 +02:00
Mahmoud-Emad
2ab0dfa6b8 test: Make page_exists call explicit
- Add '!' to col.page_exists('intro') call
2025-11-17 14:56:43 +02:00
Mahmoud-Emad
82375f9b89 test: Update image link assertion
- Change assertion for image link detection
- Use `file_type` instead of `is_image_link`
2025-11-17 14:51:15 +02:00
peternashaat
f664823a90 refactor: Remove Redis installation from coordinator, improve Rust detection
- Remove all Redis installation logic from coordinator installer
- Add osal.cmd_exists() check before installing Rust
- Update docs: Redis must be pre-installed
- Add reset flag documentation for forcing rebuilds
- Coordinator now only installs Rust and builds binary
2025-11-17 12:50:59 +00:00
Mahmoud-Emad
9b2e9114b8 refactor: Extract heroscript path handling logic
- Add helper function to expand and validate file paths
- Add helper function to validate heroscript content
- Add helper function to run heroscript from file
- Inline scripts now validated before execution
- File-based scripts now use the new run_from_file helper
2025-11-17 14:43:08 +02:00
Mahmoud-Emad
8dc2b360ba refactor: Keep file extensions when getting files
- Use `name_fix_keepext` instead of `name_fix`
- Update calls to `image_get`, `file_get`, and `file_or_image_get`
- Update checks in `image_exists`, `file_exists`, and `file_or_image_exists`
2025-11-17 13:39:25 +02:00
Mahmoud-Emad
49e48e7aca refactor: Update image assertion syntax
- Add '!' to image_exists calls
- Update image file name access
2025-11-17 12:18:26 +02:00
Mahmoud-Emad
586c6db34e Merge branch 'development' into development_heropods 2025-11-17 12:08:06 +02:00
Mahmoud-Emad
122a864601 Merge branch 'development' into development_heropods 2025-11-17 12:05:53 +02:00
Omdanii
571bc31179 Merge pull request #202 from Incubaid/development_herorun_cmd
Add Support for `hero run` Command
2025-11-17 12:03:28 +02:00
Mahmoud-Emad
35734b5ebc feat: Validate single input method for hero run
- Add validation for multiple input methods
- Improve error message for no script provided
- Update usage instructions in help message
2025-11-17 12:02:16 +02:00
Mahmoud-Emad
15f81aca41 feat: add cmd_run for heroscript execution
- Add `cmd_run` function to `herocmds` module
- Allow running heroscripts from inline strings via `-s` flag
- Enable running heroscripts from file paths via `-p` flag or as arguments
- Add `-r` flag to reset before running
2025-11-17 11:53:48 +02:00
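
A sketch of the single-input-method validation that the follow-up commit (35734b5ebc above) adds on top of this one. The flag names come from the commit messages; the cli framework wiring is omitted:

```v
fn validate_run_inputs(inline_script string, script_path string, extra_args []string) ! {
	mut methods := 0
	if inline_script != '' {
		methods++
	}
	if script_path != '' {
		methods++
	}
	if extra_args.len > 0 {
		methods++
	}
	if methods == 0 {
		return error('no heroscript provided: use -s, -p, or pass a file path')
	}
	if methods > 1 {
		return error('pick one input method: -s (inline), -p (path), or a file argument')
	}
}
```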
peternashaat
1484fec898 - Renamed HerocoordinatorServer to CoordinatorServer 2025-11-17 07:58:28 +00:00
peternashaat
06fcfa5b50 feat: add dynamic Redis configuration to coordinator installer
- Add redis_port field to CoordinatorServer struct
- Refactor ensure_redis_running() to use @[params] pattern
- Pass redis_port and redis_addr dynamically from config
- Follow same pattern as cryptpad installer for consistency
2025-11-16 13:34:38 +00:00
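
An assumed sketch of the `@[params]` pattern this commit refers to; the field names match the commit message, the defaults are placeholders:

```v
@[params]
pub struct RedisRunArgs {
pub mut:
	redis_port int    = 6379
	redis_addr string = 'localhost'
}

fn ensure_redis_running(args RedisRunArgs) ! {
	println('checking redis at ${args.redis_addr}:${args.redis_port}')
	// ... install and start redis via the installer when the check fails
}
```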
peternashaat
a26f0a93fe refactor: improve coordinator installer code quality
- Remove unused imports (texttools, paramsparser)
- Move ensure_redis_running() helper to better location
- Add comments to lifecycle hook functions
- Improve error handling for binary copy operation
- Add documentation to obj_init function
2025-11-16 13:18:32 +00:00
peternashaat
eb0fe4d3a9 refactor: move coordinator installer to horus directory and fix Redis installer permissions
- Moved coordinator installer from installers/infra to installers/horus
- Renamed HerocoordinatorServer to CoordinatorServer
- Fixed Redis installer permissions for /var/lib/redis directory
- Integrated coordinator with new modular Redis installer
2025-11-16 13:05:29 +00:00
8a7987b9c3 ... 2025-11-15 07:09:56 +02:00
70d581fb57 Merge branch 'development' of github.com:incubaid/herolib into development 2025-11-15 06:16:01 +02:00
d267c1131f ... 2025-11-15 06:15:02 +02:00
1ac9092eed adds zosbuilder
Signed-off-by: Ashraf Fouda <ashraf.m.fouda@gmail.com>
2025-11-15 01:19:33 +02:00
peternashaat
bf79d6d198 feat: migrate Redis installer and add to coordinator dependencies
- Migrated Redis to new installer pattern with fixed config template
- Coordinator now auto-installs Redis if missing
- Added progress indicators and consolidated examples
2025-11-14 13:29:22 +00:00
peternashaat
eada09135c feat: migrate Redis installer and integrate into coordinator
- Created coordinator installer
- Migrated Redis installer to new modular pattern (_model.v, _actions.v, _factory_.v)
- Fixed Redis config template for 7.0.15 compatibility (commented out unsupported directives)
- Added Redis dependency check to coordinator installer
- Coordinator now auto-installs and starts Redis if not available
- Added progress indicators to coordinator build process
- Consolidated Redis example scripts
- All tests passing: Redis installation, coordinator build, and idempotency verified
2025-11-14 13:25:35 +00:00
a447aeec43 Merge pull request #182 from Incubaid/development_installer
Update install script
2025-11-14 02:23:56 -08:00
Timur Gordon
78d848783a fix breaking code 2025-11-14 10:26:45 +01:00
Mahmoud-Emad
8c2b5a8f5e chore: Enable execution of cmd_run
- Uncomment herocmds.cmd_run(mut cmd)
2025-11-14 11:21:07 +02:00
Mahmoud-Emad
fcb5964f8d feat: Add run command for Heroscript execution
- Add `cmd_run` to execute heroscripts from files or inline
- Implement file path handling and inline script execution
- Add Linux platform check for HeroPods initialization
- Update documentation to reflect Linux-only requirement
2025-11-14 11:20:26 +02:00
e97e0d77be Merge branch 'development' into development_docusaurus_atlas
* development:
  Fix redis package name for alpine
  ...
2025-11-14 08:52:21 +02:00
16155480de Merge branch 'development' of github.com:incubaid/herolib into development
* 'development' of github.com:incubaid/herolib: (26 commits)
  Fix redis package name for alpine
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  feat: Enhance docusaurus site generation with atlas client
  feat: Improve export self-containment and link handling
  ...
  feat: Add Atlas Export and AtlasClient example
  ...
2025-11-14 08:52:13 +02:00
e7611d4dc2 ... 2025-11-14 08:51:32 +02:00
Scott Yeager
45215b0abb Update installer 2025-11-13 08:04:17 -08:00
Scott Yeager
7246223e3b Use run_sudo everywhere 2025-11-12 05:37:56 -08:00
Scott Yeager
1958f24528 Add herolib version arg 2025-11-12 05:23:34 -08:00
c033cacd5b Fix redis package name for alpine 2025-11-12 05:11:01 -08:00
Mahmoud-Emad
d3f05c1834 feat: Separate initialization and configuration
- Move network defaults to obj_init
- Add initialize() method for heavy setup
- Improve separation of concerns for HeroPods initialization
2025-11-12 13:32:53 +02:00
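
A sketch of the split described above: cheap defaults in `obj_init`, heavy setup in `initialize()`. The function names come from the commit message; the fields are assumed:

```v
pub struct HeroPods {
pub mut:
	bridge_name string
	initialized bool
}

// obj_init only fills in configuration defaults, no side effects
fn obj_init(mut hp HeroPods) {
	if hp.bridge_name == '' {
		hp.bridge_name = 'heropods0' // placeholder default
	}
}

// initialize performs the expensive work (bridge, NAT rules, ...)
fn (mut hp HeroPods) initialize() ! {
	if hp.initialized {
		return
	}
	// ... create the network bridge, iptables rules, etc.
	hp.initialized = true
}
```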
Mahmoud-Emad
4bf16d6f70 refactor: Remove hero binary installation from rootfs
- Remove function `install_hero_in_rootfs`
- Remove call to `install_hero_in_rootfs`
2025-11-12 11:39:01 +02:00
Mahmoud-Emad
ad7e1980a5 refactor: Integrate logger and refactor network operations
- Replace console logging with logger.log calls
- Improve network bridge creation robustness
- Enhance network IP allocation and cleanup logic
- Refactor network cleanup for better concurrency handling
2025-11-12 11:28:56 +02:00
Mahmoud-Emad
7836a48ad4 feat: Implement container networking and improve lifecycle
- Add thread-safe network management for containers
- Implement graceful and forceful container stopping
- Enhance container creation and deletion logic
- Refine image management and metadata handling
- Add container name validation for security
2025-11-12 10:38:39 +02:00
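
The graceful-then-forceful stop can be sketched as SIGTERM, a polling wait, then SIGKILL. This is an assumed reduction; the real herolib logic goes through the container runtime rather than raw `kill`:

```v
import os
import time

fn stop_process(pid int, timeout_s int) {
	os.execute('kill -TERM ${pid}')
	for _ in 0 .. timeout_s {
		alive := os.execute('kill -0 ${pid}') // exit code 0 => still running
		if alive.exit_code != 0 {
			return // exited gracefully
		}
		time.sleep(1 * time.second)
	}
	os.execute('kill -KILL ${pid}') // forceful fallback
}
```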
Mahmoud-Emad
1d67522937 feat: Auto-install hero binary in containers
- Install hero binary into container rootfs
- Compile hero binary if not found on host
- Copy hero binary to container's /usr/local/bin
- Make hero binary executable in container
2025-11-11 12:48:34 +02:00
Mahmoud-Emad
e6c3ed93fa feat: Add container management actions for heropods
- Add processing for heropods.container_new
- Add processing for heropods.container_start
- Add processing for heropods.container_exec
- Add processing for heropods.container_stop
- Add processing for heropods.container_delete
2025-11-11 11:24:58 +02:00
Mahmoud-Emad
2fafd025eb feat: Add heropods library to plbook
- Import heropods library
- Play heropods library in plbook
2025-11-11 10:40:27 +02:00
Mahmoud-Emad
3ae980d4c5 refactor: Rename heropods variable and method
- Rename `factory` to `heropods_`
- Rename `factory.new` to `heropods_.container_new`
- Update error message for `heropods.new`
2025-11-11 10:11:01 +02:00
Mahmoud-Emad
e94734ecc7 refactor: Rename container factory to heropods
- Rename `factory` variable to `heropods_`
- Update calls from `factory.new` to `heropods_.container_new`
- Adjust error message for initialization
- Update print statements to reflect renaming
2025-11-11 10:08:59 +02:00
Mahmoud-Emad
deb1210405 refactor: Rename ContainerFactory to HeroPods
- Rename ContainerFactory struct to HeroPods
- Update method names and receiver types accordingly
- Adjust imports and internal references
2025-11-11 10:08:15 +02:00
Omdanii
759870e01e Merge pull request #196 from Incubaid/development_docusaurus_atlas
Docusaurus Atlas tool
2025-11-10 08:38:03 +02:00
891f3bf66d ... 2025-11-09 08:53:08 +04:00
3179d362fc ... 2025-11-09 08:47:11 +04:00
69d9949c39 ... 2025-11-09 08:20:11 +04:00
5d2adb1a2c ... 2025-11-09 08:17:00 +04:00
c409d42f64 ... 2025-11-09 07:43:44 +04:00
2dad87ad5e ... 2025-11-09 06:41:23 +04:00
fd5a348e20 ... 2025-11-09 06:36:05 +04:00
93fc823e00 ... 2025-11-09 06:25:44 +04:00
f40565c571 ... 2025-11-08 11:12:16 +04:00
5a6f3d323b ... 2025-11-07 07:58:53 +04:00
836a8f799e ... 2025-11-07 07:47:42 +04:00
b9a84ee8fc ... 2025-11-07 07:39:05 +04:00
0d3b4357ac ... 2025-11-07 07:24:38 +04:00
ea1a49ffd5 ... 2025-11-07 07:19:28 +04:00
f4de662fc2 ... 2025-11-07 07:00:23 +04:00
Mahmoud-Emad
a149845fc7 feat: Enhance docusaurus site generation with atlas client
- Add flags for development server and browser opening
- Introduce IDocClient interface for unified client access
- Implement atlas_client integration for Docusaurus
- Refactor link handling and image path resolution
- Update Docusaurus config with atlas client options
2025-11-06 15:44:09 +02:00
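
A hypothetical shape for the `IDocClient` interface named in this commit, letting the Docusaurus generator read pages from either doctree or an atlas export; the method set is assumed:

```v
interface IDocClient {
	list_collections() ![]string
	page_get(collection string, page string) !string
}

// a stub atlas-backed implementation
struct AtlasClient {
	export_path string
}

fn (c AtlasClient) list_collections() ![]string {
	return []string{}
}

fn (c AtlasClient) page_get(collection string, page string) !string {
	return error('not implemented in this sketch')
}
```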
a2eaf6096e ... 2025-11-06 16:14:34 +04:00
Mahmoud-Emad
5fccd03ee7 Merge branch 'development_docusaurus_atlas' of github.com:incubaid/herolib into development_docusaurus_atlas 2025-11-06 10:51:42 +02:00
Mahmoud-Emad
347ebed5ea feat: Improve export self-containment and link handling
- Use absolute paths for path_relative calculations
- Validate links before export to populate page.links
- Copy cross-collection referenced pages for self-contained export
- Update export_link_path to generate local links for self-contained exports
- Remove page from visited map to allow re-inclusion in other contexts
2025-11-06 10:51:10 +02:00
b582bd03ef ... 2025-11-06 09:40:59 +04:00
Mahmoud-Emad
ac09648a5b feat: Add Atlas Export and AtlasClient example
- Add example for exporting Atlas collections
- Demonstrate using AtlasClient to read exported content
- Include examples for listing collections and pages
- Show reading page content and metadata via AtlasClient
2025-11-05 16:08:56 +02:00
Mahmoud-Emad
04e1e2375f refactor: Remove docusaurus dev server and path_meta flag
- Remove 'dev' flag from run command
- Remove 'path_meta' flag from run command
- Remove docusaurus integration from playcmds
- Add `validate_links` and `fix_links` to Atlas
- Refactor page link processing for clarity and export mode
2025-11-05 15:25:50 +02:00
Mahmoud-Emad
a2ac8c0027 refactor: Simplify text normalization comments
- Remove outdated comments related to normalization
- Update comments for clarity
2025-11-05 10:04:57 +02:00
Mahmoud-Emad
2150b93a80 refactor: Update name normalization logic
- Use texttools.name_fix instead of name_fix_no_underscore_no_ext
- Preserve underscores in normalized names
- Update documentation and tests to reflect changes
2025-11-05 10:01:18 +02:00
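
Illustrative only: `name_fix` keeps underscores, which the earlier `name_fix_no_underscore_no_ext` variant stripped. The exact output shown in the comment is an assumption:

```v
import incubaid.herolib.core.texttools

fn main() {
	println(texttools.name_fix('My_Page Name')) // expected: 'my_page_name'
}
```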
Mahmoud-Emad
10b9af578a feat: Add Docusaurus dev server integration
- Add 'dev' flag to run Docusaurus server
- Import docusaurus library
- Enable scan and export if 'dev' flag is set
- Handle export errors more gracefully
- Start Docusaurus dev server after export
2025-11-04 16:49:00 +02:00
Mahmoud-Emad
8bfb021939 feat: Support atlas_client module:
- Add client for atlas module
- Add unit tests to test the workflow
- Remove println statements from file_or_image_exists
- Remove println statements from link processing loop
2025-11-04 15:56:07 +02:00
Mahmoud-Emad
ecfe77a2dc refactor: Normalize page and collection names
- Use `name_fix_no_underscore_no_ext` for consistent naming
- Remove underscores and special characters from names
- Add tests for name normalization functions
- Ensure page and collection names are consistently formatted
- Update link parsing to use normalized names
2025-11-04 12:28:13 +02:00
peternashaat
683008da8f feat(cryptpad): Refactor installer configuration logic
Refactors the CryptPad installer to improve its configuration handling.

- The `hostname` and `namespace` are now derived from the installer's `name` property by default.
- Implemented name sanitization to remove special characters (`_`, `-`, `.`).
- Added validation to ensure the namespace does not contain invalid characters.
- Updated the factory's `reload` function to persist changes made to the installer object after its initial creation.

This change ensures consistent and predictable behavior, allowing for both default generation and manual override of configuration values.

Co-authored-by: Mahmoud-Emad <mahmmoud.hassanein@gmail.com>
2025-11-04 09:01:53 +00:00
Omdanii
ef14bc6d82 Merge pull request #184 from Incubaid/development_heroserver_errors
refactor: Update library paths
2025-11-03 23:21:42 +02:00
Mahmoud-Emad
bafc519cd7 feat: Add PostgreSQL support for Gitea installer
- Add PostgreSQL configuration options
- Generate PostgreSQL YAML when selected
- Verify PostgreSQL pod readiness
- Update documentation for PostgreSQL usage
- Add PostgreSQL service and pod definitions
2025-11-03 17:04:40 +02:00
Mahmoud-Emad
472e4bfaaa feat: Add Gitea Kubernetes installer
- Add Gitea installer module and types
- Implement installation and destruction logic
- Integrate with Kubernetes and TFGW
- Add example usage and documentation
2025-11-03 16:25:21 +02:00
peternashaat
e3c8d032f7 Merge remote-tracking branch 'origin/development' into development_cryptpad 2025-11-03 13:54:27 +00:00
Omdanii
7d72faa934 Merge pull request #193 from Incubaid/development_element_and_matrix
feat: Implement Element Chat Kubernetes installer
2025-11-03 15:51:23 +02:00
Mahmoud-Emad
8e5507b04e fix: Update element chat config and defaults
- Update element chat default name to 'elementchat'
- Sanitize element chat name from invalid characters
- Set default namespace based on sanitized name
- Validate namespace for invalid characters
- Update documentation with new default values
2025-11-03 15:49:54 +02:00
peternashaat
6746d885f8 feat(cryptpad): Refactor installer for dynamic configuration
This commit refactors the CryptPad Kubernetes installer into a more dynamic and configurable structure.

Key changes include:
-   **Dynamic Configuration**: The installer now generates its configuration based on parameters passed from the `.vsh` script, with sensible defaults for any unspecified values.
-   **Templated `config.js`**: Introduced a new `config.js` template to allow for greater flexibility and easier maintenance of the CryptPad configuration.
-   **Improved Code Structure**: The source code has been updated to be more modular and maintainable.
-   **Updated Documentation**: The `README.md` has been updated to include instructions on how to run the installer and customize the installation.

Co-authored-by: Mahmoud-Emad <mahmmoud.hassanein@gmail.com>
2025-11-03 13:12:46 +00:00
Mahmoud-Emad
2e56311cd0 refactor: Prefix hostnames with instance name
- Prefix matrix_hostname with mycfg.name
- Prefix element_hostname with mycfg.name

Co-authored-by: peternashaaat <peternashaaat@gmail.com>
2025-11-03 12:24:59 +02:00
Mahmoud-Emad
4d3071f2d2 feat: Update installer name
- Change installer name from 'myelementchat' to 'kristof'

Co-authored-by: peternashaaat <peternashaaat@gmail.com>
2025-11-02 17:29:20 +02:00
Mahmoud-Emad
3ee0e5b29c feat: Implement Element Chat Kubernetes installer
- Add Element Chat installer module
- Integrate Conduit and Element Web deployments
- Support TFGW integration for FQDNs and TLS
- Implement installation and destruction logic
- Generate Kubernetes YAML from templates

Co-authored-by: peternashaaat <peternashaaat@gmail.com>
2025-11-02 17:24:01 +02:00
Omdanii
672ff886d4 Merge pull request #192 from Incubaid/development_openrouter
Adding Cryptpad installer and finalizing the Kubernetes client
2025-11-02 13:54:21 +02:00
Mahmoud-Emad
44c8793074 refactor: Update cryptpad installer code
- Use installer.kube_client for Kubernetes operations
- Remove redundant startupmanager calls
- Simplify `delete_resource` command
- Add default values for installer name and hostname
- Refactor `get` function to use new arguments correctly
- Remove commented out example code and unused imports
- Change the factory file to load the default instance name
- Update the README file of the installer

Co-authored-by: peternahaaat <peternashaaat@gmail.com>
2025-11-02 13:37:38 +02:00
Mahmoud-Emad
86549480b5 Merge branch 'development_openrouter' of github.com:incubaid/herolib into development_openrouter 2025-10-30 18:00:54 +03:00
Mahmoud-Emad
80108d4b36 refactor: Refactor Kubernetes client and CryptPad installer
- Replace kubectl exec calls with Kubernetes client methods
- Improve error handling and logging in Kubernetes client
- Enhance node information retrieval and parsing
- Add comprehensive unit tests for Kubernetes client and Node structs
- Refine YAML validation to allow custom resource definitions
- Update CryptPad installer to use the refactored Kubernetes client
2025-10-30 17:58:03 +03:00
peternashaat
81adc60eea feat(cryptpad): Use constants for deployment retry logic
Refactor the installer to use global constants for the maximum number of retries and the check interval when verifying deployments.

This change removes hardcoded values from the FQDN and deployment status checks, improving maintainability and centralizing configuration.
2025-10-30 13:21:49 +00:00
peternashaat
82d37374d8 Cryptpad installer 2025-10-30 11:46:15 +00:00
Mahmoud-Emad
c556cc71d4 feat: Implement Kubernetes client and example
- Add Kubernetes client module for interacting with kubectl
- Implement methods to get cluster info, pods, deployments, and services
- Create a Kubernetes example script demonstrating client usage
- Add JSON response structs for parsing kubectl output
- Define runtime resource structs (Pod, Deployment, Service) for structured data
- Include comprehensive unit tests for data structures and client logic
2025-10-29 16:46:37 +03:00
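
A minimal sketch of the kubectl-backed approach: shell out with `-o json` and decode the output. The real client parses into the typed `Pod`/`Deployment`/`Service` structs mentioned above; here the result is left as generic JSON:

```v
import os
import x.json2

fn pods_json(namespace string) !json2.Any {
	res := os.execute('kubectl get pods -n ${namespace} -o json')
	if res.exit_code != 0 {
		return error('kubectl failed: ${res.output}')
	}
	return json2.decode[json2.Any](res.output)!
}
```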
Mahmoud-Emad
79b78aa6fe feat: Implement Kubernetes installer for kubectl
- Add install functionality for kubectl
- Implement destroy functionality for kubectl
- Add platform-specific download URLs for kubectl
- Ensure .kube directory is created with correct permissions
2025-10-29 13:32:43 +03:00
Mahmoud-Emad
f6734a3568 chore: Remove openrouter client
- Remove call to openrouter.play from the main play function
- Used the OpenAI client instead
- Updated the examples
- Updated the README
2025-10-29 11:42:44 +03:00
0adb38a8a7 Merge branch 'development' into development_heroserver_errors
* development:
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  refactor: Update OpenRouter client and examples
2025-10-29 12:10:23 +04:00
88f83cbfe2 ... 2025-10-29 12:09:53 +04:00
4e4abc055b ... 2025-10-29 09:49:49 +04:00
05c789da7e ... 2025-10-29 09:36:37 +04:00
9c8bcbff0c ... 2025-10-29 09:35:46 +04:00
fbed626771 ... 2025-10-29 09:28:27 +04:00
8583238fdb ... 2025-10-29 09:25:55 +04:00
c5f1d39958 ... 2025-10-29 07:53:34 +04:00
15ec641bc6 Merge branch 'development' into development_heroserver_errors
* development:
  ...
  ...
  ...
  ...
  ...
  ...
  ..
  ...
  ...
  ...
  ...
  ...
  ...
  atlas is working
  reverted
  ...
  ...
  ...
2025-10-29 07:05:50 +04:00
Mahmoud-Emad
4222dac72e refactor: Update OpenRouter client and examples
- Add error handling for client initialization
- Improve example scripts for clarity and robustness
- Refine client configuration and usage patterns
- Update documentation with current examples and features
- Enhance model handling and response processing
2025-10-28 22:40:37 +03:00
d1c0c8f03e ... 2025-10-26 23:44:04 +04:00
1973b58deb ... 2025-10-26 22:42:41 +04:00
46ce903d4d ... 2025-10-26 22:24:18 +04:00
9d1c347da7 ... 2025-10-26 21:18:39 +04:00
216eb262dd ... 2025-10-26 21:14:10 +04:00
b85ac9adc9 ... 2025-10-26 18:14:32 +04:00
79f2752b30 .. 2025-10-26 11:39:54 +04:00
d4911748ec ... 2025-10-26 11:37:24 +04:00
e574bcbc50 ... 2025-10-25 09:44:19 +04:00
9d2dedb2b6 ... 2025-10-25 09:03:03 +04:00
569d980336 ... 2025-10-25 08:51:57 +04:00
3df101afc7 ... 2025-10-24 16:54:43 +04:00
19fd4649be ... 2025-10-24 13:58:31 +04:00
Mahmoud-Emad
521596b29b refactor: Remove unused saved_content variable
- Remove redundant variable `saved_content`
- Simplify text concatenation logic
- Comment out unused '*' character handling
2025-10-23 17:45:33 +03:00
Mahmoud-Emad
53b5ee950f fix: Mark action element as processed
- Set action element to processed after content update
2025-10-23 17:43:38 +03:00
5cdac4d7fd atlas is working 2025-10-23 16:41:48 +02:00
581ae4808c reverted 2025-10-23 08:29:10 +02:00
bc26c88188 ... 2025-10-23 08:20:31 +02:00
f7ea3ec420 ... 2025-10-23 08:19:08 +02:00
091aef5859 ... 2025-10-23 07:48:20 +02:00
Mahmoud-Emad
c99831ee9b feat: Add virt/kubernetes directory
- Add virt/kubernetes directory
- Initialize Kubernetes integration setup
2025-10-22 21:46:57 +03:00
Mahmoud-Emad
4ab78c65e3 refactor: Update library paths
- Remove `lib/hero`
- Add `lib/hero/heromodels`
2025-10-22 21:41:47 +03:00
Omdanii
67d4137b61 Merge pull request #183 from Incubaid/development_heroserver_errors
fix: Improve error handling and optional crypto_client
2025-10-22 21:36:35 +03:00
Mahmoud-Emad
afbfa11516 refactor: Improve frontmatter and def parsing logic
- Save content before modifying
- Handle '*' character for defs correctly
- Re-enable frontmatter parsing for '---' and '+++'
- Re-enable frontmatter parsing for '---' and '+++' in paragraphs
2025-10-22 21:31:49 +03:00
Mahmoud-Emad
4f3a81b097 Merge branch 'development' into development_heroserver_errors 2025-10-22 21:15:17 +03:00
Mahmoud-Emad
0bfb5cfdd0 refactor: Update JSON parsing and schema inflation
- Use `json2.decode[json2.Any]` instead of `json2.raw_decode`
- Add `@[required]` to procedure function signatures
- Improve error handling for missing JSONRPC fields
- Update `encode` to use `prettify: true`
- Add checks for missing schema and content descriptor references
2025-10-22 21:14:29 +03:00
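
The decode swap described above, in isolation. Both forms parse arbitrary JSON into `json2.Any`; the generic call is the one this commit moves to:

```v
import x.json2

fn parse_payload(payload string) !json2.Any {
	// old: json2.raw_decode(payload)!
	return json2.decode[json2.Any](payload)!
}
```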
Mahmoud-Emad
d0ca0ca42d Merge branches 'development' and 'development' of github.com:incubaid/herolib into development 2025-10-22 11:06:24 +03:00
Mahmoud-Emad
98ba344d65 refactor: Use action_ directly instead of action alias
- Access action_ parameters directly
- Avoid creating a mutable alias for action_
2025-10-22 11:06:07 +03:00
fee3314653 Merge branch 'development' of github.com:incubaid/herolib into development 2025-10-22 09:38:51 +02:00
caedf2e2dd ... 2025-10-22 09:38:49 +02:00
Mahmoud-Emad
37f0aa0e96 feat: Implement dark mode theme and improve UI
- Add CSS variables for theming
- Implement dark mode toggle functionality
- Refactor styles for better organization and readability
- Update navigation bar with theme toggle button
- Enhance hero section with display-4 font size
- Adjust card styles for consistent appearance
- Improve alert and badge styling
- Make hero server title bold and larger
- Use Bootstrap 5 classes for consistent styling
- Add prefetch for Bootstrap JS
- Update `auth_enabled` default to false in server creation
2025-10-21 23:32:25 +03:00
Mahmoud-Emad
63c2efc921 feat: Group API methods and improve TOC
- Add method grouping by model/actor prefix
- Introduce DocMethodGroup struct for grouped methods
- Refactor TOC to display methods by groups
- Add collapsible sections for method groups and methods
- Improve CSS for better presentation of grouped content
2025-10-21 16:43:43 +03:00
Mahmoud-Emad
8ef9522676 refactor: Update method names and add curl example generation
- Rename API method names using dot notation
- Add endpoint_url and curl_example to DocMethod
- Implement generate_curl_example function
- Update DocMethod struct with new fields
2025-10-21 15:39:59 +03:00
Mahmoud-Emad
a120ef2676 refactor: Improve schema example generation and inflation
- Inflate methods to resolve $ref references
- Use schema-generated examples for requests
- Implement robust recursive schema example generation
- Add constants for example generation depth and property limits
- Utilize V's json2 module for JSON pretty-printing
2025-10-21 15:02:18 +03:00
Mahmoud-Emad
27c8273eec docs: Document doctree export and WARP guidelines
- Add documentation for doctree export functionality
- Include WARP.md with project guidelines and commands
2025-10-21 11:26:48 +03:00
Mahmoud-Emad
c1489fc491 fix: Improve error handling and optional crypto_client
- Add explicit error handling for HeroModels initialization
- Enhance error messages for HeroDB connection and ping failures
- Make crypto_client optional in HeroServer configuration
- Initialize crypto_client only when auth_enabled is true
- Ensure crypto_client is available before use in auth_submit
2025-10-21 11:10:30 +03:00
12c6aabed5 ... 2025-10-21 09:46:06 +02:00
67d13d081b ... 2025-10-21 09:30:37 +02:00
12ad7b1e6f ... 2025-10-21 09:30:22 +02:00
1cde14f640 ... 2025-10-21 09:12:00 +02:00
a5f8074411 ... 2025-10-21 08:51:27 +02:00
f69078a42e ... 2025-10-21 08:49:11 +02:00
43308dfbe1 Merge branch 'development' into development_heroprompt_v2
* development:
  ...
  feat: Update site page source references
  feat: Add announcement bar configuration
  ...
  Update the pages
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  ...
  fix: Improve Docusaurus link generation logic
2025-10-21 06:45:24 +02:00
Scott Yeager
9f3f1914ce Remove build workarounds 2025-10-20 13:59:21 -07:00
Scott Yeager
f2e1e7c11c Update install script 2025-10-20 13:59:21 -07:00
b538540cd4 Merge branch 'development' of github.com:incubaid/herolib into development 2025-10-19 16:28:37 +02:00
1a76c31865 ... 2025-10-19 16:28:35 +02:00
Mahmoud-Emad
f477fe46b3 feat: Update site page source references
- Update `site.page` src from "tech:introduction" to "mycelium_tech:introduction"
- Update `site.page` src from "tech:mycelium" to "mycelium_tech:mycelium"
2025-10-19 16:58:18 +03:00
Mahmoud-Emad
b18c6824d6 feat: Add announcement bar configuration
- Add AnnouncementBar struct and field to Configuration
- Add announcement.json file generation
- Implement play_announcement function for importing announcement config
- Improve fix_links to calculate relative paths dynamically
- Escape single quotes in YAML frontmatter fields
2025-10-16 17:38:18 +03:00
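
An assumed shape for the `AnnouncementBar` struct and the generated `announcement.json`; the field names and output path are placeholders:

```v
import os
import json

pub struct AnnouncementBar {
pub mut:
	id      string = 'announcement'
	content string
}

fn write_announcement(bar AnnouncementBar, build_path string) ! {
	os.write_file(os.join_path(build_path, 'announcement.json'), json.encode(bar))!
}
```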
b2bc0d1b6a Merge branch 'development' of github.com:incubaid/herolib into development 2025-10-16 16:03:48 +04:00
f2f87eb7fd ... 2025-10-16 16:03:45 +04:00
Mahmoud-Emad
112894b24f Update the pages 2025-10-16 12:47:50 +03:00
4cfc018ace ... 2025-10-16 13:23:15 +04:00
05db43fe83 ... 2025-10-16 10:32:16 +04:00
c35ba97682 ... 2025-10-16 10:28:48 +04:00
f4711681dc ... 2025-10-16 10:12:02 +04:00
cb52bcfbe4 ... 2025-10-16 10:00:06 +04:00
099b21510d ... 2025-10-16 09:51:42 +04:00
91fdf9a774 ... 2025-10-16 09:45:42 +04:00
cf601283b1 ... 2025-10-16 09:25:03 +04:00
6918a02eff ... 2025-10-16 08:09:11 +04:00
Omdanii
b4971de5e9 Merge pull request #181 from Incubaid/development_herodocs_links
fix: Improve Docusaurus link generation logic
2025-10-15 17:38:35 +03:00
Mahmoud-Emad
9240e2ede8 fix: Improve Docusaurus link generation logic
- Add function to strip numeric prefixes from filenames
- Strip numeric prefixes from links for Docusaurus compatibility
- Fix same-collection relative links
- Convert collection:page links to relative paths
- Remove .md extensions from generated links
2025-10-15 16:44:02 +03:00
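
A sketch of the numeric-prefix stripping: Docusaurus sidebars use prefixes like `01_intro.md` for ordering, but generated links must drop them. The exact matching rules are assumptions:

```v
fn strip_numeric_prefix(name string) string {
	mut i := 0
	for i < name.len && name[i].is_digit() {
		i++
	}
	// only strip when the digits are followed by a separator
	if i > 0 && i < name.len && (name[i] == `_` || name[i] == `-`) {
		return name[i + 1..]
	}
	return name
}
```

So a link to `01_intro.md` would become `intro` once the `.md` extension is also removed, per the last bullet above.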
Mahmoud-Emad
e8904ea1ce chore: Formatting the pages 2025-10-15 15:38:13 +03:00
Mahmoud-Emad
3d25fe0f04 refactor: Update import paths and save logic
- Update import paths from `freeflowuniverse.herolib` to `incubaid.herolib`
- Ensure `ws.parent.save()` is only called when `ws.parent` is present
- Remove redundant symlink cleanup for `freeflowuniverse.herolib`
2025-10-15 15:03:25 +03:00
Mahmoud-Emad
d91957b945 Merge branch 'development' into development_heroprompt_v2 2025-10-15 14:40:14 +03:00
Mahmoud-Emad
446c54b0b5 feat: Improve Docusaurus link generation
- Add function to fix links for nested categories
- Adjust path generation for nested collections
- Remove .md extensions from Docusaurus links
- Conditionally apply name_fix to page paths
2025-10-15 04:26:29 +03:00
Scott Yeager
8edf8c9299 Delete extra file 2025-10-14 16:23:49 -07:00
Mahmoud-Emad
9c4520a645 Merge branch 'development' of github.com:incubaid/herolib into development 2025-10-15 02:08:17 +03:00
Mahmoud-Emad
fc9142b005 fix: Ensure pagepath ends with a slash
- Use section_current.path for default page path
- Append slash if pagepath is not empty and doesn't end with one
2025-10-15 02:07:52 +03:00
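
The slash normalization described above, as a tiny helper (assumed name):

```v
fn ensure_trailing_slash(pagepath string) string {
	if pagepath != '' && !pagepath.ends_with('/') {
		return pagepath + '/'
	}
	return pagepath
}
```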
Scott Yeager
cc31ce0f6f Update install script to use new release file names 2025-10-14 15:17:11 -07:00
Scott Yeager
1688c4f9b0 bump version to 1.0.36 2025-10-14 14:37:14 -07:00
Scott Yeager
9a7b9b8a10 Fix release flow 2025-10-14 14:35:59 -07:00
Mahmoud-Emad
923f8c24e7 feat: Improve HeroPrompt file selection and workspace management
- Refactor Directory struct and its methods.
- Update file selection logic for directories and files.
- Enhance prompt generation with better file mapping.
- Add unit tests for directory and file operations.
- Improve workspace management with auto-save and logging.
2025-10-12 12:16:52 +03:00
Mahmoud-Emad
40ad68e0ff Merge branch 'development' into development_heroprompt_v2 2025-10-10 15:25:22 +03:00
Mahmoud-Emad
1762387301 feat: add recursive file and directory search
- Introduce SearchResult struct for search items
- Implement search_files and recursive search logic
- Migrate heroprompt file operations to pathlib
- Update expand_home_path to use pathlib
- Integrate codewalker and pathlib in example script
2025-09-14 12:42:43 +03:00
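
An assumed sketch of the `SearchResult` struct and the recursive search it pairs with; the real implementation goes through herolib's pathlib rather than `os` directly:

```v
import os

pub struct SearchResult {
pub mut:
	path   string
	is_dir bool
}

fn search_files(dir string, needle string) ![]SearchResult {
	mut out := []SearchResult{}
	for entry in os.ls(dir)! {
		full := os.join_path(dir, entry)
		if os.is_dir(full) {
			if entry.contains(needle) {
				out << SearchResult{path: full, is_dir: true}
			}
			out << search_files(full, needle)! // recurse into subdirectories
		} else if entry.contains(needle) {
			out << SearchResult{path: full}
		}
	}
	return out
}
```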
ea9286687d Merge branch 'development' into development_heroprompt_v2
* development:
  Trigger security scan
  Add Github Actions Security workflow
2025-09-11 08:08:27 +04:00
Mahmoud-Emad
cc837a1427 feat: enhance file selection and prompt generation
- Add gitignore filtering to file tree and search
- Introduce recursive directory listing API
- Enable recursive directory selection in UI
- Pass selected paths directly for prompt generation
- Refactor API endpoint names and error handling
2025-09-09 16:31:08 +03:00
Mahmoud-Emad
154c08411c refactor: Simplify prompt content and file map generation
- Extract recursive file tree logic into new helper function
- Remove explicit file content generation from prompt
- Simplify `build_file_map` to only generate file trees
- Eliminate file metadata calculation from `build_file_map`
- Comment out extensive example workspace operations
2025-09-09 11:50:12 +03:00
1870f2a7ce Merge branch 'development' into development_heroprompt_v2
* development:
  ...
  ...
  add example heromodels call
  add example and heromodels openrpc server
  remove server from gitignore
  clean up and fix openrpc server implementation
  Test the workflow
2025-09-09 06:31:24 +04:00
Mahmoud-Emad
ff92f6eff2 feat: Initialize Workspace with metadata fields
- Initialize `children`, `created`, `updated`, `is_saved`
- Add `time` import for timestamp fields
- Remove unused `ui.console` import
- Update package version constant to `1.0.0`
2025-09-08 15:45:17 +03:00
Mahmoud-Emad
eeb5e207f2 Merge branch 'development' into development_heroprompt_v2 2025-09-08 14:55:31 +03:00
Mahmoud-Emad
09b595948d Merge branch 'development' into development_heroprompt_v2 2025-09-07 14:52:43 +03:00
Mahmoud-Emad
63c0b81fc9 feat: Support multi-root workspaces
- Remove `base_path` from Workspace struct and APIs
- Enable adding multiple root directories to a workspace
- Update file tree UI to display all workspace roots
- Refactor file map generation for multi-root display
- Improve prompt output clipboard copy with status
2025-09-07 14:40:17 +03:00
867 changed files with 41802 additions and 45349 deletions


@@ -27,7 +27,7 @@ jobs:
       uses: actions/checkout@v4
     - name: Setup Vlang
-      run: ./install_v.sh
+      run: ./scripts/install_v.sh
     - name: Generate documentation
       run: |


@@ -46,9 +46,6 @@ jobs:
           cd v
           make
           ./v symlink
-          if [ "${{ runner.os }}" = "macOS" ]; then
-            sudo sed -i '' '618,631d' /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/math.h
-          fi
           cd -
           mkdir -p ~/.vmodules/incubaid
@@ -71,6 +68,7 @@ jobs:
         run: |
           set -e
           if [ "${{ runner.os }}" = "Linux" ]; then
+            sudo apt-get install libpq-dev
             # Build for glibc
             v -w -d use_openssl -enable-globals -cc gcc cli/hero.v -o cli/hero-${{ matrix.target }}
@@ -91,7 +89,7 @@ jobs:
           '
         else
-          v -w -d use_openssl -enable-globals -gc none -cc tcc cli/hero.v -o cli/hero-${{ matrix.target }}
+          v -w -d use_openssl -enable-globals -cc clang cli/hero.v -o cli/hero-${{ matrix.target }}
         fi
     - name: Upload glibc binary


@@ -24,7 +24,7 @@ jobs:
       run: |
         # Updating man-db takes a long time on every run. We don't need it
         sudo apt-get remove -y --purge man-db
-        ./install_v.sh
+        ./scripts/install_v.sh
     - name: Setup Herolib from current branch
       run: |

.gitignore vendored

@@ -57,4 +57,5 @@ MCP_HTTP_REST_IMPLEMENTATION_PLAN.md
 tmux_logger
 release
 install_herolib
-doc
+doc
+priv_key.bin


@@ -1,5 +0,0 @@
-when fixing or creating code, refer to the following hints:
-@aiprompts/vlang_herolib_core.md


@@ -1,6 +0,0 @@
-{
-  "context": "Workspace",
-  "bindings": {
-    "cmd-r": ["task::Spawn", { "task_name": "ET", "reveal_target": "center" }]
-  }
-}


@@ -1,47 +0,0 @@
-[
-  {
-    "label": "ET",
-    "command": "for i in {1..5}; do echo \"Hello $i/5\"; sleep 1; done",
-    //"args": [],
-    // Env overrides for the command, will be appended to the terminal's environment from the settings.
-    "env": { "foo": "bar" },
-    // Current working directory to spawn the command into, defaults to current project root.
-    //"cwd": "/path/to/working/directory",
-    // Whether to use a new terminal tab or reuse the existing one to spawn the process, defaults to `false`.
-    "use_new_terminal": true,
-    // Whether to allow multiple instances of the same task to be run, or rather wait for the existing ones to finish, defaults to `false`.
-    "allow_concurrent_runs": false,
-    // What to do with the terminal pane and tab, after the command was started:
-    // * `always` — always show the task's pane, and focus the corresponding tab in it (default)
-    // * `no_focus` — always show the task's pane, add the task's tab in it, but don't focus it
-    // * `never` — do not alter focus, but still add/reuse the task's tab in its pane
-    "reveal": "always",
-    // What to do with the terminal pane and tab, after the command has finished:
-    // * `never` — Do nothing when the command finishes (default)
-    // * `always` — always hide the terminal tab, hide the pane also if it was the last tab in it
-    // * `on_success` — hide the terminal tab on task success only, otherwise behaves similar to `always`
-    "hide": "never",
-    // Which shell to use when running a task inside the terminal.
-    // May take 3 values:
-    // 1. (default) Use the system's default terminal configuration in /etc/passwd
-    //    "shell": "system"
-    // 2. A program:
-    //    "shell": {
-    //      "program": "sh"
-    //    }
-    // 3. A program with arguments:
-    //    "shell": {
-    //      "with_arguments": {
-    //        "program": "/bin/bash",
-    //        "args": ["--login"]
-    //      }
-    //    }
-    "shell": "system",
-    // Whether to show the task line in the output of the spawned task, defaults to `true`.
-    "show_summary": true,
-    // Whether to show the command line in the output of the spawned task, defaults to `true`.
-    // "show_output": true,
-    // Represents the tags for inline runnable indicators, or spawning multiple tasks at once.
-    "tags": ["DODO"]
-  }
-]


@@ -24,7 +24,7 @@ Thank you for your interest in contributing to Herolib! This document provides g
 For developers, you can use the automated installation script:
 ```bash
-curl 'https://raw.githubusercontent.com/incubaid/herolib/refs/heads/development/install_v.sh' > /tmp/install_v.sh
+curl 'https://raw.githubusercontent.com/incubaid/herolib/refs/heads/development/scripts/install_v.sh' > /tmp/install_v.sh
 bash /tmp/install_v.sh --analyzer --herolib
 # IMPORTANT: Start a new shell after installation for paths to be set correctly
 ```


@@ -14,7 +14,7 @@ Herolib is an opinionated library primarily used by ThreeFold to automate cloud
 The Hero tool can be installed with a single command:
 ```bash
-curl https://raw.githubusercontent.com/incubaid/herolib/refs/heads/development/install_hero.sh | bash
+curl https://raw.githubusercontent.com/incubaid/herolib/refs/heads/development/scripts/install_hero.sh | bash
 ```
 Hero will be installed in:
@@ -35,11 +35,11 @@ The Hero tool can be used to work with git, build documentation, interact with H
 For development purposes, use the automated installation script:
 ```bash
-curl 'https://raw.githubusercontent.com/incubaid/herolib/refs/heads/development/install_v.sh' > /tmp/install_v.sh
+curl 'https://raw.githubusercontent.com/incubaid/herolib/refs/heads/development/scripts/install_v.sh' > /tmp/install_v.sh
 bash /tmp/install_v.sh --analyzer --herolib
 #do not forget to do the following this makes sure vtest and vrun exists
-cd ~/code/github/incubaid/herolib
+cd ~/code/github/incubaid/herolib/scripts
 v install_herolib.vsh
 # IMPORTANT: Start a new shell after installation for paths to be set correctly
@@ -51,7 +51,7 @@ v install_herolib.vsh
 ```
 V & HeroLib Installer Script
-Usage: ~/code/github/incubaid/herolib/install_v.sh [options]
+Usage: ~/code/github/incubaid/herolib/scripts/install_v.sh [options]
 Options:
   -h, --help Show this help message
@@ -61,12 +61,12 @@ Options:
   --herolib Install our herolib
 Examples:
-  ~/code/github/incubaid/herolib/install_v.sh
-  ~/code/github/incubaid/herolib/install_v.sh --reset
-  ~/code/github/incubaid/herolib/install_v.sh --remove
-  ~/code/github/incubaid/herolib/install_v.sh --analyzer
-  ~/code/github/incubaid/herolib/install_v.sh --herolib
-  ~/code/github/incubaid/herolib/install_v.sh --reset --analyzer # Fresh install of both
+  ~/code/github/incubaid/herolib/scripts/install_v.sh
+  ~/code/github/incubaid/herolib/scripts/install_v.sh --reset
+  ~/code/github/incubaid/herolib/scripts/install_v.sh --remove
+  ~/code/github/incubaid/herolib/scripts/install_v.sh --analyzer
+  ~/code/github/incubaid/herolib/scripts/install_v.sh --herolib
+  ~/code/github/incubaid/herolib/scripts/install_v.sh --reset --analyzer # Fresh install of both
 ```
 ## Features
@@ -175,7 +175,3 @@ To generate documentation locally:
 cd ~/code/github/incubaid/herolib
 bash doc.sh
 ```
-<!-- Security scan triggered at 2025-09-02 01:58:41 -->
-<!-- Security scan triggered at 2025-09-09 05:33:18 -->

View File

@@ -16,4 +16,4 @@ NC='\033[0m' # No Color
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
/workspace/herolib/install_v.sh
/workspace/herolib/scripts/install_v.sh

View File

@@ -5,12 +5,15 @@ This file provides guidance to WARP (warp.dev) when working with code in this re
## Commands to Use
### Testing
- **Run Tests**: Utilize `vtest ~/code/github/incubaid/herolib/lib/osal/package_test.v` to run specific tests.
## High-Level Architecture
- **Project Structure**: The project is organized into multiple modules located in `lib` and `src` directories. Prioritized compilation and caching strategies are utilized across modules.
- **Script Handling**: Vlang scripts are crucial and should follow instructions from `aiprompts/vlang_herolib_core.md`.
## Special Instructions
- **Documentation Reference**: Always refer to `aiprompts/vlang_herolib_core.md` for essential instructions regarding Vlang and Heroscript code generation and execution.
- **Environment Specifics**: Ensure Redis and other dependencies are configured as per scripts provided in the codebase.

View File

@@ -2,11 +2,12 @@
## Overview
This document provides clear instructions for AI agents to create new HeroDB models similar to `message.v`.
These models are used to store structured data in Redis using the HeroDB system.
The `message.v` file can be found in `lib/hero/heromodels/message.v`.
## Key Concepts
- Each model represents a data type stored in Redis hash sets
- Models must implement serialization/deserialization using the `encoder` module
- Models inherit from the `Base` struct which provides common fields
- The database uses a factory pattern for model access (a minimal sketch follows)
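As a rough illustration only, a new model might be shaped like this. The struct name and fields below are assumptions invented for the sketch; they do not reflect the actual `message.v` contents or the full encoder API:

```v
module heromodels

// Hypothetical model; name and fields are assumptions for illustration.
pub struct Comment {
	Base // embedded Base struct provides the common fields
pub mut:
	text   string
	author string
}

// A real model would also implement the encoder-based
// serialization/deserialization methods described above.
```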

View File

@@ -1,36 +1,41 @@
# Doctree Export Specification
## Overview
The `doctree` module in `lib/data/doctree` is responsible for processing and exporting documentation trees. This involves taking a structured representation of documentation (collections, pages, images, files) and writing it to a specified file system destination. Additionally, it leverages Redis to store metadata about the exported documentation, facilitating quick lookups and integration with other systems.
## Key Components
### `lib/data/doctree/export.v`
This file defines the main `export` function for the `Tree` object. It orchestrates the overall export process:
- Takes `TreeExportArgs` which includes parameters like `destination`, `reset` (to clear destination), `keep_structure`, `exclude_errors`, `toreplace` (for regex replacements), `concurrent` (for parallel processing), and `redis` (to control Redis metadata storage).
- Processes definitions, includes, actions, and macros within the `Tree`.
- Generates file paths for pages, images, and other files.
- Iterates through `Collection` objects within the `Tree` and calls their respective `export` methods, passing down the `redis` flag.
### `lib/data/doctree/collection/export.v`
This file defines the `export` function for the `Collection` object. This is where the actual file system writing and Redis interaction for individual collections occur:
- Takes `CollectionExportArgs` which includes `destination`, `file_paths`, `reset`, `keep_structure`, `exclude_errors`, `replacer`, and the `redis` flag.
- Creates a `.collection` file in the destination directory with basic collection information.
- **Redis Integration**:
- Obtains a Redis client using `base.context().redis()`.
- Stores the collection's destination path in Redis using `redis.hset('doctree:path', 'collection_name', 'destination_path')`.
- Calls `export_pages`, `export_files`, `export_images`, and `export_linked_pages` which all interact with Redis if the `redis` flag is true.
- **`export_pages`**:
- Processes page links and handles not-found errors.
- Writes markdown content to the destination file system.
- Stores page metadata in Redis: `redis.hset('doctree:collection_name', 'page_name', 'page_file_name.md')`.
- **`export_files` and `export_images`**:
- Copies files and images to the destination directory (e.g., `img/`).
- Stores file/image metadata in Redis: `redis.hset('doctree:collection_name', 'file_name', 'img/file_name.ext')`.
- **`export_linked_pages`**:
- Gathers linked pages within the collection.
- Writes a `.linkedpages` file.
- Stores linked pages file metadata in Redis: `redis.hset('doctree:collection_name', 'linkedpages', 'linkedpages_file_name.md')`.
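Given the `hset` calls above, any application with Redis access can resolve exported assets without walking the file system. A minimal lookup sketch (the collection name `mycollection` and the returned values are assumptions for illustration):

```bash
# Where was the collection exported to?
redis-cli HGET doctree:path mycollection
# -> /tmp/export/mycollection

# Which file holds the page 'intro'?
redis-cli HGET doctree:mycollection intro
# -> intro.md
```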
## Link between Redis and Export
@@ -43,4 +48,4 @@ This Redis integration serves as a quick lookup mechanism for other applications
Yes, the export functionality is crucial for making the processed `doctree` content available outside the internal `doctree` representation.
- **File System Export**: The core purpose of the export is to write the documentation content (markdown files, images, other assets) to a specified directory. This is essential for serving the documentation via a web server, integrating with static site generators (like Docusaurus, as suggested by other files in the project), or simply providing a browsable version of the documentation.
- **Redis Metadata**: While the file system export is fundamental, the Redis metadata storage is an important complementary feature. It provides an efficient way for other systems to programmatically discover and locate documentation assets. If there are downstream applications that rely on this Redis metadata for navigation, search, or content delivery, then the Redis part of the export is indeed needed. If no such applications exist or are planned, the `redis` flag can be set to `false` to skip this step, but the file system export itself remains necessary for external consumption of the documentation.

View File

@@ -0,0 +1,378 @@
# HeroPrompt Module
The `heroprompt` module provides a hierarchical workspace-based system for organizing code files and generating structured AI prompts. It enables developers to select files from multiple directories and generate formatted prompts for AI code analysis.
## Key Features
- **Hierarchical Organization**: HeroPrompt → Workspace → Directory → Files
- **Redis Persistence**: All data persists across sessions using Redis
- **Factory Pattern**: Clean API with `get()`, `delete()`, `exists()`, `list()` functions
- **File Selection**: Select specific files or entire directories for analysis
- **Active Workspace**: Manage multiple workspaces with one active at a time
- **Prompt Generation**: Generate structured prompts with file maps, contents, and instructions
- **Template-Based**: Uses V templates for consistent prompt formatting
## Basic Usage
### 1. Getting Started
```v
import incubaid.herolib.develop.heroprompt
// Create or get a HeroPrompt instance
mut hp := heroprompt.get(name: 'my_project', create: true)!
// Create a workspace (first workspace is automatically active)
mut workspace := hp.new_workspace(
	name: 'my_workspace'
	description: 'My project workspace'
)!
```
### 2. Adding Directories
```v
// Add directory and automatically scan all files
mut dir := workspace.add_directory(
	path: '/path/to/your/code'
	name: 'backend'
	scan: true // Scans all files and subdirectories
)!

// Add another directory
mut frontend_dir := workspace.add_directory(
	path: '/path/to/frontend'
	name: 'frontend'
	scan: true
)!
```
### 3. Selecting Files
```v
// Select specific files
dir.select_file(path: '/path/to/your/code/main.v')!
dir.select_file(path: '/path/to/your/code/utils.v')!
// Or select all files in a directory
frontend_dir.select_all()!
// Deselect files
dir.deselect_file(path: '/path/to/your/code/test.v')!
// Deselect all files
dir.deselect_all()!
```
### 4. Generating AI Prompts
```v
// Generate prompt with selected files
prompt := workspace.generate_prompt(
	instruction: 'Review these files and suggest improvements'
)!
println(prompt)

// Or generate with specific files (overrides selection)
prompt2 := workspace.generate_prompt(
	instruction: 'Analyze these specific files'
	selected_files: ['/path/to/file1.v', '/path/to/file2.v']
)!
```
## Factory Functions
### `heroprompt.get(name: string, create: bool) !HeroPrompt`
Gets or creates a HeroPrompt instance.
```v
// Get existing instance or create new one
mut hp := heroprompt.get(name: 'my_project', create: true)!
// Get existing instance only (error if doesn't exist)
mut hp2 := heroprompt.get(name: 'my_project')!
```
### `heroprompt.delete(name: string) !`
Deletes a HeroPrompt instance from Redis.
```v
heroprompt.delete(name: 'my_project')!
```
### `heroprompt.exists(name: string) !bool`
Checks if a HeroPrompt instance exists.
```v
if heroprompt.exists(name: 'my_project')! {
	println('Instance exists')
}
```
### `heroprompt.list() ![]string`
Lists all HeroPrompt instance names.
```v
instances := heroprompt.list()!
for name in instances {
	println('Instance: ${name}')
}
```
## HeroPrompt Methods
### Workspace Management
#### `hp.new_workspace(name: string, description: string, is_active: bool) !&Workspace`
Creates a new workspace. The first workspace is automatically set as active.
```v
mut ws := hp.new_workspace(
	name: 'backend'
	description: 'Backend API workspace'
)!
```
#### `hp.get_workspace(name: string) !&Workspace`
Retrieves an existing workspace by name.
```v
mut ws := hp.get_workspace('backend')!
```
#### `hp.get_active_workspace() !&Workspace`
Returns the currently active workspace.
```v
mut active := hp.get_active_workspace()!
println('Active workspace: ${active.name}')
```
#### `hp.set_active_workspace(name: string) !`
Sets a workspace as active (deactivates all others).
```v
hp.set_active_workspace('frontend')!
```
#### `hp.list_workspaces() []&Workspace`
Lists all workspaces in the instance.
```v
workspaces := hp.list_workspaces()
for ws in workspaces {
	println('Workspace: ${ws.name}')
}
```
#### `hp.delete_workspace(name: string) !`
Deletes a workspace.
```v
hp.delete_workspace('old_workspace')!
```
## Workspace Methods
### Directory Management
#### `ws.add_directory(path: string, name: string, scan: bool) !&Directory`
Adds a directory to the workspace.
```v
mut dir := ws.add_directory(
	path: '/path/to/code'
	name: 'my_code'
	scan: true // Automatically scans all files
)!
```
#### `ws.list_directories() []&Directory`
Lists all directories in the workspace.
```v
dirs := ws.list_directories()
for dir in dirs {
	println('Directory: ${dir.name}')
}
```
#### `ws.remove_directory(id: string) !`
Removes a directory from the workspace.
```v
ws.remove_directory(id: dir.id)!
```
### Prompt Generation
#### `ws.generate_prompt(instruction: string, selected_files: []string, show_all_files: bool) !string`
Generates a complete AI prompt with file map, contents, and instructions.
```v
// Use selected files (from select_file() calls)
prompt := ws.generate_prompt(
	instruction: 'Review the code'
)!

// Or specify files explicitly
prompt2 := ws.generate_prompt(
	instruction: 'Analyze these files'
	selected_files: ['/path/to/file1.v', '/path/to/file2.v']
	show_all_files: false
)!
```
#### `ws.generate_file_map(selected_files: []string, show_all: bool) !string`
Generates a hierarchical tree structure of files.
```v
file_map := ws.generate_file_map(
	selected_files: ['/path/to/file1.v']
	show_all: false
)!
println(file_map)
```
#### `ws.generate_file_contents(selected_files: []string, include_path: bool) !string`
Generates formatted file contents.
```v
contents := ws.generate_file_contents(
	selected_files: ['/path/to/file1.v']
	include_path: true
)!
println(contents)
```
## Directory Methods
### File Selection
#### `dir.select_file(path: string) !`
Marks a file as selected.
```v
dir.select_file(path: '/path/to/file.v')!
```
#### `dir.select_all() !`
Selects all files in the directory and subdirectories.
```v
dir.select_all()!
```
#### `dir.deselect_file(path: string) !`
Deselects a file.
```v
dir.deselect_file(path: '/path/to/file.v')!
```
#### `dir.deselect_all() !`
Deselects all files in the directory.
```v
dir.deselect_all()!
```
### Directory Information
#### `dir.exists() bool`
Checks if the directory exists on the filesystem.
```v
if dir.exists() {
	println('Directory exists')
}
```
#### `dir.get_contents() !DirectoryContent`
Gets all files in the directory (scans if needed).
```v
content := dir.get_contents()!
println('Files: ${content.files.len}')
```
## Generated Prompt Format
The generated prompt uses a template with three sections:
```prompt
<user_instructions>
Review these files and suggest improvements
</user_instructions>
<file_map>
my_project/
├── src/
│   ├── main.v *
│   └── utils.v *
└── README.md *
</file_map>
<file_contents>
File: /path/to/src/main.v
\```v
module main
fn main() {
	println('Hello')
}
\```
</file_contents>
```
Files marked with `*` in the file_map are the selected files included in the prompt.
## Complete Example
```v
import incubaid.herolib.develop.heroprompt
mut hp := heroprompt.get(name: 'my_app', create: true)!
mut ws := hp.new_workspace(name: 'backend')!
mut src_dir := ws.add_directory(path: '/path/to/src', name: 'source', scan: true)!
src_dir.select_file(path: '/path/to/src/main.v')!
prompt := ws.generate_prompt(instruction: 'Review the code')!
println(prompt)
heroprompt.delete(name: 'my_app')!
```
## Tips
- Use `heroprompt.delete()` at start for fresh state
- First workspace is automatically active
- Changes auto-save to Redis
- Use `scan: true` to discover all files
- Create separate workspaces for different contexts

View File

@@ -0,0 +1,371 @@
# Unpoly API Complete Reference & Operational Guide for AI Coding Agents
## Introduction
This document serves as a complete, self-contained reference manual for the **Unpoly JavaScript API**. It is written explicitly for AI coding agents that need to generate, validate, or refactor Unpoly-enhanced HTML and JavaScript code.
Unpoly is a front-end framework for progressive enhancement. It allows HTML pages to update fragments instead of reloading the full document, enabling seamless transitions, modals, caching, and navigation while maintaining server-side rendering.
An AI agent should use this manual to:
- Understand each Unpoly module and its purpose.
- Generate correct `up-*` attributes and JavaScript calls.
- Detect configuration errors and provide correction suggestions.
- Apply Unpoly in automated scaffolding, validation, and optimization workflows.
---
## 1. The up.link Module
### Purpose
Handles Unpoly-enhanced navigation. Converts normal links into AJAX-based fragment updates rather than full-page reloads.
### Core Concepts
When a user clicks a link with certain attributes, Unpoly intercepts the event and fetches the new page in the background. It then replaces specified fragments in the current document with matching elements from the response.
### Common Attributes
| Attribute | Description |
| --------------- | -------------------------------------------------------- |
| `up-follow` | Marks the link as handled by Unpoly. Usually implied. |
| `up-target` | CSS selector identifying which fragment(s) to replace. |
| `up-method` | Overrides HTTP method (e.g. `GET`, `POST`). |
| `up-params` | Adds query parameters to the request. |
| `up-headers` | Adds or overrides HTTP headers. |
| `up-layer` | Determines which layer (page, overlay, modal) to update. |
| `up-transition` | Defines animation during fragment replacement. |
| `up-cache` | Enables caching of the response. |
| `up-history` | Controls browser history behavior. |
### JavaScript API Methods
- `up.link.isFollowable(element)` Returns true if Unpoly will intercept the link.
- `up.link.follow(element, options)` Programmatically follow the link via Unpoly.
- `up.link.preload(element, options)` Preload the linked resource into the cache.
### Agent Reasoning & Validation
- Ensure that every `up-follow` element has a valid `up-target` selector.
- Validate that target elements exist in both the current DOM and the server response.
- Recommend `up-cache` for commonly visited links to improve performance.
- Prevent using `target="_blank"` or `download` attributes with Unpoly links.
### Example
```html
<a href="/profile" up-target="#main" up-transition="fade">View Profile</a>
```
---
## 2. The up.form Module
### Purpose
Handles progressive enhancement for forms. Submissions happen via AJAX and update only specific fragments.
### Core Attributes
| Attribute | Description |
| ---------------- | --------------------------------------- |
| `up-submit` | Marks form to be submitted via Unpoly. |
| `up-target` | Fragment selector to update on success. |
| `up-fail-target` | Selector to update if submission fails. |
| `up-validate` | Enables live field validation. |
| `up-autosubmit` | Submits automatically on change. |
| `up-disable-for` | Disables fields during request. |
| `up-enable-for` | Enables fields after request completes. |
### JavaScript API
- `up.form.submit(form, options)` Submit programmatically.
- `up.validate(field, options)` Trigger server validation.
- `up.form.fields(form)` Returns all input fields.
### Agent Reasoning
- Always ensure the form has both `action` and `method` attributes.
- Match `up-target` to an element existing in the rendered HTML.
- For validation, ensure server supports `X-Up-Validate` header.
- When generating forms, add `up-fail-target` to handle errors gracefully.
### Example
```html
<form action="/update" method="POST" up-submit up-target="#user-info" up-fail-target="#form-errors">
<input name="email" up-validate required>
<button type="submit">Save</button>
</form>
```
---
## 3. The up.layer Module
### Purpose
Manages overlays, modals, and stacked layers of navigation.
### Attributes
| Attribute | Description |
| ---------------- | -------------------------------------------------- |
| `up-layer="new"` | Opens content in a new overlay. |
| `up-size` | Controls modal size (e.g., `small`, `large`). |
| `up-dismissable` | Allows overlay to close by clicking outside. |
| `up-history` | Determines if the overlay updates browser history. |
| `up-title` | Sets overlay title. |
### JavaScript API
- `up.layer.open(options)` Opens a new layer.
- `up.layer.close(layer)` Closes a given layer.
- `up.layer.on(event, callback)` Hooks into lifecycle events.
### Agent Notes
- Ensure `up-layer="new"` only used with valid targets.
- For overlays, set `up-history="false"` unless explicitly required.
- Auto-generate dismiss buttons with `up-layer-close`.
### Example
```html
<a href="/settings" up-layer="new" up-size="large" up-target=".modal-content">Open Settings</a>
```
---
## 4. The up.fragment Module
### Purpose
Handles low-level fragment rendering, preserving, replacing, and merging.
### JavaScript API
- `up.render(options)` Replace fragment(s) with new content.
- `up.fragment.config` Configure defaults for rendering.
- `up.fragment.get(target)` Retrieve a fragment.
### Example
```js
up.render({ target: '#main', url: '/dashboard', transition: 'fade' })
```
### Agent Notes
- Ensure only fragment HTML is sent from server (not full document).
- Use `preserve` for elements like forms where input state matters.
---
## 5. The up.network Module
### Purpose
Handles network requests, caching, and aborting background loads.
### JavaScript API
- `up.network.loadPage(url, options)` Load a page via Unpoly.
- `up.network.abort()` Abort ongoing requests.
- `up.network.config.timeout` Default timeout setting.
### Agent Tasks
- Preload probable links (`up.link.preload`).
- Use caching for frequent calls.
- Handle `up:network:late` event to show spinners (see the sketch below).
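A minimal spinner sketch, assuming the page contains an element with id `spinner` (the element itself is an assumption for illustration):

```js
// Unpoly emits up:network:late when requests exceed the "late" delay,
// and up:network:recover once all late requests have finished.
up.on('up:network:late', () => {
  document.getElementById('spinner').hidden = false
})

up.on('up:network:recover', () => {
  document.getElementById('spinner').hidden = true
})
```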
---
## 6. The up.event Module
### Purpose
Manages custom events fired throughout Unpoly's lifecycle.
### Common Events
- `up:link:follow`
- `up:form:submit`
- `up:layer:open`
- `up:layer:close`
- `up:rendered`
- `up:network:late`
### Example
```js
up.on('up:layer:close', (event) => {
  console.log('Overlay closed');
});
```
### Agent Actions
- Register listeners for key events.
- Prevent duplicate bindings.
- Offer analytics hooks for `up:rendered` or `up:location:changed`.
---
## 7. The up.motion Module
Handles animations and transitions.
### API
- `up.motion()` Animate elements.
- `up.animate(element, keyframes, options)` Custom animation.
### Agent Notes
- Suggest `up-transition="fade"` or similar for fragment changes.
- Avoid heavy animations for performance-sensitive devices.
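A minimal sketch using a predefined animation (the `.notice` selector is an assumption for illustration):

```js
// Fade in a freshly shown element over 300ms.
up.animate(document.querySelector('.notice'), 'fade-in', { duration: 300 })
```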
---
## 8. The up.radio Module
Handles broadcasting and receiving cross-fragment events.
### Example
```js
up.radio.emit('user:updated', { id: 5 })
up.radio.on('user:updated', (data) => console.log(data))
```
### Agent Tasks
- Use for coordinating multiple fragments.
- Ensure channel names are namespaced (e.g., `form:valid`, `modal:open`).
---
## 9. The up.history Module
### Purpose
Manages URL history, titles, and restoration.
### API
- `up.history.push(url, options)` Push new history entry.
- `up.history.restore()` Restore previous state.
### Agent Guidance
- Disable history (`up-history="false"`) for temporary overlays.
- Ensure proper title update via `up-title`.
---
## 10. The up.viewport Module
### Purpose
Manages scrolling, focusing, and viewport restoration.
### API
- `up.viewport.scroll(element)` Scroll to element.
- `up.viewport.restoreScroll()` Restore previous position.
### Agent Tasks
- Restore scroll after fragment updates.
- Manage focus for accessibility after `up.render()`.
---
## 11. The up.protocol Module
Handles request headers and special HTTP status codes.
### Key Concepts
- Adds `X-Up-Target` and `X-Up-Validate` headers.
- Interprets redirect and validation responses.
### Agent Notes
- Ensure backend supports Unpoly headers.
- Warn if a 204 or redirect status is used without fragments.
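For illustration, a typical header exchange might look like the following (paths, selector, and body are assumptions):

```http
GET /users HTTP/1.1
X-Up-Version: 3.7.2
X-Up-Target: .users
```

```http
HTTP/1.1 200 OK
Vary: X-Up-Target

<div class="users">...</div>
```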
---
## 12. The up.util Module
Contains helpers for arrays, strings, and timing.
### Examples
```js
up.util.escapeHTML("<script>") // "&lt;script&gt;"
up.util.copy(text)
```
### Agent Tasks
- Use helpers for DOM-safe generation.
- Avoid reimplementing basic utilities.
---
## 13. The up.framework and up.log Modules
- `up.framework.init()` Initializes Unpoly.
- `up.log.enable()` Enables debug logging.
### Agent Tasks
- Always call `up.framework.init()` once.
- Enable logging during debug sessions.
---
## 14. Including Unpoly from CDN
### Purpose
Before any Unpoly features or attributes (`up-*`) can work, the library must be loaded into the page.
For AI agents generating HTML pages or templates, always include Unpoly's **CSS** and **JavaScript** assets early in the `<head>` section.
### Basic Setup (Latest Stable Version)
```html
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <title>My Unpoly Page</title>

  <!-- Unpoly Core Styles -->
  <link rel="stylesheet" href="https://unpkg.com/unpoly@3.7.2/unpoly.min.css">

  <!-- Optional Bootstrap integration -->
  <link rel="stylesheet" href="https://unpkg.com/unpoly@3.7.2/unpoly-bootstrap3.min.css">

  <!-- Unpoly Core Script -->
  <script src="https://unpkg.com/unpoly@3.7.2/unpoly.min.js"></script>

  <!-- Initialize framework -->
  <script>
    up.framework.init()
  </script>
</head>
<body>
  <!-- Page content -->
</body>
</html>
```
### AI Agent Notes
* Always include **`unpoly.min.css`** before any custom styles to ensure default animations and transitions function properly.
* **Never** load Unpoly twice; check if the page already has a `<script src="...unpoly.min.js">`.
* After loading, the agent may immediately use Unpoly JavaScript APIs (`up.render`, `up.link.follow`, etc.) or rely on HTML attributes.
* If dynamically injecting HTML pages, the agent should re-run `up.framework.init()` **only once globally**, not after every fragment load.
### Recommended CDN Sources
* `https://unpkg.com/unpoly@3.x/`
* `https://cdn.jsdelivr.net/npm/unpoly@3.x/`
### Offline Use
For fully offline or embedded environments, the agent can download both `.js` and `.css` files and reference them locally:
```html
<link rel="stylesheet" href="/assets/unpoly.min.css">
<script src="/assets/unpoly.min.js"></script>
```
---
## Agent Validation Checklist
1. Verify `up-*` attributes match existing fragments.
2. Check backend returns valid fragment markup.
3. Ensure forms use `up-submit` and `up-fail-target`.
4. Overlay layers must have dismissable controls.
5. Use caching wisely (`up-cache`, `up.link.preload`).
6. Handle network and render events gracefully.
7. Log events (`up.log`) for debugging.
8. Confirm scroll/focus restoration after renders.
9. Gracefully degrade if JavaScript disabled.
10. Document reasoning and configuration.

View File

@@ -0,0 +1,647 @@
# Unpoly Quick Reference for AI Agents
## Installation
Include Unpoly from CDN in your HTML `<head>`:
```html
<script src="https://unpoly.com/unpoly.min.js"></script>
<link rel="stylesheet" href="https://unpoly.com/unpoly.min.css">
```
## Core Concept
Unpoly updates page fragments without full page reloads. Users click links/submit forms → server responds with HTML → Unpoly extracts and swaps matching fragments.
---
## 1. Following Links (Fragment Updates)
### Basic Link Following
```html
<a href="/users/5" up-follow>View User</a>
```
Updates the `<main>` element (or `<body>` if no main exists) with content from `/users/5`.
### Target Specific Fragment
```html
<a href="/users/5" up-target=".user-details">View User</a>
<div class="user-details">
<!-- Content replaced here -->
</div>
```
### Multiple Fragments
```html
<a href="/users/5" up-target=".profile, .activity">View User</a>
```
Updates both `.profile` and `.activity` from single response.
### Append/Prepend Content
```html
<!-- Append to list -->
<a href="/items?page=2" up-target=".items:after">Load More</a>
<!-- Prepend to list -->
<a href="/latest" up-target=".items:before">Show Latest</a>
```
### Handle All Links Automatically
```js
up.link.config.followSelectors.push('a[href]')
```
Now all links update fragments by default.
---
## 2. Submitting Forms
### Basic Form Submission
```html
<form action="/users" method="post" up-submit>
<input name="email">
<button type="submit">Create</button>
</form>
```
Submits via AJAX and updates `<main>` with response.
### Target Specific Fragment
```html
<form action="/search" up-submit up-target=".results">
<input name="query">
<button>Search</button>
</form>
<div class="results">
<!-- Search results appear here -->
</div>
```
### Handle Success vs. Error Responses
```html
<form action="/users" method="post" up-submit
up-target="#success"
up-fail-target="form">
<input name="email">
<button>Create</button>
</form>
<div id="success">Success message here</div>
```
- **Success (2xx status)**: Updates `#success`
- **Error (4xx/5xx status)**: Re-renders `form` with validation errors
**Server must return HTTP 422** (or similar error code) for validation failures.
---
## 3. Opening Overlays (Modal, Drawer, Popup)
### Modal Dialog
```html
<a href="/details" up-layer="new">Open Modal</a>
```
Opens `/details` in a modal overlay.
### Drawer (Sidebar)
```html
<a href="/menu" up-layer="new drawer">Open Drawer</a>
```
### Popup (Anchored to Link)
```html
<a href="/help" up-layer="new popup">Help</a>
```
### Close Overlay When Condition Met
```html
<a href="/users/new"
up-layer="new"
up-accept-location="/users/$id"
up-on-accepted="console.log('Created user:', value.id)">
New User
</a>
```
Overlay auto-closes when URL matches `/users/123`, passes `{ id: 123 }` to callback.
### Local Content (No Server Request)
```html
<a up-layer="new popup" up-content="<p>Help text here</p>">Help</a>
```
---
## 4. Validation
### Validate on Field Change
```html
<form action="/users" method="post">
<input name="email" up-validate>
<input name="password" up-validate>
<button type="submit">Register</button>
</form>
```
When field loses focus → submits form with `X-Up-Validate: email` header → server re-renders form → Unpoly updates the field's parent `<fieldset>` (or closest form group).
**Server must return HTTP 422** for validation errors.
### Validate While Typing
```html
<input name="email" up-validate
up-watch-event="input"
up-watch-delay="300">
```
Validates 300ms after user stops typing.
---
## 5. Lazy Loading & Polling
### Load When Element Appears in DOM
```html
<div id="menu" up-defer up-href="/menu">
Loading menu...
</div>
```
Immediately loads `/menu` when placeholder renders.
### Load When Scrolled Into View
```html
<div id="comments" up-defer="reveal" up-href="/comments">
Loading comments...
</div>
```
Loads when element scrolls into viewport.
### Auto-Refresh (Polling)
```html
<div class="status" up-poll up-interval="5000">
Current status
</div>
```
Reloads fragment every 5 seconds from original URL.
---
## 6. Caching & Revalidation
### Enable Caching
```html
<a href="/users" up-cache="true">Users</a>
```
Caches response, instantly shows cached content, then revalidates with server.
### Disable Caching
```html
<a href="/stock" up-cache="false">Live Prices</a>
```
### Conditional Requests (Server-Side)
Server sends:
```http
HTTP/1.1 200 OK
ETag: "abc123"

<div class="data">Content</div>
```
Next reload, Unpoly sends:
```http
GET /path
If-None-Match: "abc123"
```
Server responds `304 Not Modified` if unchanged → saves bandwidth.
---
## 7. Navigation Bar (Current Link Highlighting)
```html
<nav>
  <a href="/home">Home</a>
  <a href="/about">About</a>
</nav>
```
Current page link gets `.up-current` class automatically.
**Style it:**
```css
.up-current {
  font-weight: bold;
  color: blue;
}
```
---
## 8. Loading State
### Feedback Classes
Automatically applied:
- `.up-active` on clicked link/button
- `.up-loading` on targeted fragment
**Style them:**
```css
.up-active { opacity: 0.6; }
.up-loading { opacity: 0.8; }
```
### Disable Form While Submitting
```html
<form up-submit up-disable>
  <input name="email">
  <button>Submit</button>
</form>
```
All fields disabled during submission.
### Show Placeholder While Loading
```html
<a href="/data" up-target=".data"
up-placeholder="<p>Loading...</p>">
Load Data
</a>
```
---
## 9. Preloading
### Preload on Hover
```html
<a href="/users/5" up-preload>User Profile</a>
```
Starts loading when user hovers (90ms delay by default).
### Preload Immediately
```html
<a href="/menu" up-preload="insert">Menu</a>
```
Loads as soon as link appears in DOM.
---
## 10. Templates (Client-Side HTML)
### Define Template
```html
<template id="user-card">
<div class="card">
<h3>{{name}}</h3>
<p>{{email}}</p>
</div>
</template>
```
### Use Template
```html
<a up-fragment="#user-card"
up-use-data="{ name: 'Alice', email: 'alice@example.com' }">
Show User
</a>
```
**Process variables with compiler:**
```js
up.compiler('.card', function(element, data) {
  element.innerHTML = element.innerHTML
    .replace(/{{name}}/g, data.name)
    .replace(/{{email}}/g, data.email)
})
```
---
## 11. JavaScript API
### Render Fragment
```js
up.render({
  url: '/users/5',
  target: '.user-details'
})
```
### Navigate (Updates History)
```js
up.navigate({
  url: '/users',
  target: 'main'
})
```
### Submit Form
```js
let form = document.querySelector('form')
up.submit(form)
```
### Open Overlay
```js
up.layer.open({
  url: '/users/new',
  onAccepted: (event) => {
    console.log('User created:', event.value)
  }
})
```
### Close Overlay with Value
```js
up.layer.accept({ id: 123, name: 'Alice' })
```
### Reload Fragment
```js
up.reload('.status')
```
---
## 12. Request Headers (Server Protocol)
Unpoly sends these headers with requests:
| Header | Value | Purpose |
| --------------- | -------- | ------------------------------- |
| `X-Up-Version` | `1.0.0` | Identifies Unpoly request |
| `X-Up-Target` | `.users` | Fragment selector being updated |
| `X-Up-Mode` | `modal` | Current layer mode |
| `X-Up-Validate` | `email` | Field being validated |
**Server can respond with:**
| Header | Effect |
| ------------------------ | ------------------------ |
| `X-Up-Target: .other` | Changes target selector |
| `X-Up-Accept-Layer: {}` | Closes overlay (success) |
| `X-Up-Dismiss-Layer: {}` | Closes overlay (cancel) |
---
## 13. Common Patterns
### Infinite Scrolling
```html
<div id="items">
<div>Item 1</div>
<div>Item 2</div>
</div>
<a id="next" href="/items?page=2"
up-defer="reveal"
up-target="#items:after, #next">
Load More
</a>
```
### Dependent Form Fields
```html
<form action="/order">
<!-- Changing country updates city select -->
<select name="country" up-validate="#city">
<option>USA</option>
<option>Canada</option>
</select>
<select name="city" id="city">
<option>New York</option>
</select>
</form>
```
### Confirm Before Action
```html
<a href="/delete" up-method="delete"
up-confirm="Really delete?">
Delete
</a>
```
### Auto-Submit on Change
```html
<form action="/search" up-autosubmit>
<input name="query">
</form>
```
Submits form when any field changes.
---
## 14. Error Handling
### Handle Network Errors
```js
up.on('up:fragment:offline', function(event) {
  if (confirm('You are offline. Retry?')) {
    event.retry()
  }
})
```
### Handle Failed Responses
```js
try {
  await up.render({ url: '/path', target: '.data' })
} catch (error) {
  if (error instanceof up.RenderResult) {
    console.log('Server error:', error)
  }
}
```
---
## 15. Compilers (Enhance Elements)
### Basic Compiler
```js
up.compiler('.current-time', function(element) {
  element.textContent = new Date().toString()
})
```
Runs when `.current-time` is inserted (initial load OR fragment update).
### Compiler with Cleanup
```js
up.compiler('.auto-refresh', function(element) {
  let timer = setInterval(() => {
    element.textContent = new Date().toString()
  }, 1000)

  // Return destructor function
  return () => clearInterval(timer)
})
```
Destructor called when element is removed from DOM.
---
## Quick Reference Table
| Task | HTML | JavaScript |
| --------------- | ---------------------------- | -------------------------- |
| Follow link | `<a href="/path" up-follow>` | `up.follow(link)` |
| Submit form | `<form up-submit>` | `up.submit(form)` |
| Target fragment | `up-target=".foo"` | `{ target: '.foo' }` |
| Open modal | `up-layer="new"` | `up.layer.open({ url })` |
| Validate field | `up-validate` | `up.validate(field)` |
| Lazy load | `up-defer` | — |
| Poll fragment | `up-poll` | — |
| Preload link | `up-preload` | `up.link.preload(link)` |
| Local content | `up-content="<p>Hi</p>"` | `{ content: '<p>Hi</p>' }` |
| Append content | `up-target=".list:after"` | — |
| Confirm action | `up-confirm="Sure?"` | `{ confirm: 'Sure?' }` |
---
## Key Defaults
- **Target**: Updates `<main>` (or `<body>`) if no `up-target` specified
- **Caching**: Auto-enabled for GET requests during navigation
- **History**: Auto-updated when rendering `<main>` or major fragments
- **Scrolling**: Auto-scrolls to top when updating `<main>`
- **Focus**: Auto-focuses new fragment
- **Validation**: Targets field's parent `<fieldset>` or form group
---
## Best Practices for AI Agents
1. **Always provide HTTP error codes**: Return 422 for validation errors, 404 for not found, etc.
2. **Send full HTML responses**: Include entire page structure; Unpoly extracts needed fragments
3. **Use semantic HTML**: `<main>`, `<nav>`, `<form>` elements work best
4. **Set IDs on fragments**: Makes targeting easier (e.g., `<div id="user-123">`)
5. **Return consistent selectors**: If request targets `.users`, response must contain `.users`
---
## Common Mistakes to Avoid
**Don't**: Return only partial HTML without wrapper
```html
<h1>Title</h1>
<p>Content</p>
```
**Do**: Wrap in target selector
```html
<div class="content">
<h1>Title</h1>
<p>Content</p>
</div>
```
**Don't**: Return 200 OK for validation errors
**Do**: Return 422 Unprocessable Entity
**Don't**: Use `onclick="up.follow(this)"`
**Do**: Use `up-follow` attribute (handles keyboard, accessibility)
---
## Server Response Examples
### Successful Form Submission
```http
HTTP/1.1 200 OK

<div id="success">
  User created successfully!
</div>
```
### Validation Error
```http
HTTP/1.1 422 Unprocessable Entity

<form action="/users" method="post" up-submit>
  <input name="email" value="invalid">
  <div class="error">Email is invalid</div>
  <button>Submit</button>
</form>
```
### Partial Response (Optimized)
```http
HTTP/1.1 200 OK
Vary: X-Up-Target

<div class="user-details">
  <!-- Only the targeted fragment -->
</div>
```

View File

@@ -53,11 +53,9 @@ fn do() ! {
mut cmd := Command{
name: 'hero'
description: 'Your HERO toolset.'
version: '1.0.35'
version: '1.0.36'
}
// herocmds.cmd_run_add_flags(mut cmd)
mut toinstall := false
if !osal.cmd_exists('mc') || !osal.cmd_exists('redis-cli') {
toinstall = true
@@ -86,11 +84,13 @@ fn do() ! {
base.redis_install()!
herocmds.cmd_run(mut cmd)
herocmds.cmd_git(mut cmd)
herocmds.cmd_generator(mut cmd)
herocmds.cmd_docusaurus(mut cmd)
herocmds.cmd_web(mut cmd)
herocmds.cmd_sshagent(mut cmd)
herocmds.cmd_atlas(mut cmd)
cmd.setup()
cmd.parse(os.args)
@@ -103,4 +103,4 @@ fn main() {
print_backtrace()
exit(1)
}
}
}

compare_dirs.sh Executable file
View File

@@ -0,0 +1,47 @@
#!/bin/bash
# Usage: ./compare_dirs.sh <branch1> <branch2> <dir_path>
# Example: ./compare_dirs.sh main feature-branch src
if [ "$#" -ne 3 ]; then
echo "Usage: $0 <branch1> <branch2> <dir_path>"
exit 1
fi
BRANCH1=$1
BRANCH2=$2
DIR_PATH=$3
TMP_DIR1=$(mktemp -d)
TMP_DIR2=$(mktemp -d)
# Ensure we're in a Git repo
if ! git rev-parse --is-inside-work-tree > /dev/null 2>&1; then
    echo "Error: Not inside a Git repository"
    exit 1
fi

# Remove the temporary worktrees on any exit, including the error paths below
cleanup() {
    git worktree remove "$TMP_DIR1" --force > /dev/null 2>&1
    git worktree remove "$TMP_DIR2" --force > /dev/null 2>&1
}
trap cleanup EXIT

# Fetch branch contents without switching branches
git worktree add "$TMP_DIR1" "$BRANCH1" > /dev/null 2>&1
git worktree add "$TMP_DIR2" "$BRANCH2" > /dev/null 2>&1
# Check if the directory exists in both branches
if [ ! -d "$TMP_DIR1/$DIR_PATH" ]; then
    echo "Error: $DIR_PATH does not exist in $BRANCH1"
    exit 1
fi
if [ ! -d "$TMP_DIR2/$DIR_PATH" ]; then
    echo "Error: $DIR_PATH does not exist in $BRANCH2"
    exit 1
fi
# Compare directories
echo "Comparing $DIR_PATH between $BRANCH1 and $BRANCH2..."
diff -qr "$TMP_DIR1/$DIR_PATH" "$TMP_DIR2/$DIR_PATH"
# Detailed differences
diff -u -r "$TMP_DIR1/$DIR_PATH" "$TMP_DIR2/$DIR_PATH"
# Temporary worktrees are removed by the EXIT trap above

View File

@@ -5,8 +5,8 @@ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"
# Copy installation files
cp ../../install_v.sh ./scripts/install_v.sh
cp ../../install_herolib.vsh ./scripts/install_herolib.vsh
cp ../../scripts/install_v.sh ./scripts/install_v.sh
cp ../../scripts/install_herolib.vsh ./scripts/install_herolib.vsh
# Docker image and container names
DOCKER_IMAGE_NAME="herolib"

examples/ai/aiclient.vsh Executable file
View File

@@ -0,0 +1,29 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.ai.client
mut cl := client.new()!
// response := cl.llms.llm_local.chat_completion(
// 	message: 'Explain quantum computing in simple terms'
// 	temperature: 0.5
// 	max_completion_tokens: 1024
// )!

response := cl.llms.llm_maverick.chat_completion(
	message: 'Explain quantum computing in simple terms'
	temperature: 0.5
	max_completion_tokens: 1024
)!
println(response)

// response := cl.llms.llm_embed_local.embed(input: [
// 	'The food was delicious and the waiter..',
// ])!
// response2 := cl.llms.llm_embed.embed(input: [
// 	'The food was delicious and the waiter..',
// ])!
// println(response2) // response2 is only defined in the commented block above

examples/ai/flow_test1.vsh Executable file
View File

@@ -0,0 +1,8 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.ai.client
import incubaid.herolib.ai.flow_calendar
prompt := 'Explain quantum computing in simple terms'
// NOTE: a flow coordinator must be constructed before this call;
// its setup is not shown in this example.
flow_calendar.start(mut coordinator, prompt)!

examples/ai/groq.vsh Executable file
View File

@@ -0,0 +1,26 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.openai
import os
import incubaid.herolib.core.playcmds
// models see https://console.groq.com/docs/models
playcmds.run(
	heroscript: '
	!!openai.configure name:"groq"
		url:"https://api.groq.com/openai/v1"
		model_default:"openai/gpt-oss-120b"
	'
	reset: true
)!

mut client := openai.get(name: 'groq')!

response := client.chat_completion(
	message: 'Explain quantum computing in simple terms'
	temperature: 0.5
	max_completion_tokens: 1024
)!
println(response.result)

View File

@@ -2,7 +2,7 @@
import incubaid.herolib.clients.jina
mut jina_client := jina.get()!
mut jina_client := jina.new()!
health := jina_client.health()!
println('Server health: ${health}')
@@ -34,7 +34,7 @@ train_result := jina_client.train(
label: 'positive'
},
jina.TrainingExample{
image: 'https://letsenhance.io/static/73136da51c245e80edc6ccfe44888a99/1015f/MainBefore.jpg'
image: 'https://picsum.photos/id/11/367/267'
label: 'negative'
},
]
@@ -50,7 +50,7 @@ classify_result := jina_client.classify(
text: 'A photo of a cat'
},
jina.ClassificationInput{
image: 'https://letsenhance.io/static/73136da51c245e80edc6ccfe44888a99/1015f/MainBefore.jpg'
image: 'https://picsum.photos/id/11/367/267'
},
]
labels: ['cat', 'dog']

examples/ai/jina_simple.vsh Executable file
View File

@@ -0,0 +1,30 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.jina
import os
import json
mut j := jina.new()!
embeddings := j.create_embeddings(
	input: ['Hello world', 'This is a test']
	model: .jina_embeddings_v3
	task: 'separation'
) or {
	println('Error creating embeddings: ${err}')
	return
}

println('Embeddings created successfully!')
println('Model: ${embeddings.model}')
println('Dimension: ${embeddings.dimension}')
println('Number of embeddings: ${embeddings.data.len}')

// If there are embeddings, print the first one (truncated)
if embeddings.data.len > 0 {
	first_embedding := embeddings.data[0]
	println('First embedding (first 5 values): ${first_embedding.embedding[0..5]}')
}
// Usage information
println('Token usage: ${embeddings.usage.total_tokens} ${embeddings.usage.unit}')

View File

@@ -0,0 +1,120 @@
# OpenRouter Examples - Proof of Concept
## Overview
This folder contains **example scripts** demonstrating how to use the **OpenAI client** (`herolib.clients.openai`) configured to work with **OpenRouter**.
* **Goal:** Show how to send messages to OpenRouter models using the OpenAI client, run a **two-model pipeline** for code enhancement, and illustrate multi-model usage.
* **Key Insight:** The OpenAI client is OpenRouter-compatible by design - simply configure it with OpenRouter's base URL (`https://openrouter.ai/api/v1`) and API key.
---
## Configuration
All examples configure the OpenAI client to use OpenRouter by setting:
* **URL**: `https://openrouter.ai/api/v1`
* **API Key**: Read from `OPENROUTER_API_KEY` environment variable
* **Model**: OpenRouter model IDs (e.g., `qwen/qwen-2.5-coder-32b-instruct`)
Example configuration:
```v
playcmds.run(
	heroscript: '
	!!openai.configure
		name: "default"
		url: "https://openrouter.ai/api/v1"
		model_default: "qwen/qwen-2.5-coder-32b-instruct"
	'
)!
```
---
## Example Scripts
### 1. `openai_init.vsh`
* **Purpose:** Basic initialization example showing OpenAI client configured for OpenRouter.
* **Demonstrates:** Client configuration and simple chat completion.
* **Usage:**
```bash
examples/ai/openai/openai_init.vsh
```
---
### 2. `openai_hello.vsh`
* **Purpose:** Simple hello message to OpenRouter.
* **Demonstrates:** Sending a single message using `client.chat_completion`.
* **Usage:**
```bash
examples/ai/openai/openai_hello.vsh
```
* **Expected output:** A friendly "hello" response from the AI and token usage.
---
### 3. `openai_example.vsh`
* **Purpose:** Demonstrates basic conversation features.
* **Demonstrates:**
* Sending a single message
* Using system + user messages for conversation context
* Printing token usage
* **Usage:**
```bash
examples/ai/openai/openai_example.vsh
```
* **Expected output:** Responses from the AI for both simple and system-prompt conversations.
---
### 4. `openai_two_model_pipeline.vsh`
* **Purpose:** Two-model code enhancement pipeline (proof of concept).
* **Demonstrates:**
* Model A (`Qwen3 Coder`) suggests code improvements.
* Model B (`morph-v3-fast`) applies the suggested edits.
* Tracks tokens and shows before/after code.
* Using two separate OpenAI client instances with different models
* **Usage:**
```bash
examples/ai/openai/openai_two_model_pipeline.vsh
```
* **Expected output:**
* Original code
* Suggested edits
* Final updated code
* Token usage summary
---
## Environment Variables
Set your OpenRouter API key before running the examples:
```bash
export OPENROUTER_API_KEY="sk-or-v1-..."
```
The OpenAI client automatically detects when the URL contains "openrouter" and will use the `OPENROUTER_API_KEY` environment variable.
---
## Notes
1. **No separate OpenRouter client needed** - The OpenAI client is fully compatible with OpenRouter's API.
2. All scripts configure the OpenAI client with OpenRouter's base URL.
3. The two-model pipeline uses **two separate client instances** (one per model) to demonstrate multi-model workflows.
4. Scripts can be run individually using the `v -enable-globals run` command.
5. The two-model pipeline is a **proof of concept**; the flow can later be extended to multiple files or OpenRPC specs.

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds
// Configure OpenAI client to use OpenRouter
playcmds.run(
	heroscript: '
	!!openai.configure
		name: "default"
		url: "https://openrouter.ai/api/v1"
		model_default: "qwen/qwen-2.5-coder-32b-instruct"
	'
)!

// Get the client instance
mut client := openai.get()!

println('🤖 OpenRouter Client Example (using OpenAI client)')
println('='.repeat(50))
println('')

// Example 1: Simple message
println('Example 1: Simple Hello')
println('='.repeat(50))
mut r := client.chat_completion(
	model: 'qwen/qwen-2.5-coder-32b-instruct'
	message: 'Say hello in a creative way!'
	temperature: 0.7
	max_completion_tokens: 150
)!
println('AI: ${r.result}')
println('Tokens: ${r.usage.total_tokens}\n')

// Example 2: Conversation with system prompt
println('Example 2: Conversation with System Prompt')
println('='.repeat(50))
r = client.chat_completion(
	model: 'qwen/qwen-2.5-coder-32b-instruct'
	messages: [
		openai.Message{
			role: .system
			content: 'You are a helpful coding assistant who speaks concisely.'
		},
		openai.Message{
			role: .user
			content: 'What is V programming language?'
		},
	]
	temperature: 0.3
	max_completion_tokens: 200
)!
println('AI: ${r.result}')
println('Tokens: ${r.usage.total_tokens}\n')

println('='.repeat(50))
println(' Examples completed successfully!')

View File

@@ -0,0 +1,41 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds
// Configure OpenAI client to use OpenRouter
playcmds.run(
	heroscript: '
	!!openai.configure
		name: "default"
		url: "https://openrouter.ai/api/v1"
		model_default: "qwen/qwen-2.5-coder-32b-instruct"
	'
)!

// Get the client instance
mut client := openai.get() or {
	eprintln('Failed to get client: ${err}')
	return
}

println('Sending message to OpenRouter...\n')

// Simple hello message
response := client.chat_completion(
	model: 'qwen/qwen-2.5-coder-32b-instruct'
	message: 'Say hello in a friendly way!'
	temperature: 0.7
	max_completion_tokens: 100
) or {
	eprintln('Failed to get completion: ${err}')
	return
}

println('Response from AI:')
println('='.repeat(50))
println(response.result)
println('='.repeat(50))
println('\nTokens used: ${response.usage.total_tokens}')
println(' - Prompt: ${response.usage.prompt_tokens}')
println(' - Completion: ${response.usage.completion_tokens}')

View File

@@ -3,6 +3,8 @@
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds
// to set the API key, either set it here, or set the OPENAI_API_KEY environment variable
playcmds.run(
heroscript: '
!!openai.configure name: "default" key: "" url: "https://openrouter.ai/api/v1" model_default: "gpt-oss-120b"
@@ -18,3 +20,5 @@ mut r := client.chat_completion(
temperature: 0.3
max_completion_tokens: 1024
)!
println(r.result)

View File

@@ -0,0 +1,134 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds
// Sample code file to be improved
const sample_code = '
def calculate_sum(numbers):
    total = 0
    for i in range(len(numbers)):
        total = total + numbers[i]
    return total

def find_max(lst):
    max = lst[0]
    for i in range(1, len(lst)):
        if lst[i] > max:
            max = lst[i]
    return max
'

// Configure two OpenAI client instances to use OpenRouter with different models
// Model A: Enhancement model (Qwen Coder)
playcmds.run(
	heroscript: '
	!!openai.configure
		name: "enhancer"
		url: "https://openrouter.ai/api/v1"
		model_default: "qwen/qwen-2.5-coder-32b-instruct"
	'
)!

// Model B: Modification model (Llama 3.3 70B)
playcmds.run(
	heroscript: '
	!!openai.configure
		name: "modifier"
		url: "https://openrouter.ai/api/v1"
		model_default: "meta-llama/llama-3.3-70b-instruct"
	'
)!

mut enhancer := openai.get(name: 'enhancer') or { panic('Failed to get enhancer client: ${err}') }
mut modifier := openai.get(name: 'modifier') or { panic('Failed to get modifier client: ${err}') }

println('='.repeat(70))
println('🔧 Two-Model Code Enhancement Pipeline - Proof of Concept')
println('🔧 Using OpenAI client configured for OpenRouter')
println('='.repeat(70))
println('')

// Step 1: Get enhancement suggestions from Model A (Qwen Coder)
println('📝 STEP 1: Code Enhancement Analysis')
println('='.repeat(70))
println('Model: qwen/qwen-2.5-coder-32b-instruct')
println('Task: Analyze code and suggest improvements\n')

enhancement_prompt := 'You are a code enhancement agent.
Your job is to analyze the following Python code and propose improvements or fixes.
Output your response as **pure edits or diffs only**, not a full rewritten file.

Focus on:
- Performance improvements
- Pythonic idioms
- Bug fixes
- Code clarity

Here is the code to analyze:

${sample_code}

Provide specific edit instructions or diffs.'

println('🤖 Sending to enhancement model...')
enhancement_result := enhancer.chat_completion(
	message: enhancement_prompt
	temperature: 0.3
	max_completion_tokens: 2000
) or {
	eprintln(' Enhancement failed: ${err}')
	return
}

println('\n Enhancement suggestions received:')
println('='.repeat(70))
println(enhancement_result.result)
println('='.repeat(70))
println('Tokens used: ${enhancement_result.usage.total_tokens}\n')

// Step 2: Apply edits using Model B (Llama 3.3 70B)
println('\n📝 STEP 2: Apply Code Modifications')
println('='.repeat(70))
println('Model: meta-llama/llama-3.3-70b-instruct')
println('Task: Apply the suggested edits to produce updated code\n')

modification_prompt := 'You are a file editing agent.
Apply the given edits or diffs to the provided file.
Output the updated Python code only, without comments or explanations.

ORIGINAL CODE:
${sample_code}

EDITS TO APPLY:
${enhancement_result.result}

Output only the final, updated Python code.'

println('🤖 Sending to modification model...')
modification_result := modifier.chat_completion(
	message: modification_prompt
	temperature: 0.1
	max_completion_tokens: 2000
) or {
	eprintln(' Modification failed: ${err}')
	return
}

println('\n Modified code received:')
println('='.repeat(70))
println(modification_result.result)
println('='.repeat(70))
println('Tokens used: ${modification_result.usage.total_tokens}\n')

// Summary
println('\n📊 PIPELINE SUMMARY')
println('='.repeat(70))
println('Original code length: ${sample_code.len} chars')
println('Enhancement model: qwen/qwen-2.5-coder-32b-instruct')
println('Enhancement tokens: ${enhancement_result.usage.total_tokens}')
println('Modification model: meta-llama/llama-3.3-70b-instruct')
println('Modification tokens: ${modification_result.usage.total_tokens}')
println('Total tokens: ${enhancement_result.usage.total_tokens + modification_result.usage.total_tokens}')
println('='.repeat(70))
println('\n Two-model pipeline completed successfully!')

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds
playcmds.run(
	heroscript: '
	!!openai.configure name:"default"
		url:"https://openrouter.ai/api/v1"
		model_default:"gpt-oss-120b"
	'
	reset: false
)!

// Get the client instance
mut client := openai.get() or {
	eprintln('Failed to get client: ${err}')
	return
}

println(client.list_models()!)

println('Sending message to OpenRouter...\n')

// Simple hello message
response := client.chat_completion(
	model: 'qwen/qwen-2.5-coder-32b-instruct'
	message: 'Say hello in a friendly way!'
	temperature: 0.7
	max_completion_tokens: 100
) or {
	eprintln('Failed to get completion: ${err}')
	return
}

println('Response from AI:')
println('='.repeat(50))
println(response.result)
println('='.repeat(50))
println('\nTokens used: ${response.usage.total_tokens}')
println(' - Prompt: ${response.usage.prompt_tokens}')
println(' - Completion: ${response.usage.completion_tokens}')

examples/ai/readme.md Normal file
View File

@@ -0,0 +1,9 @@
Configuration can happen by means of environment variables, e.g.:
```bash
export OPENROUTER_API_KEY='sk-or-v1-..'
export JINAKEY='jina_..'
export GROQKEY='gsk_'
```

View File

@@ -1,71 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
module main
import incubaid.herolib.clients.openai
import os
fn test1(mut client openai.OpenAI) ! {
	instruction := '
You are a template language converter. You convert Pug templates to Jet templates.

The target template language, Jet, is defined as follows:
'
	// Create a chat completion request
	res := client.chat_completion(
		msgs: openai.Messages{
			messages: [
				openai.Message{
					role: .user
					content: 'What are the key differences between Groq and other AI inference providers?'
				},
			]
		}
	)!
	// Print the response
	println('\nGroq AI Response:')
	println('==================')
	println(res.choices[0].message.content)
	println('\nUsage Statistics:')
	println('Prompt tokens: ${res.usage.prompt_tokens}')
	println('Completion tokens: ${res.usage.completion_tokens}')
	println('Total tokens: ${res.usage.total_tokens}')
}

fn test2(mut client openai.OpenAI) ! {
	// Create a chat completion request
	res := client.chat_completion(
		model: 'deepseek-r1-distill-llama-70b'
		msgs: openai.Messages{
			messages: [
				openai.Message{
					role: .user
					content: 'A story of 10 lines?'
				},
			]
		}
	)!
	println('\nGroq AI Response:')
	println('==================')
	println(res.choices[0].message.content)
	println('\nUsage Statistics:')
	println('Prompt tokens: ${res.usage.prompt_tokens}')
	println('Completion tokens: ${res.usage.completion_tokens}')
	println('Total tokens: ${res.usage.total_tokens}')
}

println("
TO USE:
export AIKEY='gsk_...'
export AIURL='https://api.groq.com/openai/v1'
export AIMODEL='llama-3.3-70b-versatile'
")

mut client := openai.get(name: 'test')!
println(client)
// test1(mut client)!
test2(mut client)!

391
examples/builder/zosbuilder.vsh Executable file
View File

@@ -0,0 +1,391 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.builder
import incubaid.herolib.core.pathlib
import os
// Configuration for the remote builder
// Update these values for your remote machine
const remote_host = 'root@65.109.31.171' // Change to your remote host
const remote_port = 22 // SSH port
// Build configuration
const build_dir = '/root/zosbuilder'
const repo_url = 'https://git.ourworld.tf/tfgrid/zosbuilder'
// Optional: Set to true to upload kernel to S3
const upload_kernel = false
fn main() {
println('=== Zero OS Builder - Remote Build System ===\n')
// Initialize builder
mut b := builder.new() or {
eprintln('Failed to initialize builder: ${err}')
exit(1)
}
// Connect to remote node
println('Connecting to remote builder: ${remote_host}:${remote_port}')
mut node := b.node_new(
ipaddr: '${remote_host}:${remote_port}'
name: 'zosbuilder'
) or {
eprintln('Failed to connect to remote node: ${err}')
exit(1)
}
// Run the build process
build_zos(mut node) or {
eprintln('Build failed: ${err}')
exit(1)
}
println('\n=== Build completed successfully! ===')
}
fn build_zos(mut node builder.Node) ! {
println('\n--- Step 1: Installing prerequisites ---')
install_prerequisites(mut node)!
println('\n--- Step 2: Cloning zosbuilder repository ---')
clone_repository(mut node)!
println('\n--- Step 3: Creating RFS configuration ---')
create_rfs_config(mut node)!
println('\n--- Step 4: Running build ---')
run_build(mut node)!
println('\n--- Step 5: Checking build artifacts ---')
check_artifacts(mut node)!
println('\n=== Build completed successfully! ===')
}
fn install_prerequisites(mut node builder.Node) ! {
println('Detecting platform...')
// Check platform type
if node.platform == .ubuntu {
println('Installing Ubuntu/Debian prerequisites...')
// Update package list and install all required packages
node.exec_cmd(
cmd: '
apt-get update
apt-get install -y \\
build-essential \\
upx-ucl \\
binutils \\
git \\
wget \\
curl \\
qemu-system-x86 \\
podman \\
musl-tools \\
cpio \\
xz-utils \\
bc \\
flex \\
bison \\
libelf-dev \\
libssl-dev
# Install rustup and Rust toolchain
if ! command -v rustup &> /dev/null; then
echo "Installing rustup..."
curl --proto "=https" --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
source "\$HOME/.cargo/env"
fi
# Add Rust musl target
source "\$HOME/.cargo/env"
rustup target add x86_64-unknown-linux-musl
'
name: 'install_ubuntu_packages'
reset: true
)!
} else if node.platform == .alpine {
println('Installing Alpine prerequisites...')
node.exec_cmd(
cmd: '
apk add --no-cache \\
build-base \\
rust \\
cargo \\
upx \\
git \\
wget \\
qemu-system-x86 \\
podman
# Add Rust musl target
rustup target add x86_64-unknown-linux-musl || echo "rustup not available"
'
name: 'install_alpine_packages'
reset: true
)!
} else {
return error('Unsupported platform: ${node.platform}. Only Ubuntu/Debian and Alpine are supported.')
}
println('Prerequisites installed successfully')
}
fn clone_repository(mut node builder.Node) ! {
// Clean up disk space first
println('Cleaning up disk space...')
node.exec_cmd(
cmd: '
# Remove old build directories if they exist
rm -rf ${build_dir} || true
# Clean up podman/docker cache to free space
podman system prune -af || true
# Clean up package manager cache
if command -v apt-get &> /dev/null; then
apt-get clean || true
fi
# Show disk space
df -h /
'
name: 'cleanup_disk_space'
stdout: true
)!
// Clone the repository
println('Cloning from ${repo_url}...')
node.exec_cmd(
cmd: '
git clone ${repo_url} ${build_dir}
cd ${build_dir}
git log -1 --oneline
'
name: 'clone_zosbuilder'
stdout: true
)!
println('Repository cloned successfully')
}
fn create_rfs_config(mut node builder.Node) ! {
println('Creating config/rfs.conf...')
rfs_config := 'S3_ENDPOINT="http://wizenoze.grid.tf:3900"
S3_REGION="garage"
S3_BUCKET="zos"
S3_PREFIX="store"
S3_ACCESS_KEY="<put key here>"
S3_SECRET_KEY="<put key here>"
WEB_ENDPOINT=""
MANIFESTS_SUBPATH="flists"
READ_ACCESS_KEY="<put key here>"
READ_SECRET_KEY="<put key here>"
ROUTE_ENDPOINT="http://wizenoze.grid.tf:3900"
ROUTE_PATH="/zos/store"
ROUTE_REGION="garage"
KEEP_S3_FALLBACK="false"
UPLOAD_MANIFESTS="true"
'
// Create config directory if it doesn't exist
node.exec_cmd(
cmd: 'mkdir -p ${build_dir}/config'
name: 'create_config_dir'
stdout: false
)!
// Write the RFS configuration file
node.file_write('${build_dir}/config/rfs.conf', rfs_config)!
// Verify the file was created
result := node.exec(
cmd: 'cat ${build_dir}/config/rfs.conf'
stdout: false
)!
println('RFS configuration created successfully')
println('Config preview:')
println(result)
// Skip youki component by removing it from sources.conf
println('\nRemoving youki from sources.conf (requires SSH keys)...')
node.exec_cmd(
cmd: '
# Remove any line containing youki from sources.conf
grep -v "youki" ${build_dir}/config/sources.conf > ${build_dir}/config/sources.conf.tmp
mv ${build_dir}/config/sources.conf.tmp ${build_dir}/config/sources.conf
# Verify it was removed
echo "Updated sources.conf:"
cat ${build_dir}/config/sources.conf
'
name: 'remove_youki'
stdout: true
)!
println('youki component skipped')
}
fn run_build(mut node builder.Node) ! {
println('Starting build process...')
println('This may take 15-30 minutes depending on your system...')
println('Status updates will be printed every 2 minutes...\n')
// Check disk space before building
println('Checking disk space...')
disk_info := node.exec(
cmd: 'df -h ${build_dir}'
stdout: false
)!
println(disk_info)
// Clean up any previous build artifacts and corrupted databases
println('Cleaning up previous build artifacts...')
node.exec_cmd(
cmd: '
cd ${build_dir}
# Remove dist directory to clean up any corrupted databases
rm -rf dist/
# Clean up any temporary files
rm -rf /tmp/rfs-* || true
# Show available disk space after cleanup
df -h ${build_dir}
'
name: 'cleanup_before_build'
stdout: true
)!
// Make scripts executable and run build with periodic status messages
mut build_cmd := '
cd ${build_dir}
# Source Rust environment
source "\$HOME/.cargo/env"
# Make scripts executable
chmod +x scripts/build.sh scripts/clean.sh
# Set environment variables
export UPLOAD_KERNEL=${upload_kernel}
export UPLOAD_MANIFESTS=false
# Create a wrapper script that prints status every 2 minutes
cat > /tmp/build_with_status.sh << "EOF"
#!/bin/bash
set -e
# Source Rust environment
source "\$HOME/.cargo/env"
# Start the build in background
./scripts/build.sh &
BUILD_PID=\$!
# Print status every 2 minutes while build is running
COUNTER=0
while kill -0 \$BUILD_PID 2>/dev/null; do
sleep 120
COUNTER=\$((COUNTER + 2))
echo ""
echo "=== Build still in progress... (\${COUNTER} minutes elapsed) ==="
echo ""
done
# Wait for build to complete and get exit code
wait \$BUILD_PID
EXIT_CODE=\$?
if [ \$EXIT_CODE -eq 0 ]; then
echo ""
echo "=== Build completed successfully after \${COUNTER} minutes ==="
else
echo ""
echo "=== Build failed after \${COUNTER} minutes with exit code \$EXIT_CODE ==="
fi
exit \$EXIT_CODE
EOF
chmod +x /tmp/build_with_status.sh
/tmp/build_with_status.sh
' // Execute build with output
result := node.exec_cmd(
cmd: build_cmd
name: 'zos_build'
stdout: true
reset: true
period: 0 // Don't cache, always rebuild
)!
println('\nBuild completed!')
println(result)
}
fn check_artifacts(mut node builder.Node) ! {
println('Checking build artifacts in ${build_dir}/dist/...')
// List the dist directory
result := node.exec(
cmd: 'ls -lh ${build_dir}/dist/'
stdout: true
)!
println('\nBuild artifacts:')
println(result)
// Check for expected files
vmlinuz_exists := node.file_exists('${build_dir}/dist/vmlinuz.efi')
initramfs_exists := node.file_exists('${build_dir}/dist/initramfs.cpio.xz')
if vmlinuz_exists && initramfs_exists {
println('\n Build artifacts created successfully:')
println(' - vmlinuz.efi (Kernel with embedded initramfs)')
println(' - initramfs.cpio.xz (Standalone initramfs archive)')
// Get file sizes
size_info := node.exec(
cmd: 'du -h ${build_dir}/dist/vmlinuz.efi ${build_dir}/dist/initramfs.cpio.xz'
stdout: false
)!
println('\nFile sizes:')
println(size_info)
} else {
return error('Build artifacts not found. Build may have failed.')
}
}
// Download artifacts to local machine
fn download_artifacts(mut node builder.Node, local_dest string) ! {
println('Downloading artifacts to local machine...')
mut dest_path := pathlib.get_dir(path: local_dest, create: true)!
println('Downloading to ${dest_path.path}...')
// Download the entire dist directory
node.download(
source: '${build_dir}/dist/'
dest: dest_path.path
)!
println('\n Artifacts downloaded successfully to ${dest_path.path}')
// List downloaded files (list locally: dest_path is on this machine, not the remote node)
println('\nDownloaded files:')
res := os.execute('ls -lh ${dest_path.path}')
if res.exit_code != 0 {
println('Could not list local files')
return
}
println(res.output)
}

View File

@@ -0,0 +1,224 @@
# Zero OS Builder - Remote Build System
This example demonstrates how to build [Zero OS (zosbuilder)](https://git.ourworld.tf/tfgrid/zosbuilder) on a remote machine using the herolib builder module.
## Overview
The zosbuilder creates a Zero OS Alpine Initramfs with:
- Alpine Linux 3.22 base
- Custom kernel with embedded initramfs
- ThreeFold components (zinit, rfs, mycelium, zosstorage)
- Optimized size with UPX compression
- Two-stage module loading
## Prerequisites
### Local Machine
- V compiler installed
- SSH access to a remote build machine
- herolib installed
### Remote Build Machine
The script will automatically install these on the remote machine:
- **Ubuntu/Debian**: build-essential, rustc, cargo, upx-ucl, binutils, git, wget, qemu-system-x86, podman, musl-tools
- **Alpine Linux**: build-base, rust, cargo, upx, git, wget, qemu-system-x86, podman
- Rust musl target (x86_64-unknown-linux-musl)
## Configuration
Edit the constants in `zosbuilder.vsh`:
```v
// Remote machine connection
const remote_host = 'root@195.192.213.2' // Your remote host
const remote_port = 22 // SSH port
// Build configuration
const build_dir = '/root/zosbuilder' // Build directory on remote
const repo_url = 'https://git.ourworld.tf/tfgrid/zosbuilder'
// Optional: Upload kernel to S3
const upload_kernel = false
```
## Usage
### Basic Build
```bash
# Make the script executable
chmod +x zosbuilder.vsh
# Run the build
./zosbuilder.vsh
```
### What the Script Does
1. **Connects to Remote Machine**: Establishes SSH connection to the build server
2. **Installs Prerequisites**: Automatically installs all required build tools
3. **Clones Repository**: Fetches the latest zosbuilder code
4. **Runs Build**: Executes the build process (takes 15-30 minutes)
5. **Verifies Artifacts**: Checks that build outputs were created successfully
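In code terms, the whole workflow boils down to a few calls (a condensed sketch of the script's `main`; the host placeholder is illustrative):
```v
import incubaid.herolib.builder

mut b := builder.new()!
mut node := b.node_new(ipaddr: 'root@<your-host>:22', name: 'zosbuilder')!
build_zos(mut node)! // installs prerequisites, clones, configures, builds, verifies
```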
### Build Output
The build creates two main artifacts in `${build_dir}/dist/`:
- `vmlinuz.efi` - Kernel with embedded initramfs (bootable)
- `initramfs.cpio.xz` - Standalone initramfs archive
## Build Process Details
The zosbuilder follows these phases:
### Phase 1: Environment Setup
- Creates build directories
- Installs build dependencies
- Sets up Rust musl target
### Phase 2: Alpine Base
- Downloads Alpine 3.22 miniroot
- Extracts to initramfs directory
- Installs packages from config/packages.list
### Phase 3: Component Building
- Builds zinit (init system)
- Builds rfs (remote filesystem)
- Builds mycelium (networking)
- Builds zosstorage (storage orchestration)
### Phase 4: System Configuration
- Replaces /sbin/init with zinit
- Copies zinit configuration
- Sets up 2-stage module loading
- Configures system services
### Phase 5: Optimization
- Removes docs, man pages, locales
- Strips executables and libraries
- UPX compresses all binaries
- Aggressive cleanup
### Phase 6: Packaging
- Creates initramfs.cpio.xz with XZ compression
- Builds kernel with embedded initramfs
- Generates vmlinuz.efi
- Optionally uploads to S3
## Advanced Usage
### Download Artifacts to Local Machine
Add this to your script after the build completes:
```v
// Download artifacts to local machine
download_artifacts(mut node, '/tmp/zos-artifacts') or {
eprintln('Failed to download artifacts: ${err}')
}
```
### Custom Build Configuration
You can modify the build by editing files on the remote machine before building:
```v
// After cloning, before building
node.file_write('${build_dir}/config/packages.list', 'your custom packages')!
```
### Rebuild Without Re-cloning
To rebuild without re-cloning the repository, modify the script to skip the clone step:
```v
// Comment out the clone_repository call
// clone_repository(mut node)!
// Or just run the build directly
node.exec_cmd(
cmd: 'cd ${build_dir} && ./scripts/build.sh'
name: 'zos_rebuild'
)!
```
## Testing the Build
After building, you can test the kernel with QEMU:
```bash
# On the remote machine
cd /root/zosbuilder
./scripts/test-qemu.sh
```
## Troubleshooting
### Build Fails
1. Check the build output for specific errors
2. Verify all prerequisites are installed
3. Ensure sufficient disk space (at least 5GB)
4. Check internet connectivity for downloading components
### SSH Connection Issues
1. Verify SSH access: `ssh root@195.192.213.2`
2. Check SSH key authentication is set up
3. Verify the remote host and port are correct
### Missing Dependencies
The script automatically installs dependencies, but if manual installation is needed:
**Ubuntu/Debian:**
```bash
sudo apt-get update
sudo apt-get install -y build-essential rustc cargo upx-ucl binutils git wget qemu-system-x86 podman musl-tools
rustup target add x86_64-unknown-linux-musl
```
**Alpine Linux:**
```bash
apk add --no-cache build-base rust cargo upx git wget qemu-system-x86 podman
rustup target add x86_64-unknown-linux-musl
```
## Integration with CI/CD
This builder can be integrated into CI/CD pipelines:
```v
// Example: Build and upload to artifact storage
fn ci_build() ! {
mut b := builder.new()!
mut node := b.node_new(ipaddr: '${ci_builder_host}')!
build_zos(mut node)!
// Upload to artifact storage
node.exec_cmd(
cmd: 's3cmd put ${build_dir}/dist/* s3://artifacts/zos/'
name: 'upload_artifacts'
)!
}
```
## Related Examples
- `simple.vsh` - Basic builder usage
- `remote_executor/` - Remote code execution
- `simple_ip4.vsh` - IPv4 connection example
- `simple_ip6.vsh` - IPv6 connection example
## References
- [zosbuilder Repository](https://git.ourworld.tf/tfgrid/zosbuilder)
- [herolib Builder Documentation](../../lib/builder/readme.md)
- [Zero OS Documentation](https://manual.grid.tf/)
## License
This example follows the same license as herolib.

View File

@@ -1,50 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.jina
import os
import json
fn main() {
// Initialize Jina client
mut j := jina.Jina{
name: 'test_client'
secret: os.getenv('JINAKEY')
}
// Initialize the client
j = jina.obj_init(j) or {
println('Error initializing Jina client: ${err}')
return
}
// Check if authentication works
auth_ok := j.check_auth() or {
println('Authentication failed: ${err}')
return
}
println('Authentication successful: ${auth_ok}')
// Create embeddings
model := 'jina-embeddings-v2-base-en'
input := ['Hello world', 'This is a test']
embeddings := j.create_embeddings(input, model, 'search') or {
println('Error creating embeddings: ${err}')
return
}
println('Embeddings created successfully!')
println('Model: ${embeddings.model}')
println('Dimension: ${embeddings.dimension}')
println('Number of embeddings: ${embeddings.data.len}')
// If there are embeddings, print the first one (truncated)
if embeddings.data.len > 0 {
first_embedding := embeddings.data[0]
println('First embedding (first 5 values): ${first_embedding.embedding[0..5]}')
}
// Usage information
println('Token usage: ${embeddings.usage.total_tokens} ${embeddings.usage.unit}')
}

View File

@@ -0,0 +1,182 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.pathlib
import incubaid.herolib.ui.console
import incubaid.herolib.ai.client
import os
fn main() {
console.print_header('Code Generator - V File Analyzer Using AI')
// Find herolib root directory using @FILE
script_dir := os.dir(@FILE)
// Navigate from examples/core/code to root: up 3 levels
herolib_root := os.dir(os.dir(os.dir(script_dir)))
console.print_item('HeroLib Root: ${herolib_root}')
// The directory we want to analyze (lib/core in this case)
target_dir := herolib_root + '/lib/core'
console.print_item('Target Directory: ${target_dir}')
console.print_lf(1)
// Load instruction files from aiprompts
console.print_item('Loading instruction files...')
mut ai_instructions_file := pathlib.get(herolib_root +
'/aiprompts/ai_instructions_hero_models.md')
mut vlang_core_file := pathlib.get(herolib_root + '/aiprompts/vlang_herolib_core.md')
ai_instructions_content := ai_instructions_file.read()!
vlang_core_content := vlang_core_file.read()!
console.print_green(' Instruction files loaded successfully')
console.print_lf(1)
// Initialize AI client
console.print_item('Initializing AI client...')
mut aiclient := client.new()!
console.print_green(' AI client initialized')
console.print_lf(1)
// Get all V files from target directory
console.print_item('Scanning directory for V files...')
mut target_path := pathlib.get_dir(path: target_dir, create: false)!
mut all_files := target_path.list(
regex: [r'\.v$']
recursive: true
)!
console.print_item('Found ${all_files.paths.len} total V files')
// TODO: Walk over all files which do NOT end with _test.v and do NOT start with factory
// Each file becomes a src_file_content object
mut files_to_process := []pathlib.Path{}
for file in all_files.paths {
file_name := file.name()
// Skip test files
if file_name.ends_with('_test.v') {
continue
}
// Skip factory files
if file_name.starts_with('factory') {
continue
}
files_to_process << file
}
console.print_green(' After filtering: ${files_to_process.len} files to process')
console.print_lf(2)
// Process each file with AI
total_files := files_to_process.len
for idx, mut file in files_to_process {
current_idx := idx + 1
process_file_with_ai(mut aiclient, mut file, ai_instructions_content, vlang_core_content,
current_idx, total_files)!
}
console.print_lf(1)
console.print_header(' Code Generation Complete')
console.print_item('Processed ${files_to_process.len} files')
console.print_lf(1)
}
fn process_file_with_ai(mut aiclient client.AIClient, mut file pathlib.Path, ai_instructions string, vlang_core string, current int, total int) ! {
file_name := file.name()
src_file_path := file.absolute()
console.print_item('[${current}/${total}] Analyzing: ${file_name}')
// Read the file content - this is the src_file_content
src_file_content := file.read()!
// Build comprehensive system prompt
// TODO: Load instructions from prompt files and use in prompt
// Build the user prompt with context
user_prompt := '
File: ${file_name}
Path: ${src_file_path}
Current content:
```v
${src_file_content}
```
Please improve this V file by:
1. Following V language best practices
2. Ensuring proper error handling with ! and or blocks
3. Adding clear documentation comments
4. Following herolib patterns and conventions
5. Improving code clarity and readability
Context from herolib guidelines:
VLANG HEROLIB CORE:
${vlang_core}
AI INSTRUCTIONS FOR HERO MODELS:
${ai_instructions}
Return ONLY the complete improved file wrapped in a ```v code block.
'
console.print_debug_title('Sending to AI', 'Calling AI model to improve ${file_name}...')
// TODO: Call AI client with model gemini-3-pro
aiclient.write_from_prompt(file, user_prompt, [.pro]) or {
console.print_stderr('Error processing ${file_name}: ${err}')
return
}
mut improved_file := pathlib.get(src_file_path + '.improved')
improved_content := improved_file.read()!
// Display improvements summary
sample_chars := 250
preview := if improved_content.len > sample_chars {
improved_content[..sample_chars] + '... (preview truncated)'
} else {
improved_content
}
console.print_debug_title('AI Analysis Results for ${file_name}', preview)
// Optional: Save improved version for review
// Uncomment to enable saving
// improved_file_path := src_file_path + '.improved'
// mut improved_file := pathlib.get_file(path: improved_file_path, create: true)!
// improved_file.write(improved_content)!
// console.print_green('✓ Improvements saved to: ${improved_file_path}')
console.print_lf(1)
}
// Extract V code from a markdown code block
fn extract_code_block(response string) string {
// Look for a ```v ... ``` block
start_marker := '```v'
end_marker := '```'
start_idx := response.index(start_marker) or {
// If no ```v fence, return the response as-is
return response
}
mut content_start := start_idx + start_marker.len
if content_start < response.len && response[content_start] == `\n` {
content_start++
}
// Search for the closing fence only after the opening one;
// searching the whole string would match the opening fence itself
rest := response[content_start..]
end_idx := rest.index(end_marker) or { return rest.trim_space() }
return rest[..end_idx].trim_space()
}

View File

@@ -0,0 +1,56 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.code
import incubaid.herolib.ui.console
import os
console.print_header('Code Parser Example - lib/core/pathlib Analysis')
console.print_lf(1)
pathlib_dir := os.home_dir() + '/code/github/incubaid/herolib/lib/core/pathlib'
// Step 1: List all V files
console.print_header('1. Listing V Files')
v_files := code.list_v_files(pathlib_dir)!
for file in v_files {
console.print_item(os.base(file))
}
console.print_lf(1)
// Step 2: Parse and analyze each file
console.print_header('2. Parsing Files - Summary')
for v_file_path in v_files {
content := os.read_file(v_file_path)!
vfile := code.parse_vfile(content)!
console.print_item('${os.base(v_file_path)}')
console.print_item(' Module: ${vfile.mod}')
console.print_item(' Imports: ${vfile.imports.len}')
console.print_item(' Structs: ${vfile.structs().len}')
console.print_item(' Functions: ${vfile.functions().len}')
}
console.print_lf(1)
// // Step 3: Find Path struct
// console.print_header('3. Analyzing Path Struct')
// path_code := code.get_type_from_module(pathlib_dir, 'Path')!
// console.print_stdout(path_code)
// console.print_lf(1)
// Step 4: List all public functions
console.print_header('4. Public Functions in pathlib')
for v_file_path in v_files {
content := os.read_file(v_file_path)!
vfile := code.parse_vfile(content)!
pub_functions := vfile.functions().filter(it.is_pub)
if pub_functions.len > 0 {
console.print_item('From ${os.base(v_file_path)}:')
for f in pub_functions {
console.print_item(' ${f.name}() -> ${f.result.typ.symbol()}')
}
}
}
console.print_lf(1)
console.print_green(' Analysis completed!')

View File

@@ -0,0 +1,337 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.flows
import incubaid.herolib.core.redisclient
import incubaid.herolib.ui.console
import incubaid.herolib.data.ourtime
fn main() {
console.print_header('Flow Runner Test Suite')
console.print_lf(1)
// Test 1: Basic Flow Execution
console.print_item('Test 1: Basic Flow with Successful Steps')
test_basic_flow()!
console.print_lf(1)
// Test 2: Error Handling
console.print_item('Test 2: Error Handling with Error Steps')
test_error_handling()!
console.print_lf(1)
// Test 3: Multiple Next Steps
console.print_item('Test 3: Multiple Next Steps')
test_multiple_next_steps()!
console.print_lf(1)
// Test 4: Redis State Retrieval
console.print_item('Test 4: Redis State Retrieval and JSON')
test_redis_state()!
console.print_lf(1)
// Test 5: Complex Flow Chain
console.print_item('Test 5: Complex Flow Chain')
test_complex_flow()!
console.print_lf(1)
console.print_header('All Tests Completed Successfully!')
}
fn test_basic_flow() ! {
mut redis := redisclient.core_get()!
redis.flushdb()!
mut coordinator := flows.new(
name: 'test_basic_flow'
redis: redis
ai: none
)!
// Step 1: Initialize
mut step1 := coordinator.step_new(
name: 'initialize'
description: 'Initialize test environment'
f: fn (mut s flows.Step) ! {
println(' Step 1: Initializing...')
s.context['init_time'] = ourtime.now().str()
}
)!
// Step 2: Process
mut step2 := coordinator.step_new(
name: 'process'
description: 'Process data'
f: fn (mut s flows.Step) ! {
println(' Step 2: Processing...')
s.context['processed'] = 'true'
}
)!
// Step 3: Finalize
mut step3 := coordinator.step_new(
name: 'finalize'
description: 'Finalize results'
f: fn (mut s flows.Step) ! {
println(' Step 3: Finalizing...')
s.context['status'] = 'completed'
}
)!
step1.next_step_add(step2)
step2.next_step_add(step3)
coordinator.run()!
// Verify Redis state
state := coordinator.get_all_steps_state()!
assert state.len >= 3, 'Expected at least 3 steps in Redis'
for step_state in state {
assert step_state['status'] == 'success', 'Expected all steps to be successful'
}
println(' Test 1 PASSED: All steps executed successfully')
coordinator.clear_redis()!
}
fn test_error_handling() ! {
mut redis := redisclient.core_get()!
redis.flushdb()!
mut coordinator := flows.new(
name: 'test_error_flow'
redis: redis
ai: none
)!
// Error step
mut error_recovery := coordinator.step_new(
name: 'error_recovery'
description: 'Recover from error'
f: fn (mut s flows.Step) ! {
println(' Error Step: Executing recovery...')
s.context['recovered'] = 'true'
}
)!
// Main step that fails
mut main_step := coordinator.step_new(
name: 'failing_step'
description: 'This step will fail'
f: fn (mut s flows.Step) ! {
println(' Main Step: Intentionally failing...')
return error('Simulated error for testing')
}
)!
main_step.error_step_add(error_recovery)
// Run and expect error
coordinator.run() or { println(' Error caught as expected: ${err.msg()}') }
// Verify error state in Redis
error_state := coordinator.get_step_state('failing_step')!
assert error_state['status'] == 'error', 'Expected step to be in error state'
recovery_state := coordinator.get_step_state('error_recovery')!
assert recovery_state['status'] == 'success', 'Expected error step to execute'
println(' Test 2 PASSED: Error handling works correctly')
coordinator.clear_redis()!
}
fn test_multiple_next_steps() ! {
mut redis := redisclient.core_get()!
redis.flushdb()!
mut coordinator := flows.new(
name: 'test_parallel_steps'
redis: redis
ai: none
)!
// Parent step
mut parent := coordinator.step_new(
name: 'parent_step'
description: 'Parent step with multiple children'
f: fn (mut s flows.Step) ! {
println(' Parent Step: Executing...')
}
)!
// Child steps
mut child1 := coordinator.step_new(
name: 'child_step_1'
description: 'First child'
f: fn (mut s flows.Step) ! {
println(' Child Step 1: Executing...')
}
)!
mut child2 := coordinator.step_new(
name: 'child_step_2'
description: 'Second child'
f: fn (mut s flows.Step) ! {
println(' Child Step 2: Executing...')
}
)!
mut child3 := coordinator.step_new(
name: 'child_step_3'
description: 'Third child'
f: fn (mut s flows.Step) ! {
println(' Child Step 3: Executing...')
}
)!
// Add multiple next steps
parent.next_step_add(child1)
parent.next_step_add(child2)
parent.next_step_add(child3)
coordinator.run()!
// Verify all steps executed
all_states := coordinator.get_all_steps_state()!
assert all_states.len >= 4, 'Expected 4 steps to execute'
println(' Test 3 PASSED: Multiple next steps executed sequentially')
coordinator.clear_redis()!
}
fn test_redis_state() ! {
mut redis := redisclient.core_get()!
redis.flushdb()!
mut coordinator := flows.new(
name: 'test_redis_state'
redis: redis
ai: none
)!
mut step1 := coordinator.step_new(
name: 'redis_test_step'
description: 'Test Redis state storage'
f: fn (mut s flows.Step) ! {
println(' Executing step with context...')
s.context['user'] = 'test_user'
s.context['action'] = 'test_action'
}
)!
coordinator.run()!
// Retrieve state from Redis
step_state := coordinator.get_step_state('redis_test_step')!
println(' Step state in Redis:')
for key, value in step_state {
println(' ${key}: ${value}')
}
// Verify fields
assert step_state['name'] == 'redis_test_step', 'Step name mismatch'
assert step_state['status'] == 'success', 'Step status should be success'
assert step_state['description'] == 'Test Redis state storage', 'Description mismatch'
// Verify JSON is stored
if json_data := step_state['json'] {
preview_len := if json_data.len > 50 { 50 } else { json_data.len }
println(' JSON data stored in Redis: ${json_data[0..preview_len]}...')
}
// Verify log count
logs_count := step_state['logs_count'] or { '0' }
println(' Logs count: ${logs_count}')
println(' Test 4 PASSED: Redis state correctly stored and retrieved')
coordinator.clear_redis()!
}
fn test_complex_flow() ! {
mut redis := redisclient.core_get()!
redis.flushdb()!
mut coordinator := flows.new(
name: 'test_complex_flow'
redis: redis
ai: none
)!
// Step 1: Validate
mut validate := coordinator.step_new(
name: 'validate_input'
description: 'Validate input parameters'
f: fn (mut s flows.Step) ! {
println(' Validating input...')
s.context['validated'] = 'true'
}
)!
// Step 2: Transform (next step after validate)
mut transform := coordinator.step_new(
name: 'transform_data'
description: 'Transform input data'
f: fn (mut s flows.Step) ! {
println(' Transforming data...')
s.context['transformed'] = 'true'
}
)!
// Step 3a: Save to DB (next step after transform)
mut save_db := coordinator.step_new(
name: 'save_to_database'
description: 'Save data to database'
f: fn (mut s flows.Step) ! {
println(' Saving to database...')
s.context['saved'] = 'true'
}
)!
// Step 3b: Send notification (next step after transform)
mut notify := coordinator.step_new(
name: 'send_notification'
description: 'Send notification'
f: fn (mut s flows.Step) ! {
println(' Sending notification...')
s.context['notified'] = 'true'
}
)!
// Step 4: Cleanup (final step)
mut cleanup := coordinator.step_new(
name: 'cleanup'
description: 'Cleanup resources'
f: fn (mut s flows.Step) ! {
println(' Cleaning up...')
s.context['cleaned'] = 'true'
}
)!
// Build the flow chain
validate.next_step_add(transform)
transform.next_step_add(save_db)
transform.next_step_add(notify)
save_db.next_step_add(cleanup)
notify.next_step_add(cleanup)
coordinator.run()!
// Verify all steps executed
all_states := coordinator.get_all_steps_state()!
println(' Total steps executed: ${all_states.len}')
for state in all_states {
name := state['name'] or { 'unknown' }
status := state['status'] or { 'unknown' }
duration := state['duration'] or { '0' }
println(' - ${name}: ${status} (${duration}ms)')
}
assert all_states.len >= 5, 'Expected at least 5 steps'
println(' Test 5 PASSED: Complex flow executed successfully')
coordinator.clear_redis()!
}

View File

@@ -0,0 +1,12 @@
#!/usr/bin/env hero
!!atlas.scan
git_url: 'https://git.ourworld.tf/tfgrid/docs_tfgrid4/src/branch/main/collections/mycelium_economics'
!!atlas.scan
git_url: 'https://git.ourworld.tf/tfgrid/docs_tfgrid4/src/branch/main/collections/authentic_web'
// !!atlas.scan
// git_url: 'https://git.ourworld.tf/geomind/docs_geomind/src/branch/main/collections/usecases'
!!atlas.export destination: '/tmp/atlas_export'

View File

@@ -0,0 +1,15 @@
#!/usr/bin/env hero
!!atlas.scan
git_url: 'https://git.ourworld.tf/geomind/atlas_geomind/src/branch/main/content'
meta_path: '/tmp/atlas_export_meta'
!!atlas.scan
git_url: 'https://git.ourworld.tf/tfgrid/atlas_threefold/src/branch/main/content'
meta_path: '/tmp/atlas_export_meta'
ignore3: 'static,templates,groups'
!!atlas.export
destination: '/tmp/atlas_export_test'
include: true
redis: true

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env hero
!!atlas.scan git_url:"https://git.ourworld.tf/tfgrid/docs_tfgrid4/src/branch/main/collections/tests"
!!atlas.export destination: '/tmp/atlas_export'

98
examples/data/atlas/example.vsh Executable file
View File

@@ -0,0 +1,98 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.data.atlas
import incubaid.herolib.core.pathlib
import incubaid.herolib.web.atlas_client
import os
// Example: Atlas Export and AtlasClient Usage
println('Atlas Export & Client Example')
println('============================================================')
// Setup test directory
test_dir := '/tmp/atlas_example'
export_dir := '/tmp/atlas_export'
os.rmdir_all(test_dir) or {}
os.rmdir_all(export_dir) or {}
os.mkdir_all(test_dir)!
// Create a collection with some content
col_path := '${test_dir}/docs'
os.mkdir_all(col_path)!
mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
cfile.write('name:docs')!
mut page1 := pathlib.get_file(path: '${col_path}/intro.md', create: true)!
page1.write('# Introduction\n\nWelcome to the docs!')!
mut page2 := pathlib.get_file(path: '${col_path}/guide.md', create: true)!
page2.write('# Guide\n\n!!include docs:intro\n\nMore content here.')!
// Create and scan atlas
println('\n1. Creating Atlas and scanning...')
mut a := atlas.new(name: 'my_docs')!
a.scan(path: test_dir)!
println(' Found ${a.collections.len} collection(s)')
// Validate links
println('\n2. Validating links...')
a.validate_links()!
col := a.get_collection('docs')!
if col.has_errors() {
println(' Errors found:')
col.print_errors()
} else {
println(' No errors found!')
}
// Export collections
println('\n3. Exporting collections to ${export_dir}...')
a.export(
destination: export_dir
include: true // Process includes during export
redis: false // Don't use Redis for this example
)!
println(' Export complete')
// Use AtlasClient to access exported content
println('\n4. Using AtlasClient to read exported content...')
mut client := atlas_client.new(export_dir: export_dir)!
// List collections
collections := client.list_collections()!
println(' Collections: ${collections}')
// List pages in docs collection
pages := client.list_pages('docs')!
println(' Pages in docs: ${pages}')
// Read page content
println('\n5. Reading page content via AtlasClient...')
intro_content := client.get_page_content('docs', 'intro')!
println(' intro.md content:')
println(' ${intro_content}')
guide_content := client.get_page_content('docs', 'guide')!
println('\n guide.md content (with includes processed):')
println(' ${guide_content}')
// Get metadata
println('\n6. Accessing metadata...')
metadata := client.get_collection_metadata('docs')!
println(' Collection name: ${metadata.name}')
println(' Collection path: ${metadata.path}')
println(' Number of pages: ${metadata.pages.len}')
println('\n Example completed successfully!')
println('\nExported files are in: ${export_dir}')
println(' - content/docs/intro.md')
println(' - content/docs/guide.md')
println(' - meta/docs.json')
// Cleanup (commented out so you can inspect the files)
// os.rmdir_all(test_dir) or {}
// os.rmdir_all(export_dir) or {}

View File

@@ -0,0 +1,198 @@
# HeroPrompt Example
Generate structured AI prompts from your codebase with file selection and workspace management.
## Quick Start
Run the example:
```bash
./examples/develop/heroprompt/prompt_example.vsh
```
This example demonstrates the complete workflow from creating a workspace to generating AI prompts.
---
## What is HeroPrompt?
HeroPrompt helps you organize code files and generate structured prompts for AI analysis:
- **Workspace Management**: Organize files into logical workspaces
- **File Selection**: Select specific files or entire directories
- **Prompt Generation**: Generate formatted prompts with file trees and contents
- **Redis Persistence**: All data persists across sessions
- **Active Workspace**: Easily switch between different workspaces
---
## Basic Usage
### 1. Create Instance and Workspace
```v
import incubaid.herolib.develop.heroprompt
// Create or get instance
mut hp := heroprompt.get(name: 'my_project', create: true)!
// Create workspace (first workspace is automatically active)
mut workspace := hp.new_workspace(
name: 'my_workspace'
description: 'My project workspace'
)!
```
### 2. Add Directories
```v
// Add directory and scan all files
mut dir := workspace.add_directory(
path: '/path/to/your/code'
name: 'my_code'
scan: true // Automatically scans all files and subdirectories
)!
```
### 3. Select Files
```v
// Select specific files
dir.select_file(path: '/path/to/file1.v')!
dir.select_file(path: '/path/to/file2.v')!
// Or select all files in directory
dir.select_all()!
```
### 4. Generate Prompt
```v
// Generate AI prompt with selected files
prompt := workspace.generate_prompt(
instruction: 'Review these files and suggest improvements'
)!
println(prompt)
```
---
## Generated Prompt Format
The generated prompt includes three sections:
````
<user_instructions>
Review these files and suggest improvements
</user_instructions>
<file_map>
my_project/
├── src/
│ ├── main.v *
│ └── utils.v *
└── README.md *
</file_map>
<file_contents>
File: /path/to/src/main.v
```v
module main
...
```
</file_contents>
````
---
## API Reference
### Factory Functions
```v
heroprompt.get(name: 'my_project', create: true)! // Get or create
heroprompt.delete(name: 'my_project')! // Delete instance
heroprompt.exists(name: 'my_project')! // Check if exists
heroprompt.list()! // List all instances
```
### HeroPrompt Methods
```v
hp.new_workspace(name: 'ws', description: 'desc')! // Create workspace
hp.get_workspace('ws')! // Get workspace by name
hp.list_workspaces() // List all workspaces
hp.delete_workspace('ws')! // Delete workspace
hp.get_active_workspace()! // Get active workspace
hp.set_active_workspace('ws')! // Set active workspace
```
### Workspace Methods
```v
ws.add_directory(path: '/path', name: 'dir', scan: true)! // Add directory
ws.list_directories() // List directories
ws.remove_directory(id: 'dir_id')! // Remove directory
ws.generate_prompt(instruction: 'Review')! // Generate prompt
ws.generate_file_map()! // Generate file tree
ws.generate_file_contents()! // Generate contents
```
### Directory Methods
```v
dir.select_file(path: '/path/to/file')! // Select file
dir.select_all()! // Select all files
dir.deselect_file(path: '/path/to/file')! // Deselect file
dir.deselect_all()! // Deselect all files
```
---
## Features
### Active Workspace
```v
// Get the currently active workspace
mut active := hp.get_active_workspace()!
// Switch to a different workspace
hp.set_active_workspace('other_workspace')!
```
### Multiple Workspaces
```v
// Create multiple workspaces for different purposes
mut backend := hp.new_workspace(name: 'backend')!
mut frontend := hp.new_workspace(name: 'frontend')!
mut docs := hp.new_workspace(name: 'documentation')!
```
### File Selection
```v
// Select individual files
dir.select_file(path: '/path/to/file.v')!
// Select all files in directory
dir.select_all()!
// Deselect files
dir.deselect_file(path: '/path/to/file.v')!
dir.deselect_all()!
```
---
## Tips
- Always start with cleanup (`heroprompt.delete()`) in examples to ensure a fresh state
- The first workspace created is automatically set as active
- File selection persists to Redis automatically
- Use `scan: true` when adding directories to automatically scan all files
- Selected files are tracked per directory for efficient management
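Putting the first two tips together, a fresh-start pattern looks like this (a minimal sketch using the API above; the instance name is illustrative):
```v
import incubaid.herolib.develop.heroprompt

// Start from a clean slate, then create the instance and its first workspace.
heroprompt.delete(name: 'demo') or {} // ignore the error if nothing existed yet
mut hp := heroprompt.get(name: 'demo', create: true)!
mut ws := hp.new_workspace(name: 'main')! // first workspace becomes active automatically
```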

View File

@@ -1,50 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cg -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.develop.heroprompt
import os
// mut workspace := heroprompt.new(
// path: '${os.home_dir()}/code/github/incubaid/herolib'
// name: 'workspace'
// )!
mut workspace := heroprompt.get(
name: 'example_ws'
path: '${os.home_dir()}/code/github/incubaid/herolib'
create: true
)!
println('workspace (initial): ${workspace}')
println('selected (initial): ${workspace.selected_children()}')
// Add a directory and a file
workspace.add_dir(path: '${os.home_dir()}/code/github/incubaid/herolib/docker')!
workspace.add_file(
path: '${os.home_dir()}/code/github/incubaid/herolib/docker/docker_ubuntu_install.sh'
)!
println('selected (after add): ${workspace.selected_children()}')
// Build a prompt from current selection (should be empty now)
mut prompt := workspace.prompt(
text: 'Using the selected files, i want you to get all print statments'
)
println('--- PROMPT START ---')
println(prompt)
println('--- PROMPT END ---')
// Remove the file by name, then the directory by name
workspace.remove_file(name: 'docker_ubuntu_install.sh') or { println('remove_file: ${err}') }
workspace.remove_dir(name: 'docker') or { println('remove_dir: ${err}') }
println('selected (after remove): ${workspace.selected_children()}')
// List workspaces (names only)
mut all := heroprompt.list_workspaces() or { []&heroprompt.Workspace{} }
mut names := []string{}
for w in all {
names << w.name
}
println('workspaces: ${names}')
// Optionally delete the example workspace
workspace.delete_workspace() or { println('delete_workspace: ${err}') }

View File

@@ -0,0 +1,145 @@
#!/usr/bin/env -S v -n -w -gc none -cg -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.develop.heroprompt
import os
println('=== HeroPrompt: AI Prompt Generation Example ===\n')
// ============================================================================
// STEP 1: Cleanup and Setup
// ============================================================================
// Always start fresh - delete any existing instance
println('Step 1: Cleaning up any existing instance...')
heroprompt.delete(name: 'prompt_demo') or {}
println(' Cleanup complete\n')
// ============================================================================
// STEP 2: Create HeroPrompt Instance
// ============================================================================
// Get or create a new HeroPrompt instance
// The 'create: true' parameter will create it if it doesn't exist
println('Step 2: Creating HeroPrompt instance...')
mut hp := heroprompt.get(name: 'prompt_demo', create: true)!
println(' Created instance: ${hp.name}\n')
// ============================================================================
// STEP 3: Create Workspace
// ============================================================================
// A workspace is a collection of directories and files
// The first workspace is automatically set as active
println('Step 3: Creating workspace...')
mut workspace := hp.new_workspace(
name: 'my_project'
description: 'Example project workspace'
)!
println(' Created workspace: ${workspace.name}')
println(' Active: ${workspace.is_active}')
println(' Description: ${workspace.description}\n')
// ============================================================================
// STEP 4: Add Directories to Workspace
// ============================================================================
// Add directories containing code you want to analyze
// The 'scan: true' parameter automatically scans all files and subdirectories
println('Step 4: Adding directories to workspace...')
homepath := os.home_dir()
// Add the examples directory
mut examples_dir := workspace.add_directory(
path: '${homepath}/code/github/incubaid/herolib/examples/develop/heroprompt'
name: 'examples'
scan: true
)!
println(' Added directory: examples')
// Add the library directory
mut lib_dir := workspace.add_directory(
path: '${homepath}/code/github/incubaid/herolib/lib/develop/heroprompt'
name: 'library'
scan: true
)!
println(' Added directory: library\n')
// ============================================================================
// STEP 5: Select Specific Files
// ============================================================================
// You can select specific files from directories for prompt generation
// This is useful when you only want to analyze certain files
println('Step 5: Selecting specific files...')
// Select individual files from the examples directory
examples_dir.select_file(
path: '${homepath}/code/github/incubaid/herolib/examples/develop/heroprompt/README.md'
)!
println(' Selected: README.md')
examples_dir.select_file(
path: '${homepath}/code/github/incubaid/herolib/examples/develop/heroprompt/prompt_example.vsh'
)!
println(' Selected: prompt_example.vsh')
// Select all files from the library directory
lib_dir.select_all()!
println(' Selected all files in library directory\n')
// ============================================================================
// STEP 6: Generate AI Prompt
// ============================================================================
// Generate a complete prompt with file map, file contents, and instructions
// The prompt automatically includes only the selected files
println('Step 6: Generating AI prompt...')
prompt := workspace.generate_prompt(
instruction: 'Review the selected files and provide suggestions for improvements.'
)!
println(' Generated prompt')
println(' Total length: ${prompt.len} characters\n')
// ============================================================================
// STEP 7: Display Prompt Preview
// ============================================================================
println('Step 7: Prompt preview (first 800 characters)...')
preview_len := if prompt.len > 800 { 800 } else { prompt.len }
println(prompt[..preview_len])
// ============================================================================
// STEP 8: Alternative - Get Active Workspace
// ============================================================================
// You can retrieve the active workspace without knowing its name
println('Step 8: Working with active workspace...')
mut active_ws := hp.get_active_workspace()!
println(' Retrieved active workspace: ${active_ws.name}')
println(' Directories: ${active_ws.directories.len}')
println(' Files: ${active_ws.files.len}\n')
// ============================================================================
// STEP 9: Set Different Active Workspace
// ============================================================================
// You can create multiple workspaces and switch between them
println('Step 9: Creating and switching workspaces...')
// Create a second workspace
mut workspace2 := hp.new_workspace(
name: 'documentation'
description: 'Documentation workspace'
is_active: false
)!
println(' Created workspace: ${workspace2.name}')
// Switch active workspace
hp.set_active_workspace('documentation')!
println(' Set active workspace to: documentation')
// Verify the switch
active_ws = hp.get_active_workspace()!
println(' Current active workspace: ${active_ws.name}\n')
// ============================================================================
// STEP 10: Cleanup
// ============================================================================
println('Step 10: Cleanup...')
heroprompt.delete(name: 'prompt_demo')!
println(' Deleted instance\n')

View File

@@ -1 +0,0 @@
!!git.check filter:'herolib'

View File

@@ -1,13 +1,15 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals -no-skip-unused run
import incubaid.herolib.hero.heromodels
import incubaid.herolib.hero.db
import time
fn main() {
// Start the server in a background thread with authentication disabled for testing
spawn fn () ! {
heromodels.new(reset: true, name: 'test')!
spawn fn () {
heromodels.new(reset: true, name: 'test') or {
eprintln('Failed to initialize HeroModels: ${err}')
exit(1)
}
heromodels.server_start(
name: 'test'
port: 8080
@@ -17,7 +19,10 @@ fn main() {
allowed_origins: [
'http://localhost:5173',
]
) or { panic('Failed to start HeroModels server: ${err}') }
) or {
eprintln('Failed to start HeroModels server: ${err}')
exit(1)
}
}()
// Keep the main thread alive

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.hero.heromodels
// Initialize database
mut mydb := heromodels.new()!
// Create goals
mut goals := [
heromodels.Goal{
id: 'G1'
title: 'Faster Requirements'
description: 'Reduce PRD creation time to under 1 day'
gtype: .product
},
]
// Create use cases
mut use_cases := [
heromodels.UseCase{
id: 'UC1'
title: 'Generate PRD'
actor: 'Product Manager'
goal: 'Create validated PRD'
steps: ['Select template', 'Fill fields', 'Export to Markdown']
success: 'Complete PRD generated'
failure: 'Validation failed'
},
]
// Create requirements
mut criterion := heromodels.AcceptanceCriterion{
id: 'AC1'
description: 'Display template list'
condition: 'List contains >= 5 templates'
}
mut requirements := [
heromodels.Requirement{
id: 'R1'
category: 'Editor'
title: 'Template Selection'
rtype: .functional
description: 'User can select from templates'
priority: .high
criteria: [criterion]
dependencies: []
},
]
// Create constraints
mut constraints := [
heromodels.Constraint{
id: 'C1'
title: 'ARM64 Support'
description: 'Must run on ARM64 infrastructure'
ctype: .technical
},
]
// Create risks
mut risks := map[string]string{}
risks['RISK1'] = 'Templates too limited -> Add community contributions'
risks['RISK2'] = 'AI suggestions inaccurate -> Add review workflow'
// Create a new PRD object
mut prd := mydb.prd.new(
product_name: 'Lumina PRD Builder'
version: 'v1.0'
overview: 'Tool to create structured PRDs quickly'
vision: 'Enable teams to generate clear requirements in minutes'
goals: goals
use_cases: use_cases
requirements: requirements
constraints: constraints
risks: risks
)!
// Save to database
prd = mydb.prd.set(prd)!
println(' Created PRD with ID: ${prd.id}')
// Retrieve from database
mut retrieved := mydb.prd.get(prd.id)!
println(' Retrieved PRD: ${retrieved.product_name}')
// List all PRDs
mut all_prds := mydb.prd.list()!
println(' Total PRDs in database: ${all_prds.len}')
// Check if exists
exists := mydb.prd.exist(prd.id)!
println(' PRD exists: ${exists}')

View File

@@ -5,7 +5,7 @@ import incubaid.herolib.schemas.openrpc
import os
// 1. Create a new server instance
mut server := heroserver.new(port: 8080)!
mut server := heroserver.new(port: 8081, auth_enabled: false)!
// 2. Create and register your OpenRPC handlers
// These handlers must conform to the `openrpc.OpenRPCHandler` interface.

File diff suppressed because it is too large

View File

@@ -0,0 +1,69 @@
{
"openrpc": "1.2.6",
"info": {
"title": "Comment Service",
"description": "A simple service for managing comments.",
"version": "1.0.0"
},
"methods": [
{
"name": "add_comment",
"summary": "Add a new comment",
"params": [
{
"name": "text",
"description": "The content of the comment.",
"required": true,
"schema": {
"type": "string"
}
}
],
"result": {
"name": "comment_id",
"description": "The ID of the newly created comment.",
"schema": {
"type": "string"
}
}
},
{
"name": "get_comment",
"summary": "Get a comment by ID",
"description": "Retrieves a specific comment using its unique identifier.",
"params": [
{
"name": "id",
"description": "The unique identifier of the comment to retrieve.",
"required": true,
"schema": {
"type": "number",
"example": 1
}
},
{
"name": "include_metadata",
"description": "Whether to include metadata in the response.",
"required": false,
"schema": {
"type": "boolean",
"example": true
}
}
],
"result": {
"name": "comment",
"description": "The requested comment object.",
"schema": {
"type": "object",
"example": {
"id": 1,
"text": "This is a sample comment",
"created_at": "2024-01-15T10:30:00Z"
}
}
}
}
],
"components": {}
}
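Given this spec, a conforming JSON-RPC 2.0 exchange for `get_comment` would look like the following (illustrative payloads; the endpoint depends on how the server is exposed). Request:
```json
{"jsonrpc": "2.0", "id": 1, "method": "get_comment", "params": {"id": 1, "include_metadata": true}}
```
Response:
```json
{"jsonrpc": "2.0", "id": 1, "result": {"id": 1, "text": "This is a sample comment", "created_at": "2024-01-15T10:30:00Z"}}
```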

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.base.redis
println('=== Redis Installer Example ===\n')
// Create configuration
// You can customize port, datadir, and ipaddr as needed
config := redis.RedisInstall{
port: 6379 // Redis port
datadir: '/var/lib/redis' // Data directory (standard location)
ipaddr: 'localhost' // Bind address
}
// Check if Redis is already running
if redis.check(config) {
println('INFO: Redis is already running on port ${config.port}')
println(' To reinstall, stop Redis first: redis.stop()!')
} else {
// Install and start Redis
println('Installing and starting Redis...')
println(' Port: ${config.port}')
println(' Data directory: ${config.datadir}')
println(' Bind address: ${config.ipaddr}\n')
redis.redis_install(config)!
// Verify installation
if redis.check(config) {
println('\nSUCCESS: Redis installed and started successfully!')
println(' You can now connect to Redis on port ${config.port}')
println(' Test with: redis-cli ping')
} else {
println('\nERROR: Redis installation completed but failed to start')
println(' Check logs: journalctl -u redis-server -n 20')
}
}
println('\n=== Available Functions ===')
println(' redis.redis_install(config)! - Install and start Redis')
println(' redis.start(config)! - Start Redis')
println(' redis.stop()! - Stop Redis')
println(' redis.restart(config)! - Restart Redis')
println(' redis.check(config) - Check if running')
println('\nDone!')

View File

@@ -0,0 +1,209 @@
# Horus Installation Examples
This directory contains example scripts for installing and managing all Horus components using the herolib installer framework.
## Components
The Horus ecosystem consists of the following components:
1. **Coordinator** - Central coordination service (HTTP: 8081, WS: 9653)
2. **Supervisor** - Supervision and monitoring service (HTTP: 8082, WS: 9654)
3. **Hero Runner** - Command execution runner for Hero jobs
4. **Osiris Runner** - Database-backed runner
5. **SAL Runner** - System Abstraction Layer runner
## Quick Start
### Full Installation and Start
To install and start all Horus components:
```bash
# 1. Install all components (this will take several minutes)
./horus_full_install.vsh
# 2. Start all services
./horus_start_all.vsh
# 3. Check status
./horus_status.vsh
```
### Stop All Services
```bash
./horus_stop_all.vsh
```
## Available Scripts
### `horus_full_install.vsh`
Installs all Horus components:
- Checks and installs Redis if needed
- Checks and installs Rust if needed
- Clones the horus repository
- Builds all binaries from source
**Note:** This script can take 10-30 minutes depending on your system, as it compiles Rust code.
### `horus_start_all.vsh`
Starts all Horus services in the correct order:
1. Coordinator
2. Supervisor
3. Hero Runner
4. Osiris Runner
5. SAL Runner
### `horus_stop_all.vsh`
Stops all running Horus services in reverse order.
### `horus_status.vsh`
Checks and displays the status of all Horus services.
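Under the hood, a status check of this kind can be as simple as probing for the processes (a minimal sketch using V's `os` module; process names as in the Testing section below):
```v
import os

// pgrep exits with 0 when at least one matching process is found.
services := ['coordinator', 'supervisor', 'herorunner', 'runner_osiris', 'runner_sal']
for s in services {
	res := os.execute('pgrep -f ${s}')
	status := if res.exit_code == 0 { 'running' } else { 'stopped' }
	println('${s}: ${status}')
}
```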
## Prerequisites
- **Operating System**: Linux or macOS
- **Dependencies** (automatically installed):
- Redis (required for all components)
- Rust toolchain (for building from source)
- Git (for cloning repositories)
## Configuration
All components use default configurations:
### Coordinator
- Binary: `/hero/var/bin/coordinator`
- HTTP Port: `8081`
- WebSocket Port: `9653`
- Redis: `127.0.0.1:6379`
### Supervisor
- Binary: `/hero/var/bin/supervisor`
- HTTP Port: `8082`
- WebSocket Port: `9654`
- Redis: `127.0.0.1:6379`
### Runners
- Hero Runner: `/hero/var/bin/herorunner`
- Osiris Runner: `/hero/var/bin/runner_osiris`
- SAL Runner: `/hero/var/bin/runner_sal`
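A quick way to verify the binaries landed in the expected locations (sketch; paths as listed above):
```v
import os

binaries := ['/hero/var/bin/coordinator', '/hero/var/bin/supervisor',
	'/hero/var/bin/herorunner', '/hero/var/bin/runner_osiris', '/hero/var/bin/runner_sal']
for b in binaries {
	state := if os.exists(b) { 'present' } else { 'missing' }
	println('${b}: ${state}')
}
```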
## Custom Configuration
To customize the configuration, use the V installer API directly:
```v
import incubaid.herolib.installers.horus.coordinator
mut coord := coordinator.get(create: true)!
coord.http_port = 9000
coord.ws_port = 9001
coord.log_level = 'debug'
coordinator.set(coord)!
coord.install()!
coord.start()!
```
## Testing
After starting the services, you can test them:
```bash
# Test Coordinator HTTP endpoint
curl http://127.0.0.1:8081
# Test Supervisor HTTP endpoint
curl http://127.0.0.1:8082
# Check running processes
pgrep -f coordinator
pgrep -f supervisor
pgrep -f herorunner
pgrep -f runner_osiris
pgrep -f runner_sal
```
## Troubleshooting
### Redis Not Running
If you get Redis connection errors:
```bash
# Check if Redis is running
redis-cli ping
# Start Redis (Ubuntu/Debian)
sudo systemctl start redis-server
# Start Redis (macOS with Homebrew)
brew services start redis
```
### Build Failures
If the build fails:
1. Ensure you have enough disk space (at least 5GB free)
2. Check that Rust is properly installed: `rustc --version`
3. Try cleaning the build: `cd /root/code/git.ourworld.tf/herocode/horus && cargo clean`
### Port Conflicts
If ports 8081 or 8082 are already in use, you can customize the ports in the configuration.
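For example, reusing the V API from the Custom Configuration section above (ports here are illustrative):
```v
import incubaid.herolib.installers.horus.coordinator

// Move the coordinator off the default ports before installing.
mut coord := coordinator.get(create: true)!
coord.http_port = 9090
coord.ws_port = 9663
coordinator.set(coord)!
```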
## Advanced Usage
### Individual Component Installation
You can install components individually:
```bash
# Install only coordinator
v run coordinator_only.vsh
# Install only supervisor
v run supervisor_only.vsh
```
### Using with Heroscript
You can also use heroscript files for configuration:
```heroscript
!!herocoordinator.configure
name:'production'
http_port:8081
ws_port:9653
log_level:'info'
!!herocoordinator.install
!!herocoordinator.start
```
## Service Management
Services are managed using the system's startup manager (zinit or systemd):
```bash
# Check service status with systemd
systemctl status coordinator
# View logs
journalctl -u coordinator -f
```
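With zinit as the startup manager, the equivalent checks would typically look like this (a sketch assuming the standard zinit CLI is on the PATH and the service names used above):
```bash
# List all managed services and their states
zinit list
# Show details for a single service
zinit status coordinator
# Tail the service log
zinit log coordinator
```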
## Cleanup
To completely remove all Horus components:
```bash
# Stop all services
./horus_stop_all.vsh
# Destroy all components (removes binaries)
v run horus_destroy_all.vsh
```
## Support
For issues or questions:
- Check the main Horus repository: https://git.ourworld.tf/herocode/horus
- Review the installer code in `lib/installers/horus/`

View File

@@ -0,0 +1,36 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
// Example usage of coordinator installer
// This will:
// 1. Check if Rust is installed (installs if not present)
// 2. Clone the horus repository
// 3. Build the coordinator binary
//
// Note: Redis must be pre-installed and running before using the coordinator
println('Building coordinator from horus repository...')
println('(This will install Rust if not already installed)\n')
// Create coordinator instance
mut coord := coordinator.new()!
// Build and install
// Note: This will skip the build if the binary already exists
coord.install()!
// To force a rebuild even if binary exists, use:
// coord.install(reset: true)!
println('\nCoordinator built and installed successfully!')
println('Binary location: ${coord.binary_path}')
// Note: To start the service, uncomment the lines below
// (requires proper zinit or screen session setup and Redis running)
// coord.start()!
// if coord.running()! {
// println('Coordinator is running!')
// }
// coord.stop()!
// coord.destroy()!

View File

@@ -0,0 +1,60 @@
// Horus Configuration Heroscript
// This file demonstrates how to configure all Horus components using heroscript
// Configure Coordinator
!!coordinator.configure
name:'default'
binary_path:'/hero/var/bin/coordinator'
redis_addr:'127.0.0.1:6379'
http_port:8081
ws_port:9653
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure Supervisor
!!supervisor.configure
name:'default'
binary_path:'/hero/var/bin/supervisor'
redis_addr:'127.0.0.1:6379'
http_port:8082
ws_port:9654
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure Hero Runner
!!herorunner.configure
name:'default'
binary_path:'/hero/var/bin/herorunner'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure Osiris Runner
!!osirisrunner.configure
name:'default'
binary_path:'/hero/var/bin/runner_osiris'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Configure SAL Runner
!!salrunner.configure
name:'default'
binary_path:'/hero/var/bin/runner_sal'
redis_addr:'127.0.0.1:6379'
log_level:'info'
repo_path:'/root/code/git.ourworld.tf/herocode/horus'
// Install all components
!!coordinator.install
!!supervisor.install
!!herorunner.install
!!osirisrunner.install
!!salrunner.install
// Start all services
!!coordinator.start name:'default'
!!supervisor.start name:'default'
!!herorunner.start name:'default'
!!osirisrunner.start name:'default'
!!salrunner.start name:'default'

View File

@@ -0,0 +1,60 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
// Full Horus Installation Example
// This script installs and configures all Horus components:
// - Coordinator (port 8081)
// - Supervisor (port 8082)
// - Hero Runner
// - Osiris Runner
// - SAL Runner
println('🚀 Starting Full Horus Installation')
// Step 1: Install Coordinator
println('\n📦 Step 1/5: Installing Coordinator...')
mut coordinator_installer := coordinator.get(create: true)!
coordinator_installer.install()!
println(' Coordinator installed at ${coordinator_installer.binary_path}')
// Step 2: Install Supervisor
println('\n📦 Step 2/5: Installing Supervisor...')
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.install()!
println(' Supervisor installed at ${supervisor_inst.binary_path}')
// Step 3: Install Hero Runner
println('\n📦 Step 3/5: Installing Hero Runner...')
mut hero_runner := herorunner.get(create: true)!
hero_runner.install()!
println(' Hero Runner installed at ${hero_runner.binary_path}')
// Step 4: Install Osiris Runner
println('\n📦 Step 4/5: Installing Osiris Runner...')
mut osiris_runner := osirisrunner.get(create: true)!
osiris_runner.install()!
println(' Osiris Runner installed at ${osiris_runner.binary_path}')
// Step 5: Install SAL Runner
println('\n📦 Step 5/5: Installing SAL Runner...')
mut sal_runner := salrunner.get(create: true)!
sal_runner.install()!
println(' SAL Runner installed at ${sal_runner.binary_path}')
println('🎉 All Horus components installed successfully!')
println('\n📋 Installation Summary:')
println(' Coordinator: ${coordinator_installer.binary_path} (HTTP: ${coordinator_installer.http_port}, WS: ${coordinator_installer.ws_port})')
println(' Supervisor: ${supervisor_inst.binary_path} (HTTP: ${supervisor_inst.http_port}, WS: ${supervisor_inst.ws_port})')
println(' Hero Runner: ${hero_runner.binary_path}')
println(' Osiris Runner: ${osiris_runner.binary_path}')
println(' SAL Runner: ${sal_runner.binary_path}')
println('\n💡 Next Steps:')
println(' To start services, run: ./horus_start_all.vsh')
println(' To test individual components, see the other example scripts')

View File

@@ -0,0 +1,85 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
import time
// Start All Horus Services
// This script starts all Horus components in the correct order
println('🚀 Starting All Horus Services')
// Step 1: Start Coordinator
println('\n Step 1/5: Starting Coordinator...')
mut coordinator_installer := coordinator.get(create: true)!
coordinator_installer.start()!
if coordinator_installer.running()! {
println(' Coordinator is running on HTTP:${coordinator_installer.http_port} WS:${coordinator_installer.ws_port}')
} else {
println(' Coordinator failed to start')
}
// Step 2: Start Supervisor
println('\n Step 2/5: Starting Supervisor...')
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.start()!
if supervisor_inst.running()! {
println(' Supervisor is running on HTTP:${supervisor_inst.http_port} WS:${supervisor_inst.ws_port}')
} else {
println(' Supervisor failed to start')
}
// Step 3: Start Hero Runner
println('\n Step 3/5: Starting Hero Runner...')
mut hero_runner := herorunner.get(create: true)!
hero_runner.start()!
if hero_runner.running()! {
println(' Hero Runner is running')
} else {
println(' Hero Runner failed to start')
}
// Step 4: Start Osiris Runner
println('\n Step 4/5: Starting Osiris Runner...')
mut osiris_runner := osirisrunner.get(create: true)!
osiris_runner.start()!
if osiris_runner.running()! {
println(' Osiris Runner is running')
} else {
println(' Osiris Runner failed to start')
}
// Step 5: Start SAL Runner
println('\n Step 5/5: Starting SAL Runner...')
mut sal_runner := salrunner.get(create: true)!
sal_runner.start()!
if sal_runner.running()! {
println(' SAL Runner is running')
} else {
println(' SAL Runner failed to start')
}
println('🎉 All Horus services started!')
println('\n📊 Service Status:')
coordinator_status := if coordinator_installer.running()! { ' Running' } else { ' Stopped' }
println(' Coordinator: ${coordinator_status} (http://127.0.0.1:${coordinator_installer.http_port})')
supervisor_status := if supervisor_inst.running()! { ' Running' } else { ' Stopped' }
println(' Supervisor: ${supervisor_status} (http://127.0.0.1:${supervisor_inst.http_port})')
hero_runner_status := if hero_runner.running()! { ' Running' } else { ' Stopped' }
println(' Hero Runner: ${hero_runner_status}')
osiris_runner_status := if osiris_runner.running()! { ' Running' } else { ' Stopped' }
println(' Osiris Runner: ${osiris_runner_status}')
sal_runner_status := if sal_runner.running()! { ' Running' } else { ' Stopped' }
println(' SAL Runner: ${sal_runner_status}')
println('\n💡 Next Steps:')
println(' To stop services, run: ./horus_stop_all.vsh')
println(' To check status, run: ./horus_status.vsh')

View File

@@ -0,0 +1,66 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
// Check Status of All Horus Services
println('📊 Horus Services Status')
println('='.repeat(60))
// Get all services
mut coordinator_inst := coordinator.get()!
mut supervisor_inst := supervisor.get()!
mut hero_runner := herorunner.get()!
mut osiris_runner := osirisrunner.get()!
mut sal_runner := salrunner.get()!
// Check status
println('\n🔍 Checking service status...\n')
coord_running := coordinator_inst.running()!
super_running := supervisor_inst.running()!
hero_running := hero_runner.running()!
osiris_running := osiris_runner.running()!
sal_running := sal_runner.running()!
println('Service Status Details')
println('-'.repeat(60))
println('Coordinator ${if coord_running { ' Running' } else { ' Stopped' }} http://127.0.0.1:${coordinator_inst.http_port}')
println('Supervisor ${if super_running { ' Running' } else { ' Stopped' }} http://127.0.0.1:${supervisor_inst.http_port}')
println('Hero Runner ${if hero_running { ' Running' } else { ' Stopped' }}')
println('Osiris Runner ${if osiris_running { ' Running' } else { ' Stopped' }}')
println('SAL Runner ${if sal_running { ' Running' } else { ' Stopped' }}')
println('\n' + '='.repeat(60))
// Count running services
mut running_count := 0
if coord_running {
running_count++
}
if super_running {
running_count++
}
if hero_running {
running_count++
}
if osiris_running {
running_count++
}
if sal_running {
running_count++
}
println('Summary: ${running_count}/5 services running')
if running_count == 5 {
println('🎉 All services are running!')
} else if running_count == 0 {
println('💤 All services are stopped')
} else {
println(' Some services are not running')
}

View File

@@ -0,0 +1,43 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
import incubaid.herolib.installers.horus.herorunner
import incubaid.herolib.installers.horus.osirisrunner
import incubaid.herolib.installers.horus.salrunner
// Stop All Horus Services
// This script stops all running Horus components
println('🛑 Stopping All Horus Services')
println('='.repeat(60))
// Stop in reverse order
println('\n Stopping SAL Runner...')
mut sal_runner := salrunner.get()!
sal_runner.stop()!
println(' SAL Runner stopped')
println('\n Stopping Osiris Runner...')
mut osiris_runner := osirisrunner.get()!
osiris_runner.stop()!
println(' Osiris Runner stopped')
println('\n Stopping Hero Runner...')
mut hero_runner := herorunner.get()!
hero_runner.stop()!
println(' Hero Runner stopped')
println('\n Stopping Supervisor...')
mut supervisor_inst := supervisor.get()!
supervisor_inst.stop()!
println(' Supervisor stopped')
println('\n Stopping Coordinator...')
mut coordinator_inst := coordinator.get()!
coordinator_inst.stop()!
println(' Coordinator stopped')
println('\n' + '='.repeat(60))
println(' All Horus services stopped!')
println('='.repeat(60))

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.horus.coordinator
import incubaid.herolib.installers.horus.supervisor
// Quick Start Example - Install and Start Coordinator and Supervisor
// This is a minimal example to get started with Horus
println('🚀 Horus Quick Start')
println('='.repeat(60))
println('This will install and start Coordinator and Supervisor')
println('(Runners can be added later using the full install script)')
println('='.repeat(60))
// Install Coordinator
println('\n📦 Installing Coordinator...')
mut coordinator_inst := coordinator.get(create: true)!
coordinator_inst.install()!
println(' Coordinator installed')
// Install Supervisor
println('\n📦 Installing Supervisor...')
mut supervisor_inst := supervisor.get(create: true)!
supervisor_inst.install()!
println(' Supervisor installed')
// Start services
println('\n Starting Coordinator...')
coordinator_inst.start()!
if coordinator_inst.running()! {
println(' Coordinator is running on http://127.0.0.1:${coordinator_inst.http_port}')
}
println('\n Starting Supervisor...')
supervisor_inst.start()!
if supervisor_inst.running()! {
println(' Supervisor is running on http://127.0.0.1:${supervisor_inst.http_port}')
}
println('\n' + '='.repeat(60))
println('🎉 Quick Start Complete!')
println('='.repeat(60))
println('\n📊 Services Running:')
println(' Coordinator: http://127.0.0.1:${coordinator_inst.http_port}')
println(' Supervisor: http://127.0.0.1:${supervisor_inst.http_port}')
println('\n💡 Next Steps:')
println(' Test coordinator: curl http://127.0.0.1:${coordinator_inst.http_port}')
println(' Test supervisor: curl http://127.0.0.1:${supervisor_inst.http_port}')
println(' Install runners: ./horus_full_install.vsh')
println(' Check status: ./horus_status.vsh')
println(' Stop services: ./horus_stop_all.vsh')

3
examples/installers/k8s/.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
cryptpad
element_chat
gitea

View File

@@ -0,0 +1,27 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.k8s.cryptpad
// This example demonstrates how to use the CryptPad installer.
// 1. Create a new installer instance with a specific hostname.
// Replace 'mycryptpad' with your desired hostname.
mut installer := cryptpad.get(
name: 'kristof'
create: true
)!
// cryptpad.delete()!
// 2. Configure the installer (all settings are optional with sensible defaults)
// installer.hostname = 'mycryptpad'
// installer.namespace = 'cryptpad'
// 3. Install CryptPad.
// This will generate the necessary Kubernetes YAML files and apply them to your cluster.
installer.install()!
// println('CryptPad installation started.')
// println('You can access it at https://${installer.hostname}.gent01.grid.tf')
// 4. To destroy the deployment, you can run the following:
// installer.destroy()!

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.k8s.element_chat
// This example demonstrates how to use the Element Chat installer.
// 1. Create a new installer instance with specific hostnames.
// Replace 'matrixchattest' and 'elementchattest' with your desired hostnames.
// Note: Use only alphanumeric characters (no underscores or dashes).
mut installer := element_chat.get(
name: 'kristof'
create: true
)!
// element_chat.delete()!
// 2. Configure the installer (all settings are optional with sensible defaults)
// installer.matrix_hostname = 'matrixchattest'
// installer.element_hostname = 'elementchattest'
// installer.namespace = 'chat'
// // Conduit (Matrix homeserver) configuration
// installer.conduit_port = 6167 // Default: 6167
// installer.database_backend = 'rocksdb' // Default: 'rocksdb' (can be 'sqlite')
// installer.database_path = '/var/lib/matrix-conduit' // Default: '/var/lib/matrix-conduit'
// installer.allow_registration = true // Default: true
// installer.allow_federation = true // Default: true
// installer.log_level = 'info' // Default: 'info' (can be 'debug', 'warn', 'error')
// // Element web client configuration
// installer.element_brand = 'Element' // Default: 'Element'
// 3. Install Element Chat.
// This will generate the necessary Kubernetes YAML files and apply them to your cluster.
installer.install()!
// println('Element Chat installation started.')
// println('Matrix homeserver will be available at: https://${installer.matrix_hostname}.gent01.grid.tf')
// println('Element web client will be available at: https://${installer.element_hostname}.gent01.grid.tf')
// 4. To destroy the deployment, you can run the following:
// installer.destroy()!

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.k8s.gitea
// This example demonstrates how to use the Gitea installer.
// 1. Create a new installer instance with a specific hostname.
// Replace 'mygitea' with your desired hostname.
// Note: Use only alphanumeric characters (no underscores or dashes).
mut installer := gitea.get(
name: 'kristof'
create: true
)!
// 2. Configure the installer (all settings are optional with sensible defaults)
// installer.hostname = 'giteaapp' // Default: 'giteaapp'
// installer.namespace = 'forge' // Default: 'forge'
// // Gitea server configuration
// installer.http_port = 3000 // Default: 3000
// installer.disable_registration = false // Default: false (allow new user registration)
// // Database configuration - Option 1: SQLite (default)
// installer.db_type = 'sqlite3' // Default: 'sqlite3'
// installer.db_path = '/data/gitea/gitea.db' // Default: '/data/gitea/gitea.db'
// // Database configuration - Option 2: PostgreSQL
// // When using postgres, a PostgreSQL pod will be automatically deployed
installer.db_type = 'postgres' // Use PostgreSQL instead of SQLite
installer.db_host = 'postgres' // Default: 'postgres' (PostgreSQL service name)
installer.db_name = 'gitea' // Default: 'gitea' (database name)
installer.db_user = 'gitea' // Default: 'gitea' (database user)
installer.db_password = 'gitea' // Default: 'gitea' (database password)
installer.storage_size = '5Gi' // Default: '5Gi' (PVC storage size)
// 3. Install Gitea.
// This will generate the necessary Kubernetes YAML files and apply them to your cluster.
installer.install()!
// println('Gitea installation started.')
// println('You can access it at: https://${installer.hostname}.gent01.grid.tf')
// 4. To destroy the deployment, you can run the following:
// installer.destroy()!

View File

@@ -0,0 +1,11 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.virt.crun_installer
mut crun := crun_installer.get()!
// To install
crun.install()!
// To remove
crun.destroy()!

View File

@@ -0,0 +1,11 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.installers.virt.kubernetes_installer
mut kubectl := kubernetes_installer.get(name: 'k_installer', create: true)!
// To install
kubectl.install()!
// To remove
kubectl.destroy()!

View File

@@ -0,0 +1,169 @@
# HeroPods Examples
This directory contains example HeroScript files demonstrating different HeroPods use cases.
## Prerequisites
- **Linux system** (HeroPods requires Linux-specific tools: ip, iptables, nsenter, crun)
- **Root/sudo access** (required for network configuration and container management)
- **Podman** (optional but recommended for image management)
- **Hero CLI** installed and configured
## Example Scripts
### 1. simple_container.heroscript
**Purpose**: Demonstrate basic container lifecycle management
**What it does**:
- Creates a HeroPods instance
- Creates an Alpine Linux container
- Starts the container
- Executes basic commands inside the container (uname, ls, cat, ps, env)
- Stops the container
- Deletes the container
**Run it**:
```bash
hero run examples/virt/heropods/simple_container.heroscript
```
**Use this when**: You want to learn the basic container operations without networking complexity.
---
### 2. ipv4_connection.heroscript
**Purpose**: Demonstrate IPv4 networking and internet connectivity
**What it does**:
- Creates a HeroPods instance with bridge networking
- Creates an Alpine Linux container
- Starts the container with IPv4 networking
- Verifies network configuration (interfaces, routes, DNS)
- Tests DNS resolution
- Tests HTTP/HTTPS connectivity to the internet
- Stops and deletes the container
**Run it**:
```bash
hero run examples/virt/heropods/ipv4_connection.heroscript
```
**Use this when**: You want to verify that IPv4 bridge networking and internet access work correctly.
---
### 3. container_mycelium.heroscript
**Purpose**: Demonstrate Mycelium IPv6 overlay networking
**What it does**:
- Creates a HeroPods instance
- Enables Mycelium IPv6 overlay network with all required configuration
- Creates an Alpine Linux container
- Starts the container with both IPv4 and IPv6 (Mycelium) networking
- Verifies IPv6 configuration
- Tests Mycelium IPv6 connectivity to public nodes
- Verifies dual-stack networking (IPv4 + IPv6)
- Stops and deletes the container
**Run it**:
```bash
hero run examples/virt/heropods/container_mycelium.heroscript
```
**Use this when**: You want to test Mycelium IPv6 overlay networking for encrypted peer-to-peer connectivity.
**Note**: Mycelium must be available on the host; `heropods.enable_mycelium` installs and configures it automatically if it is missing (see Common Issues below).
---
### 4. demo.heroscript
**Purpose**: Quick demonstration of HeroPods with both IPv4 and IPv6 networking
**What it does**:
- Combines IPv4 and Mycelium IPv6 networking in a single demo
- Shows a complete workflow from configuration to cleanup
- Serves as a quick reference for common operations
**Run it**:
```bash
hero run examples/virt/heropods/demo.heroscript
```
**Use this when**: You want a quick overview of HeroPods capabilities.
---
## Common Issues
### Permission Denied for ping/ping6
Alpine Linux containers don't have the `CAP_NET_RAW` capability by default, which is required to send ICMP packets (ping).
**Solution**: Use `wget`, `curl`, or `nc` for connectivity testing instead of ping.
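For example, the following commands exercise connectivity over TCP and need no extra capabilities (the hostnames are only examples):
```bash
# HTTP check with wget (BusyBox wget ships with Alpine)
wget -O- http://example.com --timeout=5
# Raw TCP port check with netcat
nc -zv -w 3 example.com 80
```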
### Mycelium Not Found
If you get errors about Mycelium not being installed:
**Solution**: The HeroPods Mycelium integration will automatically install Mycelium when you run `heropods.enable_mycelium`. Make sure you have internet connectivity and the required permissions.
### Container Already Exists
If you get errors about containers already existing:
**Solution**: Either delete the existing container manually or set `reset:true` in the `heropods.configure` action.
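For example, mirroring the configure actions used by the scripts in this directory:
```heroscript
!!heropods.configure
name:'simple_demo'
reset:true
use_podman:true
```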
---
## Learning Path
We recommend running the examples in this order:
1. **simple_container.heroscript** - Learn basic container operations
2. **ipv4_connection.heroscript** - Understand IPv4 networking
3. **container_mycelium.heroscript** - Explore IPv6 overlay networking
4. **demo.heroscript** - See everything together
---
## Customization
Feel free to modify these scripts to:
- Use different container images (Ubuntu, custom images, etc.)
- Test different network configurations
- Add your own commands and tests
- Experiment with multiple containers
---
## Documentation
For more information, see:
- [HeroPods Main README](../../../lib/virt/heropods/readme.md)
- [Mycelium Integration Guide](../../../lib/virt/heropods/MYCELIUM_README.md)
- [Production Readiness Review](../../../lib/virt/heropods/PRODUCTION_READINESS_REVIEW.md)
---
## Support
If you encounter issues:
1. Check the logs in `~/.containers/logs/` (see the example below)
2. Verify your system meets the prerequisites
3. Review the error messages carefully
4. Consult the documentation linked above
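For step 1, something like the following works (the log file name depends on your container names, so the one below is only an example):
```bash
ls ~/.containers/logs/
tail -n 50 ~/.containers/logs/simple_container.log
```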

View File

@@ -0,0 +1,114 @@
#!/usr/bin/env hero
// ============================================================================
// HeroPods Example: Mycelium IPv6 Overlay Networking
// ============================================================================
//
// This script demonstrates Mycelium IPv6 overlay networking:
// - End-to-end encrypted IPv6 connectivity
// - Peer-to-peer routing through public relay nodes
// - Container IPv6 address assignment from host's /64 prefix
// - Connectivity to other Mycelium nodes across the internet
//
// Mycelium provides each container with an IPv6 address in the 400::/7 range
// and enables encrypted communication with other Mycelium nodes.
// ============================================================================
// Step 1: Configure HeroPods instance
// This creates a HeroPods instance with default IPv4 networking
!!heropods.configure
name:'mycelium_demo'
reset:false
use_podman:true
// Step 2: Enable Mycelium IPv6 overlay network
// All parameters are required for Mycelium configuration
!!heropods.enable_mycelium
heropods:'mycelium_demo'
version:'v0.5.6'
ipv6_range:'400::/7'
key_path:'~/hero/cfg/priv_key.bin'
peers:'tcp://185.69.166.8:9651,quic://[2a02:1802:5e:0:ec4:7aff:fe51:e36b]:9651,tcp://65.109.18.113:9651,quic://[2a01:4f9:5a:1042::2]:9651,tcp://5.78.122.16:9651,quic://[2a01:4ff:1f0:8859::1]:9651,tcp://5.223.43.251:9651,quic://[2a01:4ff:2f0:3621::1]:9651,tcp://142.93.217.194:9651,quic://[2400:6180:100:d0::841:2001]:9651'
// Step 3: Create a new Alpine Linux container
// Alpine includes basic IPv6 networking tools
!!heropods.container_new
name:'mycelium_container'
image:'custom'
custom_image_name:'alpine_3_20'
docker_url:'docker.io/library/alpine:3.20'
// Step 4: Start the container
// This sets up both IPv4 and IPv6 (Mycelium) networking
!!heropods.container_start
name:'mycelium_container'
// Step 5: Verify IPv6 network configuration
// Show all network interfaces (including IPv6 addresses)
!!heropods.container_exec
name:'mycelium_container'
cmd:'ip addr show'
stdout:true
// Show IPv6 addresses specifically
!!heropods.container_exec
name:'mycelium_container'
cmd:'ip -6 addr show'
stdout:true
// Show IPv6 routing table
!!heropods.container_exec
name:'mycelium_container'
cmd:'ip -6 route show'
stdout:true
// Step 6: Test Mycelium IPv6 connectivity
// Ping a known public Mycelium node to verify connectivity
// Note: This requires the container to have CAP_NET_RAW capability for ping6
// If ping6 fails with permission denied, this is expected behavior in Alpine
!!heropods.container_exec
name:'mycelium_container'
cmd:'ping6 -c 3 400:8f3a:8d0e:3503:db8e:6a02:2e9:83dd'
stdout:true
// Alternative: Test IPv6 connectivity using nc (netcat) if available
// This doesn't require special capabilities
!!heropods.container_exec
name:'mycelium_container'
cmd:'nc -6 -zv -w 3 400:8f3a:8d0e:3503:db8e:6a02:2e9:83dd 80 2>&1 || echo nc test completed'
stdout:true
// Step 7: Show Mycelium-specific information
// Display the container's Mycelium IPv6 address
!!heropods.container_exec
name:'mycelium_container'
cmd:'ip -6 addr show | grep 400: || echo No Mycelium IPv6 address found'
stdout:true
// Show IPv6 neighbors (if any)
!!heropods.container_exec
name:'mycelium_container'
cmd:'ip -6 neigh show'
stdout:true
// Step 8: Verify dual-stack networking (IPv4 + IPv6)
// The container should have both IPv4 and IPv6 connectivity
// Test IPv4 connectivity
!!heropods.container_exec
name:'mycelium_container'
cmd:'wget -O- http://google.com --timeout=5 2>&1 | head -n 5'
stdout:true
// Step 9: Stop the container
// This cleans up both IPv4 and IPv6 (Mycelium) networking
!!heropods.container_stop
name:'mycelium_container'
// Step 10: Delete the container
// This removes the container and all associated resources
!!heropods.container_delete
name:'mycelium_container'

View File

@@ -0,0 +1,75 @@
#!/usr/bin/env hero
// ============================================================================
// HeroPods Keep-Alive Feature Test - Alpine Container
// ============================================================================
//
// This script demonstrates the keep_alive feature with an Alpine container.
//
// Test Scenario:
// Alpine's default CMD is /bin/sh, which exits immediately when run
// non-interactively (no stdin). This makes it perfect for testing keep_alive:
//
// 1. Container starts with CMD=["/bin/sh"]
// 2. /bin/sh exits immediately (exit code 0)
// 3. HeroPods detects the successful exit
// 4. HeroPods recreates the container with keep-alive command
// 5. Container remains running and accepts exec commands
//
// This demonstrates the core keep_alive functionality:
// - Detecting when a container's entrypoint/cmd exits
// - Checking the exit code
// - Injecting a keep-alive process on successful exit
// - Allowing subsequent exec commands
//
// ============================================================================
// Step 1: Configure HeroPods instance
!!heropods.configure
name:'hello_world'
reset:true
use_podman:true
// Step 2: Create a container with Alpine 3.20 image
// Using custom image type to automatically download from Docker Hub
!!heropods.container_new
name:'alpine_test_keepalive'
image:'custom'
custom_image_name:'alpine_test'
docker_url:'docker.io/library/alpine:3.20'
// Step 3: Start the container with keep_alive enabled
// Alpine's CMD is /bin/sh which exits immediately when run non-interactively.
// With keep_alive:true, HeroPods will:
// 1. Start the container with /bin/sh
// 2. Wait for /bin/sh to exit (which happens immediately)
// 3. Detect the successful exit (exit code 0)
// 4. Recreate the container with a keep-alive command (tail -f /dev/null)
// 5. The container will then remain running and accept exec commands
!!heropods.container_start
name:'alpine_test_keepalive'
keep_alive:true
// Step 4: Execute a simple hello world command
!!heropods.container_exec
name:'alpine_test_keepalive'
cmd:'echo Hello World from HeroPods'
stdout:true
// Step 5: Display OS information
!!heropods.container_exec
name:'alpine_test_keepalive'
cmd:'cat /etc/os-release'
stdout:true
// Step 6: Show running processes
!!heropods.container_exec
name:'alpine_test_keepalive'
cmd:'ps aux'
stdout:true
// Step 7: Verify Alpine version
!!heropods.container_exec
name:'alpine_test_keepalive'
cmd:'cat /etc/alpine-release'
stdout:true

View File

@@ -0,0 +1,27 @@
#!/usr/bin/env hero
// Step 1: Configure HeroPods instance
!!heropods.configure
name:'simple_demo'
reset:false
use_podman:true
// Step 2: Create a container with hero binary
!!heropods.container_new
name:'simple_container'
image:'custom'
custom_image_name:'hero_container'
docker_url:'docker.io/threefolddev/hero-container:latest'
// Step 3: Start the container with keep_alive enabled
// This will run the entrypoint, wait for it to complete, then inject a keep-alive process
!!heropods.container_start
name:'simple_container'
keep_alive:true
// Step 4: Execute hero command inside the container
!!heropods.container_exec
name:'simple_container'
cmd:'hero -help'
stdout:true

View File

@@ -2,17 +2,17 @@
import incubaid.herolib.virt.heropods
// Initialize factory
mut factory := heropods.new(
// Initialize heropods
mut heropods_ := heropods.new(
reset: false
use_podman: true
) or { panic('Failed to init ContainerFactory: ${err}') }
) or { panic('Failed to init HeroPods: ${err}') }
println('=== HeroPods Refactored API Demo ===')
// Step 1: factory.new() now only creates a container definition/handle
// Step 1: heropods_.new() now only creates a container definition/handle
// It does NOT create the actual container in the backend yet
mut container := factory.new(
mut container := heropods_.container_new(
name: 'demo_alpine'
image: .custom
custom_image_name: 'alpine_3_20'
@@ -56,7 +56,7 @@ println('✓ Container deleted successfully')
println('\n=== Demo completed! ===')
println('The refactored API now works as expected:')
println('- factory.new() creates definition only')
println('- heropods_.new() creates definition only')
println('- container.start() is idempotent')
println('- container.exec() works and returns results')
println('- container.delete() works on instances')

View File

@@ -0,0 +1,96 @@
#!/usr/bin/env hero
// ============================================================================
// HeroPods Example: IPv4 Networking and Internet Connectivity
// ============================================================================
//
// This script demonstrates IPv4 networking functionality:
// - Bridge networking with automatic IP allocation
// - NAT for outbound internet access
// - DNS resolution
// - HTTP connectivity testing
//
// The container gets an IP address from the bridge subnet (default: 10.10.0.0/24)
// and can access the internet through NAT.
// ============================================================================
// Step 1: Configure HeroPods instance with IPv4 networking
// This creates a HeroPods instance with bridge networking enabled
!!heropods.configure
name:'ipv4_demo'
reset:false
use_podman:true
bridge_name:'heropods0'
subnet:'10.10.0.0/24'
gateway_ip:'10.10.0.1'
dns_servers:['8.8.8.8', '8.8.4.4']
// Step 2: Create a new Alpine Linux container
// Alpine is lightweight and includes basic networking tools
!!heropods.container_new
name:'ipv4_container'
image:'custom'
custom_image_name:'alpine_3_20'
docker_url:'docker.io/library/alpine:3.20'
// Step 3: Start the container
// This sets up the veth pair and configures IPv4 networking
!!heropods.container_start
name:'ipv4_container'
// Step 4: Verify network configuration inside the container
// Show network interfaces and IP addresses
!!heropods.container_exec
name:'ipv4_container'
cmd:'ip addr show'
stdout:true
// Show routing table
!!heropods.container_exec
name:'ipv4_container'
cmd:'ip route show'
stdout:true
// Show DNS configuration
!!heropods.container_exec
name:'ipv4_container'
cmd:'cat /etc/resolv.conf'
stdout:true
// Step 5: Test DNS resolution
// Verify that DNS queries work correctly
!!heropods.container_exec
name:'ipv4_container'
cmd:'nslookup google.com'
stdout:true
// Step 6: Test HTTP connectivity
// Use wget to verify internet access (ping requires CAP_NET_RAW capability)
!!heropods.container_exec
name:'ipv4_container'
cmd:'wget -O- http://google.com --timeout=5 2>&1 | head -n 10'
stdout:true
// Test another website to confirm connectivity
!!heropods.container_exec
name:'ipv4_container'
cmd:'wget -O- http://example.com --timeout=5 2>&1 | head -n 10'
stdout:true
// Step 7: Test HTTPS connectivity (if wget supports it)
!!heropods.container_exec
name:'ipv4_container'
cmd:'wget -O- https://www.google.com --timeout=5 --no-check-certificate 2>&1 | head -n 10'
stdout:true
// Step 8: Stop the container
// This removes the veth pair and cleans up network configuration
!!heropods.container_stop
name:'ipv4_container'
// Step 9: Delete the container
// This removes the container and all associated resources
!!heropods.container_delete
name:'ipv4_container'

6
examples/virt/heropods/runcommands.vsh Normal file → Executable file
View File

@@ -2,12 +2,12 @@
import incubaid.herolib.virt.heropods
mut factory := heropods.new(
mut heropods_ := heropods.new(
reset: false
use_podman: true
) or { panic('Failed to init ContainerFactory: ${err}') }
) or { panic('Failed to init HeroPods: ${err}') }
mut container := factory.new(
mut container := heropods_.container_new(
name: 'alpine_demo'
image: .custom
custom_image_name: 'alpine_3_20'

View File

@@ -0,0 +1,79 @@
#!/usr/bin/env hero
// ============================================================================
// HeroPods Example: Simple Container Lifecycle Management
// ============================================================================
//
// This script demonstrates the basic container lifecycle operations:
// - Creating a container
// - Starting a container
// - Executing commands inside the container
// - Stopping a container
// - Deleting a container
//
// No networking tests - just basic container operations.
// ============================================================================
// Step 1: Configure HeroPods instance
// This creates a HeroPods instance named 'simple_demo' with default settings
!!heropods.configure
name:'simple_demo'
reset:false
use_podman:true
// Step 2: Create a new Alpine Linux container
// This pulls the Alpine 3.20 image from Docker Hub and prepares it for use
!!heropods.container_new
name:'simple_container'
image:'custom'
custom_image_name:'alpine_3_20'
docker_url:'docker.io/library/alpine:3.20'
// Step 3: Start the container
// This starts the container using crun (OCI runtime)
!!heropods.container_start
name:'simple_container'
// Step 4: Execute basic commands inside the container
// These commands demonstrate that the container is running and functional
// Show kernel information
!!heropods.container_exec
name:'simple_container'
cmd:'uname -a'
stdout:true
// List root directory contents
!!heropods.container_exec
name:'simple_container'
cmd:'ls -la /'
stdout:true
// Show OS release information
!!heropods.container_exec
name:'simple_container'
cmd:'cat /etc/os-release'
stdout:true
// Show current processes
!!heropods.container_exec
name:'simple_container'
cmd:'ps aux'
stdout:true
// Show environment variables
!!heropods.container_exec
name:'simple_container'
cmd:'env'
stdout:true
// Step 5: Stop the container
// This gracefully stops the container (SIGTERM, then SIGKILL if needed)
!!heropods.container_stop
name:'simple_container'
// Step 6: Delete the container
// This removes the container and cleans up all associated resources
!!heropods.container_delete
name:'simple_container'

View File

@@ -38,7 +38,7 @@ mut cl := hetznermanager.get()!
// println('test cache, first time slow then fast')
// }
// println(cl.servers_list()!)
println(cl.servers_list()!)
// mut serverinfo := cl.server_info_get(name: 'kristof2')!

1
examples/virt/kubernetes/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
kubernetes_example

View File

@@ -0,0 +1,177 @@
# Kubernetes Client Example
This example demonstrates the Kubernetes client functionality in HeroLib, including JSON parsing and cluster interaction.
## Prerequisites
1. **kubectl installed**: The Kubernetes command-line tool must be installed on your system.
- macOS: `brew install kubectl`
- Linux: See [official installation guide](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
- Windows: See [official installation guide](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/)
2. **Kubernetes cluster**: You need access to a Kubernetes cluster. For local development, you can use:
- **Minikube**: `brew install minikube && minikube start`
- **Kind**: `brew install kind && kind create cluster`
- **Docker Desktop**: Enable Kubernetes in Docker Desktop settings
- **k3s**: Lightweight Kubernetes distribution
## Running the Example
### Method 1: Direct Execution (Recommended)
```bash
# Make the script executable
chmod +x examples/virt/kubernetes/kubernetes_example.vsh
# Run the script
./examples/virt/kubernetes/kubernetes_example.vsh
```
### Method 2: Using V Command
```bash
v -enable-globals run examples/virt/kubernetes/kubernetes_example.vsh
```
## What the Example Demonstrates
The example script demonstrates the following functionality:
### 1. **Cluster Information**
- Retrieves Kubernetes cluster version
- Counts total nodes in the cluster
- Counts total namespaces
- Counts running pods across all namespaces
### 2. **Pod Management**
- Lists all pods in the `default` namespace
- Displays pod details:
- Name, namespace, status
- Node assignment and IP address
- Container names
- Labels and creation timestamp
### 3. **Deployment Management**
- Lists all deployments in the `default` namespace
- Shows deployment information:
- Name and namespace
- Replica counts (desired, ready, available, updated)
- Labels and creation timestamp
### 4. **Service Management**
- Lists all services in the `default` namespace
- Displays service details:
- Name, namespace, and type (ClusterIP, NodePort, LoadBalancer)
- Cluster IP and external IP (if applicable)
- Exposed ports and protocols
- Labels and creation timestamp
## Expected Output
### With a Running Cluster
When connected to a Kubernetes cluster with resources, you'll see formatted output like:
```
╔════════════════════════════════════════════════════════════════╗
║ Kubernetes Client Example - HeroLib ║
║ Demonstrates JSON parsing and cluster interaction ║
╚════════════════════════════════════════════════════════════════╝
[INFO] Creating Kubernetes client instance...
[SUCCESS] Kubernetes client created successfully
- 1. Cluster Information
[INFO] Retrieving cluster information...
┌─────────────────────────────────────────────────────────────┐
│ Cluster Overview │
├─────────────────────────────────────────────────────────────┤
│ API Server: https://127.0.0.1:6443 │
│ Version: v1.31.0 │
│ Nodes: 3 │
│ Namespaces: 5 │
│ Running Pods: 12 │
└─────────────────────────────────────────────────────────────┘
```
### Without a Cluster
If kubectl is not installed or no cluster is configured, you'll see helpful error messages:
```
Error: Failed to get cluster information
...
This usually means:
- kubectl is not installed
- No Kubernetes cluster is configured (check ~/.kube/config)
- The cluster is not accessible
To set up a local cluster, you can use:
- Minikube: https://minikube.sigs.k8s.io/docs/start/
- Kind: https://kind.sigs.k8s.io/docs/user/quick-start/
- Docker Desktop (includes Kubernetes)
```
## Creating Test Resources
If your cluster is empty, you can create test resources to see the example in action:
```bash
# Create a test pod
kubectl run nginx --image=nginx
# Create a test deployment
kubectl create deployment nginx-deployment --image=nginx --replicas=3
# Expose the deployment as a service
kubectl expose deployment nginx-deployment --port=80 --type=ClusterIP
```
## Code Structure
The example demonstrates proper usage of the HeroLib Kubernetes client:
1. **Factory Pattern**: Uses `kubernetes.new()` to create a client instance
2. **Error Handling**: Proper use of V's `!` error propagation and `or {}` blocks
3. **JSON Parsing**: All kubectl JSON output is parsed into structured V types
4. **Console Output**: Clear, formatted output using the `console` module
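Put together, a minimal sketch of that pattern (using the same calls as `kubernetes_example.vsh`) looks like this:
```v
import incubaid.herolib.virt.kubernetes

mut client := kubernetes.new() or {
	eprintln('failed to create Kubernetes client: ${err}')
	exit(1)
}
// get_pods parses kubectl's JSON output into structured Pod values
pods := client.get_pods('default') or { []kubernetes.Pod{} }
println('found ${pods.len} pod(s) in the default namespace')
```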
## Implementation Details
The Kubernetes client module uses:
- **Struct-based JSON decoding**: V's `json.decode(Type, data)` for type-safe parsing
- **Kubernetes API response structs**: Matching kubectl's JSON output format
- **Runtime resource structs**: Clean data structures for application use (`Pod`, `Deployment`, `Service`)
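As a simplified illustration of struct-based decoding (the struct names below are stand-ins for this sketch, not the module's actual types):
```v
import json

struct Metadata {
	name      string
	namespace string
}

struct Item {
	metadata Metadata
}

struct PodList {
	items []Item
}

fn main() {
	data := '{"items":[{"metadata":{"name":"nginx","namespace":"default"}}]}'
	list := json.decode(PodList, data) or { panic(err) }
	println(list.items[0].metadata.name) // prints: nginx
}
```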
## Troubleshooting
### "kubectl: command not found"
Install kubectl using your package manager (see Prerequisites above).
### "The connection to the server was refused"
Start a local Kubernetes cluster:
```bash
minikube start
# or
kind create cluster
```
### "No resources found in default namespace"
Create test resources using the commands in the "Creating Test Resources" section above.
## Related Files
- **Implementation**: `lib/virt/kubernetes/kubernetes_client.v`
- **Data Models**: `lib/virt/kubernetes/kubernetes_resources_model.v`
- **Unit Tests**: `lib/virt/kubernetes/kubernetes_test.v`
- **Factory**: `lib/virt/kubernetes/kubernetes_factory_.v`

View File

@@ -0,0 +1,231 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.virt.kubernetes
import incubaid.herolib.ui.console
println('╔════════════════════════════════════════════════════════════════╗')
println('║            Kubernetes Client Example - HeroLib                 ║')
println('║       Demonstrates JSON parsing and cluster interaction        ║')
println('╚════════════════════════════════════════════════════════════════╝')
println('')
// Create a Kubernetes client instance using the factory pattern
println('[INFO] Creating Kubernetes client instance...')
mut client := kubernetes.new() or {
console.print_header('Error: Failed to create Kubernetes client')
eprintln('${err}')
eprintln('')
eprintln('Make sure kubectl is installed and configured properly.')
eprintln('You can install kubectl with your package manager (see the README prerequisites).')
exit(1)
}
println('[SUCCESS] Kubernetes client created successfully')
println('')
// ============================================================================
// 1. Get Cluster Information
// ============================================================================
console.print_header('1. Cluster Information')
println('[INFO] Retrieving cluster information...')
println('')
cluster := client.cluster_info() or {
console.print_header('Error: Failed to get cluster information')
eprintln('${err}')
eprintln('')
eprintln('This usually means:')
eprintln(' - kubectl is not installed')
eprintln(' - No Kubernetes cluster is configured (check ~/.kube/config)')
eprintln(' - The cluster is not accessible')
eprintln('')
eprintln('To set up a local cluster, you can use:')
eprintln(' - Minikube: https://minikube.sigs.k8s.io/docs/start/')
eprintln(' - Kind: https://kind.sigs.k8s.io/docs/user/quick-start/')
eprintln(' - Docker Desktop (includes Kubernetes)')
exit(1)
}
println('┌─────────────────────────────────────────────────────────────┐')
println('│ Cluster Overview                                            │')
println('├─────────────────────────────────────────────────────────────┤')
println('│ API Server:   ${cluster.api_server:-50}│')
println('│ Version:      ${cluster.version:-50}│')
println('│ Nodes:        ${cluster.nodes.str():-50}│')
println('│ Namespaces:   ${cluster.namespaces.str():-50}│')
println('│ Running Pods: ${cluster.running_pods.str():-50}│')
println('└─────────────────────────────────────────────────────────────┘')
println('')
// ============================================================================
// 2. Get Pods in the 'default' namespace
// ============================================================================
console.print_header('2. Pods in "default" Namespace')
println('[INFO] Retrieving pods from the default namespace...')
println('')
pods := client.get_pods('default') or {
console.print_header('Warning: Failed to get pods')
eprintln('${err}')
eprintln('')
[]kubernetes.Pod{}
}
if pods.len == 0 {
println('No pods found in the default namespace.')
println('')
println('To create a test pod, run:')
println(' kubectl run nginx --image=nginx')
println('')
} else {
println('Found ${pods.len} pod(s) in the default namespace:')
println('')
for i, pod in pods {
println('')
println(' Pod #${i + 1:-56}')
println('')
println(' Name: ${pod.name:-50}')
println(' Namespace: ${pod.namespace:-50}')
println(' Status: ${pod.status:-50}')
println(' Node: ${pod.node:-50}')
println(' IP: ${pod.ip:-50}')
println(' Containers: ${pod.containers.join(', '):-50}')
println(' Created: ${pod.created_at:-50}')
if pod.labels.len > 0 {
println(' Labels: ')
for key, value in pod.labels {
label_str := ' ${key}=${value}'
println(' ${label_str:-58}')
}
}
println('')
println('')
}
}
// ============================================================================
// 3. Get Deployments in the 'default' namespace
// ============================================================================
console.print_header('3. Deployments in "default" Namespace')
println('[INFO] Retrieving deployments from the default namespace...')
println('')
deployments := client.get_deployments('default') or {
console.print_header('Warning: Failed to get deployments')
eprintln('${err}')
eprintln('')
[]kubernetes.Deployment{}
}
if deployments.len == 0 {
println('No deployments found in the default namespace.')
println('')
println('To create a test deployment, run:')
println(' kubectl create deployment nginx --image=nginx --replicas=3')
println('')
} else {
println('Found ${deployments.len} deployment(s) in the default namespace:')
println('')
for i, deploy in deployments {
ready_status := if deploy.ready_replicas == deploy.replicas { 'Ready' } else { 'Not Ready' }
println('')
println(' Deployment #${i + 1:-53}')
println('')
println(' Name: ${deploy.name:-44}')
println(' Namespace: ${deploy.namespace:-44}')
println(' Replicas: ${deploy.replicas.str():-44}')
println(' Ready Replicas: ${deploy.ready_replicas.str():-44}')
println(' Available: ${deploy.available_replicas.str():-44}')
println(' Updated: ${deploy.updated_replicas.str():-44}')
println(' Status: ${ready_status:-44}')
println(' Created: ${deploy.created_at:-44}')
if deploy.labels.len > 0 {
println(' Labels: ')
for key, value in deploy.labels {
label_str := ' ${key}=${value}'
println(' ${label_str:-58}')
}
}
println('')
println('')
}
}
// ============================================================================
// 4. Get Services in the 'default' namespace
// ============================================================================
console.print_header('4. Services in "default" Namespace')
println('[INFO] Retrieving services from the default namespace...')
println('')
services := client.get_services('default') or {
console.print_header('Warning: Failed to get services')
eprintln('${err}')
eprintln('')
[]kubernetes.Service{}
}
if services.len == 0 {
println('No services found in the default namespace.')
println('')
println('To create a test service, run:')
println(' kubectl expose deployment nginx --port=80 --type=ClusterIP')
println('')
} else {
println('Found ${services.len} service(s) in the default namespace:')
println('')
for i, svc in services {
println('')
println(' Service #${i + 1:-54}')
println('')
println(' Name: ${svc.name:-48}')
println(' Namespace: ${svc.namespace:-48}')
println(' Type: ${svc.service_type:-48}')
println(' Cluster IP: ${svc.cluster_ip:-48}')
if svc.external_ip.len > 0 {
println(' External IP: ${svc.external_ip:-48}')
}
if svc.ports.len > 0 {
println(' Ports: ${svc.ports.join(', '):-48}')
}
println(' Created: ${svc.created_at:-48}')
if svc.labels.len > 0 {
println(' Labels: ')
for key, value in svc.labels {
label_str := ' ${key}=${value}'
println(' ${label_str:-58}')
}
}
println('')
println('')
}
}
// ============================================================================
// Summary
// ============================================================================
console.print_header('Summary')
println(' Successfully demonstrated Kubernetes client functionality')
println(' Cluster information retrieved and parsed')
println(' Pods: ${pods.len} found')
println(' Deployments: ${deployments.len} found')
println(' Services: ${services.len} found')
println('')
println('All JSON parsing operations completed successfully!')
println('')
println('')
println(' Example Complete ')
println('')

View File

@@ -1,31 +0,0 @@
{
"folders": [
{
"path": "."
},
],
"settings": {
"extensions.ignoreRecommendations": false
},
"extensions": {
"unwantedRecommendations": [],
"recommendations": [
"saoudrizwan.claude-dev",
"shakram02.bash-beautify",
"ms-vsliveshare.vsliveshare",
"yzhang.markdown-all-in-one",
"elesoho.vscode-markdown-paste-image",
"ms-vscode-remote.remote-ssh",
"ms-vscode-remote.remote-ssh-edit",
"ms-vscode.remote-explorer",
"ms-vscode.remote-repositories",
"charliermarsh.ruff",
"bmalehorn.shell-syntax",
"qwtel.sqlite-viewer",
"simonsiefke.svg-preview",
"gruntfuggly.todo-tree",
"vosca.vscode-v-analyzer",
"tomoki1207.pdf"
]
}
}

View File

@@ -45,7 +45,7 @@ fn addtoscript(tofind string, toadd string) ! {
// Reset symlinks (cleanup)
println('Resetting all symlinks...')
os.rm('${os.home_dir()}/.vmodules/incubaid/herolib') or {}
os.rm('${os.home_dir()}/.vmodules/freeflowuniverse/herolib') or {}
os.rm('${os.home_dir()}/.vmodules/incubaid/herolib') or {}
// Create necessary directories
os.mkdir_all('${os.home_dir()}/.vmodules/incubaid') or {

36
lib/ai/client/README.md Normal file
View File

@@ -0,0 +1,36 @@
# AIClient Factory
This directory contains the implementation of the `AIClient` factory, which provides a unified interface for interacting with various Large Language Model (LLM) providers such as Groq and OpenRouter. It leverages the existing OpenAI client infrastructure to abstract away the differences between providers.
## File Structure
- [`aiclient.v`](lib/ai/client/aiclient.v): The main factory and core functions for the `AIClient`.
- [`aiclient_models.v`](lib/ai/client/aiclient_models.v): Defines LLM model enums and their mapping to specific model names and API base URLs.
- [`aiclient_llm.v`](lib/ai/client/aiclient_llm.v): Handles the initialization of various LLM provider clients.
- [`aiclient_embed.v`](lib/ai/client/aiclient_embed.v): Provides functions for generating embeddings using the configured LLM models.
- [`aiclient_write.v`](lib/ai/client/aiclient_write.v): Implements complex file writing logic, including backup, AI-driven modification, content validation, and retry mechanisms.
- [`aiclient_validate.v`](lib/ai/client/aiclient_validate.v): Contains placeholder functions for validating different file types (Vlang, Markdown, YAML, JSON).
## Usage
To use the `AIClient`, you first need to initialize it:
```v
import incubaid.herolib.ai.client

mut ai_client := client.new()!
```
Ensure that the necessary environment variables (`GROQKEY` and `OPENROUTER_API_KEY`) are set for the LLM providers.
## Environment Variables
- `GROQKEY`: API key for Groq.
- `OPENROUTER_API_KEY`: API key for OpenRouter.
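For example (placeholder values shown, use your own keys):
```bash
export GROQKEY='your-groq-api-key'
export OPENROUTER_API_KEY='your-openrouter-api-key'
```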
## Dependencies
The file validators rely on external V modules, which can be installed with:
```bash
v install prantlf.yaml
v install markdown
```

15
lib/ai/client/aiclient.v Normal file
View File

@@ -0,0 +1,15 @@
module client
@[heap]
pub struct AIClient {
pub mut:
llms AIClientLLMs
// Add other fields as needed
}
pub fn new() !AIClient {
llms := llms_init()!
return AIClient{
llms: llms
}
}

View File

@@ -0,0 +1,5 @@
module client
// pub fn (mut ac AIClient) embed(txt string) ![]f32 {
// return ac.llms.llm_embed.embeddings(txt)!
// }

View File

@@ -0,0 +1,122 @@
module client
import incubaid.herolib.clients.openai
import os
pub struct AIClientLLMs {
pub mut:
llm_maverick &openai.OpenAI
llm_qwen &openai.OpenAI
llm_120b &openai.OpenAI
llm_best &openai.OpenAI
llm_flash &openai.OpenAI
llm_pro &openai.OpenAI
llm_morph &openai.OpenAI
llm_embed &openai.OpenAI
llm_local &openai.OpenAI
llm_embed_local &openai.OpenAI
}
// Initialize all LLM clients
pub fn llms_init() !AIClientLLMs {
groq_key := os.getenv('GROQKEY')
if groq_key.len == 0 {
return error('GROQKEY environment variable not set')
}
openrouter_key := os.getenv('OPENROUTER_API_KEY')
if openrouter_key.len == 0 {
return error('OPENROUTER_API_KEY environment variable not set')
}
mut maverick_client := openai.OpenAI{
name: 'maverick'
api_key: groq_key
url: 'https://api.groq.com/openai/v1'
model_default: 'meta-llama/llama-4-maverick-17b-128e-instruct'
}
openai.set(maverick_client)!
mut qwen_client := openai.OpenAI{
name: 'qwen'
api_key: groq_key
url: 'https://api.groq.com/openai/v1'
model_default: 'qwen/qwen3-32b'
}
openai.set(qwen_client)!
mut llm_120b_client := openai.OpenAI{
name: 'llm_120b'
api_key: groq_key
url: 'https://api.groq.com/openai/v1'
model_default: 'openai/gpt-oss-120b'
}
openai.set(llm_120b_client)!
mut best_client := openai.OpenAI{
name: 'best'
api_key: openrouter_key
url: 'https://openrouter.ai/api/v1'
model_default: 'anthropic/claude-haiku-4.5'
}
openai.set(best_client)!
mut flash_client := openai.OpenAI{
name: 'flash'
api_key: openrouter_key
url: 'https://openrouter.ai/api/v1'
model_default: 'google/gemini-2.5-flash'
}
openai.set(flash_client)!
mut pro_client := openai.OpenAI{
name: 'pro'
api_key: openrouter_key
url: 'https://openrouter.ai/api/v1'
model_default: 'google/gemini-3.0-pro'
}
openai.set(pro_client)!
mut morph_client := openai.OpenAI{
name: 'morph'
api_key: openrouter_key
url: 'https://openrouter.ai/api/v1'
model_default: 'morph/morph-v3-fast'
}
openai.set(morph_client)!
mut embed_client := openai.OpenAI{
name: 'embed'
api_key: openrouter_key
url: 'https://openrouter.ai/api/v1'
model_default: 'qwen/qwen3-embedding-0.6b'
}
openai.set(embed_client)!
mut local_client := openai.OpenAI{
name: 'local'
url: 'http://localhost:1234/v1'
model_default: 'google/gemma-3-12b'
}
openai.set(local_client)!
mut local_embed_client := openai.OpenAI{
name: 'embedlocal'
url: 'http://localhost:1234/v1'
model_default: 'text-embedding-nomic-embed-text-v1.5:2'
}
openai.set(local_embed_client)!
return AIClientLLMs{
llm_maverick: openai.get(name: 'maverick')!
llm_qwen: openai.get(name: 'qwen')!
llm_120b: openai.get(name: 'llm_120b')!
llm_best: openai.get(name: 'best')!
llm_flash: openai.get(name: 'flash')!
llm_pro: openai.get(name: 'pro')!
llm_morph: openai.get(name: 'morph')!
llm_embed: openai.get(name: 'embed')!
llm_local: openai.get(name: 'local')!
llm_embed_local: openai.get(name: 'embedlocal')!
}
}

View File

@@ -0,0 +1,28 @@
module client
pub enum LLMEnum {
maverick
qwen
embed
llm_120b
best
flash
pro
morph
local
}
fn llm_to_model_url(model LLMEnum) !(string, string) {
// Returns tuple: (model_name, base_url)
return match model {
.maverick { 'meta-llama/llama-4-maverick-17b-128e-instruct', 'https://api.groq.com/openai/v1' }
.qwen { 'qwen/qwen3-32b', 'https://api.groq.com/openai/v1' }
.embed { 'qwen/qwen3-embedding-0.6b', 'https://openrouter.ai/api/v1' }
.llm_120b { 'openai/gpt-oss-120b', 'https://api.groq.com/openai/v1' }
.best { 'anthropic/claude-haiku-4.5', 'https://openrouter.ai/api/v1' }
.flash { 'google/gemini-2.5-flash', 'https://openrouter.ai/api/v1' }
.pro { 'google/gemini-2.5-pro', 'https://openrouter.ai/api/v1' }
.morph { 'morph/morph-v3-fast', 'https://openrouter.ai/api/v1' }
.local { 'google/gemma-3-12b', 'http://localhost:1234/v1' }
}
}

View File

@@ -0,0 +1,44 @@
module client

import incubaid.herolib.core.pathlib
import markdown
import os
import prantlf.yaml { parse_text }
import x.json2

pub fn validate_vlang_content(path pathlib.Path) !string {
	// Use `v fmt -check` to validate V language syntax.
	// If there are formatting or syntax issues, `v fmt -check` returns a
	// non-zero exit code and prints the issues to stderr.
	res := os.system('v fmt -check "${path.str()}" 2>/dev/null')
	if res != 0 {
		return 'V language syntax validation failed. File has formatting or syntax errors.'
	}
	return '' // empty means no error
}

pub fn validate_markdown_content(path_ pathlib.Path) !string {
	// Validate Markdown by attempting to render it to HTML;
	// a render error indicates invalid Markdown.
	mut mypath := path_
	content := mypath.read() or { return 'Failed to read markdown file: ${err}' }
	mut xx := markdown.HtmlRenderer{}
	_ := markdown.render(content, mut xx) or { return 'Invalid Markdown content: ${err}' }
	return '' // empty means no error
}

pub fn validate_yaml_content(path_ pathlib.Path) !string {
	// Validate YAML by attempting to parse the content.
	mut mypath := path_
	content := mypath.read() or { return 'Failed to read YAML file: ${err}' }
	_ := parse_text(content) or { return 'Invalid YAML content: ${err}' }
	return '' // empty means no error
}

pub fn validate_json_content(path_ pathlib.Path) !string {
	// Validate JSON by attempting to decode the content.
	mut mypath := path_
	content := mypath.read() or { return 'Failed to read JSON file: ${err}' }
	json2.decode[json2.Any](content) or { return 'Invalid JSON content: ${err}' }
	return '' // empty means no error
}
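All four validators share the same contract: an empty string means the content is valid, anything else is a human-readable error message. A hedged sketch of how a caller branches on that (the path is illustrative):

```v
mut p := pathlib.get('/tmp/example.yaml')
msg := validate_yaml_content(p)!
if msg != '' {
	eprintln('validation failed: ${msg}')
}
```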


@@ -0,0 +1,177 @@
module client

import incubaid.herolib.core.pathlib
import incubaid.herolib.ui.console
import incubaid.herolib.clients.openai
import os

// WritePromptArgs holds the parameters for write_from_prompt
@[params]
pub struct WritePromptArgs {
pub mut:
	path          pathlib.Path
	prompt        string
	models        []LLMEnum = [.best]
	temperature   f64       = 0.5
	max_tokens    int       = 16000
	system_prompt string    = 'You are a helpful assistant that modifies files based on user instructions.'
}

// write_from_prompt modifies a file based on AI-generated modification instructions
//
// The process:
// 1. Uses the first model to generate modification instructions from the prompt
// 2. Uses the morph model to apply those instructions to the original content
// 3. Validates the result based on file type (.v, .md, .yaml, .json)
// 4. On validation failure, retries with the next model in the list
// 5. Restores from backup if all models fail
pub fn (mut ac AIClient) write_from_prompt(args WritePromptArgs) ! {
	mut mypath := args.path
	original_content := mypath.read()!
	mut backup_path := pathlib.get_file(path: '${mypath.path}.backup', create: true)!
	backup_path.write(original_content)!
	mut selected_models := args.models.clone()
	if selected_models.len == 0 {
		selected_models = [.best]
	}
	for model_enum in selected_models {
		model_name, _ := llm_to_model_url(model_enum)!
		// Step 1: get modification instructions from the selected model
		mut llm_client := get_llm_client(mut ac, model_enum)
		instruction_prompt := generate_instruction_prompt(original_content, mypath.ext()!,
			args.prompt)
		instructions_response := llm_client.chat_completion(
			message:               instruction_prompt
			temperature:           args.temperature
			max_completion_tokens: args.max_tokens
		)!
		instructions := instructions_response.result.trim_space()
		// Step 2: use the morph model to apply the instructions to the original content
		morph_prompt := generate_morph_prompt(original_content, instructions)
		morph_response := ac.llms.llm_morph.chat_completion(
			message:               morph_prompt
			temperature:           args.temperature
			max_completion_tokens: args.max_tokens
		)!
		new_content := morph_response.result.trim_space()
		// Step 3: validate the content based on file extension
		mut validation_error := ''
		// Create a temporary file for validation
		file_ext := mypath.ext()!
		mut temp_path := pathlib.get_file(
			path:   '${mypath.path}.validate_temp${file_ext}'
			create: true
		)!
		temp_path.write(new_content)!
		match file_ext {
			'.v' {
				validation_error = validate_vlang_content(temp_path)!
			}
			'.md' {
				validation_error = validate_markdown_content(temp_path)!
			}
			'.yaml', '.yml' {
				validation_error = validate_yaml_content(temp_path)!
			}
			'.json' {
				validation_error = validate_json_content(temp_path)!
			}
			else {
				// No specific validation for other file types
			}
		}
		// Clean up the temporary validation file
		if temp_path.exists() {
			temp_path.delete()!
		}
		if validation_error == '' {
			// Validation passed: write the new content
			mypath.write(new_content)!
			backup_path.delete()! // remove the backup on success
			console.print_stdout('Successfully modified ${mypath.str()} using model ${model_name}')
			return
		} else {
			console.print_stderr('Validation failed for model ${model_name}. Error: ${validation_error}. Trying next model...')
		}
	}
	// Step 4: if all models fail, restore the backup and return an error
	original_backup := backup_path.read()!
	mypath.write(original_backup)!
	backup_path.delete()!
	return error('All models failed to generate valid content. Original file restored from backup.')
}

// get_llm_client returns the appropriate LLM client for the given model enum
fn get_llm_client(mut ac AIClient, model LLMEnum) &openai.OpenAI {
	return match model {
		.maverick { ac.llms.llm_maverick }
		.qwen { ac.llms.llm_qwen }
		.embed { ac.llms.llm_embed }
		.llm_120b { ac.llms.llm_120b }
		.best { ac.llms.llm_best }
		.flash { ac.llms.llm_flash }
		.pro { ac.llms.llm_pro }
		.morph { ac.llms.llm_morph }
		.local { ac.llms.llm_local }
	}
}

// generate_instruction_prompt creates the prompt for generating modification instructions
fn generate_instruction_prompt(content string, file_ext string, user_prompt string) string {
	return 'You are a file modification assistant specializing in ${file_ext} files.
The user will provide a file and a modification request. Your task is to analyze the request and respond with ONLY clear, concise modification instructions.
Do NOT apply the modifications yourself. Just provide step-by-step instructions that could be applied to transform the file.
Original file content:
\`\`\`${file_ext}
${content}
\`\`\`
File type: ${file_ext}
User modification request:
${user_prompt}
Provide only the modification instructions. Be specific and clear. Format your response as a numbered list of changes to make.'
}

// generate_morph_prompt creates the prompt for the morph model to apply instructions
fn generate_morph_prompt(original_content string, instructions string) string {
	return 'You are an expert code and file modifier. Your task is to apply modification instructions to existing file content.
Take the original file content and the modification instructions, then generate the modified version.
IMPORTANT: Return ONLY the modified file content. Do NOT include:
- Markdown formatting or code blocks
- Explanations or commentary
- "Here is the modified file:" prefixes
- Any text other than the actual modified content
Original file content:
\`\`\`
${original_content}
\`\`\`
Modification instructions to apply:
${instructions}
Return the complete modified file content:'
}
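Putting it together, a hedged end-to-end sketch; `new_ai_client()` is a stand-in for however `AIClient` is actually constructed, which this hunk does not show:

```v
mut ac := new_ai_client()! // hypothetical constructor, not part of this diff
ac.write_from_prompt(
	path:   pathlib.get('README.md')
	prompt: 'Add an installation section after the intro.'
	models: [.flash, .best] // .best is tried only if .flash output fails validation
)!
```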


@@ -0,0 +1,53 @@
use lib/clients/openai
make a factory called AIClient
we make multiple clients on it
- aiclient.llm_maverick = now use openai client to connect to groq and use model: meta-llama/llama-4-maverick-17b-128e-instruct
- aiclient.llm_qwen = now use openai client to connect to groq and use model: qwen/qwen3-32b
- aiclient.llm_embed = now use openai client to connect to openrouter and use model: qwen/qwen3-embedding-0.6b
- aiclient.llm_120b = now use openai client to connect to groq and use model: openai/gpt-oss-120b
- aiclient.llm_best = now use openai client to connect to openrouter and use model: anthropic/claude-haiku-4.5
- aiclient.llm_flash = now use openai client to connect to openrouter and use model: google/gemini-2.5-flash
- aiclient.llm_pro = now use openai client to connect to openrouter and use model: google/gemini-2.5-pro
- aiclient.morph = now use openai client to connect to openrouter and use model: morph/morph-v3-fast
## for groq
- the baseURL "https://api.groq.com/openai/v1" is already somewhere in the openai client implementation; it asks for the env key
## for openrouter
- already known in the client; check the implementation
## model enum
- LLMEnum ... maverick, qwen, 120b, best, flash, pro
## now for the client make simple functions
- embed(txt) -> embeddings ...
- write_from_prompt(path:Path, prompt: str,models=[]LLMEnum)!
- execute the prompt using the first model; at the end of the prompt, add instructions to make sure we only return clear modification instructions for the path which is passed in, and only those instructions are returned
- use morph model to start from original content, and new instructions, to get the content we need to write (morph model puts it together)
- make a backup of the original content to a temporary file with .backup so we can roll back to original
- write the morphed content to the path
- check if file ends with .md, .v, .yaml or .json if yes we need to validate the content
- if file ends with .md, validate markdown content
- if file ends with .v, validate vlang code
- if file ends with .yaml, validate yaml content
- if file ends with .json, validate json content
- validate_vlang_content(path: Path) -> bool:
- validate vlang code content
- validate_markdown_content(path: Path) -> bool:
- validate markdown content
- validate_yaml_content(path: Path) -> bool:
- validate yaml content
- validate_json_content(path: Path) -> bool:
- validate json content
- for now the validate functions do nothing, just placeholders
- if validation is ok then remove the .backup and return
- if not ok, then restore the original and retry using the 2nd model from models; repeat until all models have been tried
- if everything we can try fails, then raise an error and restore the original content

lib/ai/filemap/README.md Normal file

@@ -0,0 +1,141 @@
# filemap Module
Parse directories or formatted strings into file maps with automatic ignore pattern support.
## Features
- 📂 Walk directories recursively and build file maps
- 🚫 Respect `.gitignore` and `.heroignore` ignore patterns with directory scoping
- 📝 Parse custom `===FILE:name===` format into file maps
- 📦 Export/write file maps to disk
- 🛡️ Robust, defensive parsing (handles spaces, variable `=` length, case-insensitive)
## Quick Start
### From Directory Path
```v
import incubaid.herolib.ai.filemap

mut fm := filemap.filemap(path: '/path/to/project')!

// Iterate files
for path, content in fm.content {
	println('${path}: ${content.len} bytes')
}
```
### From Formatted String
```v
content_str := '
===FILE:main.v===
fn main() {
	println("Hello!")
}
===FILE:utils/helper.v===
pub fn help() {}
===END===
'

mut fm := filemap.filemap(content: content_str)!
println(fm.get('main.v')!)
```
## FileMap Operations
```v
// Get file content
content := fm.get('path/to/file.txt')!
// Set/modify file
fm.set('new/file.txt', 'content here')
// Find files by prefix
files := fm.find('src/')
// Export to directory
fm.export('/output/dir')!
// Write updates to directory
fm.write('/project/dir')!
// Convert back to formatted string
text := fm.content()
```
## File Format
### Full Files
```
===FILE:path/to/file.txt===
File content here
Can span multiple lines
===END===
```
### Partial Content (for future morphing)
```
===FILECHANGE:src/models.v===
struct User {
id int
}
===END===
```
### Both Together
```
===FILE:main.v===
fn main() {}
===FILECHANGE:utils.v===
fn helper() {}
===END===
```
## Parsing Robustness
Parser handles variations:
```
===FILE:name.txt=== // Standard
== FILE : name.txt ==
===file:name.txt=== // Lowercase
==FILE:name.txt== // Different = count
```
## Error Handling
Errors are collected in `FileMap.errors`:
```v
mut fm := filemap.filemap(content: str)!
if fm.errors.len > 0 {
	for err in fm.errors {
		println('Line ${err.linenr}: ${err.message}')
	}
}
```
## Ignore Patterns
- Respects `.gitignore` and `.heroignore` in any parent directory
- Default patterns include `.git/`, `node_modules/`, `*.pyc`, etc.
- Use `/` suffix for directory patterns: `dist/`
- Use `*` for wildcards: `*.log`
- Lines starting with `#` are comments
Example `.heroignore`:
```
build/
*.tmp
.env
__pycache__/
```

lib/ai/filemap/factory.v Normal file

@@ -0,0 +1,24 @@
module filemap

@[params]
pub struct FileMapArgs {
pub mut:
	path         string
	content      string
	content_read bool = true // if false, file content is not read from disk
	// Include only paths matching any wildcard pattern (* = any sequence)
	filter []string
	// Exclude paths matching any wildcard pattern
	filter_ignore []string
}

// filemap creates a FileMap from a directory path or a formatted content string
pub fn filemap(args FileMapArgs) !FileMap {
	if args.path != '' {
		return filemap_get_from_path(args.path, args.content_read)!
	} else if args.content != '' {
		return filemap_get_from_content(args.content)!
	} else {
		return error('Either path or content must be provided')
	}
}
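Both entry points go through this single dispatcher, so the choice between walking a directory and parsing a formatted string is just a matter of which argument is set; a minimal sketch:

```v
// From a directory on disk
mut fm_dir := filemap(path: '/path/to/project')!

// From an ===FILE:...=== formatted string
mut fm_str := filemap(content: '===FILE:a.txt===\nhello\n===END===')!
```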


@@ -1,15 +1,17 @@
-module codewalker
+module filemap
import incubaid.herolib.core.pathlib
// FileMap represents parsed file structure with content and changes
pub struct FileMap {
pub mut:
-	source         string
-	content        map[string]string
-	content_change map[string]string
-	errors         []FMError
+	source         string            // Source path or origin
+	content        map[string]string // Full file content by path
+	content_change map[string]string // Partial/change content by path
+	errors         []FMError         // Parse errors encountered
}
+// content generates formatted string representation
pub fn (mut fm FileMap) content() string {
	mut out := []string{}
	for filepath, filecontent in fm.content {
@@ -24,7 +26,7 @@ pub fn (mut fm FileMap) content() string {
	return out.join_lines()
}
-// write in new location, all will be overwritten, will only work with full files, not changes
+// export writes all FILE content to destination directory
pub fn (mut fm FileMap) export(path string) ! {
	for filepath, filecontent in fm.content {
		dest := '${path}/${filepath}'
@@ -33,7 +35,7 @@ pub fn (mut fm FileMap) export(path string) ! {
	}
}
-@[PARAMS]
+@[params]
pub struct WriteParams {
	path   string
	v_test bool = true
@@ -41,29 +43,31 @@ pub struct WriteParams {
	python_test bool
}
-// update the files as found in the folder and update them or create
+// write updates files in destination directory (creates or overwrites)
pub fn (mut fm FileMap) write(path string) ! {
	for filepath, filecontent in fm.content {
		dest := '${path}/${filepath}'
+		// In future: validate language-specific formatting/tests before overwrite
		mut filepathtowrite := pathlib.get_file(path: dest, create: true)!
		filepathtowrite.write(filecontent)!
	}
	// TODO: phase 2, work with morph to integrate changes into the file
}
+// get retrieves file content by path
pub fn (fm FileMap) get(relpath string) !string {
	return fm.content[relpath] or { return error('File not found: ${relpath}') }
}
+// set stores file content by path
pub fn (mut fm FileMap) set(relpath string, content string) {
	fm.content[relpath] = content
}
+// delete removes file from content map
pub fn (mut fm FileMap) delete(relpath string) {
	fm.content.delete(relpath)
}
+// find returns all paths matching prefix
pub fn (fm FileMap) find(path string) []string {
	mut result := []string{}
	for filepath, _ in fm.content {


@@ -0,0 +1,363 @@
module filemap

import os
import incubaid.herolib.core.pathlib

fn test_parse_header_file() {
	kind, name := parse_header('===FILE:main.v===')!
	assert kind == BlockKind.file
	assert name == 'main.v'
}

fn test_parse_header_file2() {
	kind, name := parse_header('===FILE:main.v ===')!
	assert kind == BlockKind.file
	assert name == 'main.v'
}

fn test_parse_header_file3() {
	kind, name := parse_header('=== FILE:main.v ===')!
	assert kind == BlockKind.file
	assert name == 'main.v'
}

fn test_parse_header_file4() {
	kind, name := parse_header('== FILE: main.v =====')!
	assert kind == BlockKind.file
	assert name == 'main.v'
}

fn test_parse_header_filechange() {
	kind, name := parse_header('===FILECHANGE:utils/helper.v===')!
	assert kind == BlockKind.filechange
	assert name == 'utils/helper.v'
}

fn test_parse_header_end() {
	kind, _ := parse_header('===END===')!
	assert kind == BlockKind.end
}

fn test_parse_header_with_spaces() {
	kind, name := parse_header(' === FILE : config.yaml === ')!
	assert kind == BlockKind.file
	assert name == 'config.yaml'
}

fn test_parse_header_lowercase() {
	kind, name := parse_header('===file:test.txt===')!
	assert kind == BlockKind.file
	assert name == 'test.txt'
}

fn test_parse_header_variable_equals() {
	kind, name := parse_header('=FILE:path/file.v=')!
	assert kind == BlockKind.file
	assert name == 'path/file.v'
}

fn test_parse_header_end_lowercase() {
	kind, _ := parse_header('===end===')!
	assert kind == BlockKind.end
}

fn test_filemap_from_simple_content() {
	content := '===FILE:main.v===
fn main() {
println("Hello, World!")
}
===END==='
	fm := filemap_get_from_content(content)!
	assert fm.content.len == 1
	assert 'main.v' in fm.content
	assert fm.content['main.v'].contains('println')
}

fn test_filemap_from_multiple_files() {
	content := '===FILE:main.v===
fn main() {
println("Hello")
}
===FILE:utils/helper.v===
pub fn help() {
println("Helping")
}
===END==='
	fm := filemap_get_from_content(content)!
	assert fm.content.len == 2
	assert 'main.v' in fm.content
	assert 'utils/helper.v' in fm.content
}

fn test_filemap_with_filechange() {
	content := '===FILE:config.v===
pub const version = "1.0"
===FILECHANGE:main.v===
fn main() {
println(version)
}
===END==='
	fm := filemap_get_from_content(content)!
	assert fm.content.len == 1
	assert fm.content_change.len == 1
	assert 'config.v' in fm.content
	assert 'main.v' in fm.content_change
}

fn test_filemap_multiline_content() {
	content := '===FILE:multiline.txt===
Line 1
Line 2
Line 3
===FILE:another.txt===
Another content
===END==='
	fm := filemap_get_from_content(content)!
	assert fm.content['multiline.txt'].contains('Line 1')
	assert fm.content['multiline.txt'].contains('Line 2')
	assert fm.content['multiline.txt'].contains('Line 3')
	assert fm.content['another.txt'] == 'Another content'
}

fn test_filemap_get_method() {
	content := '===FILE:test.v===
test content
===END==='
	fm := filemap_get_from_content(content)!
	result := fm.get('test.v')!
	assert result == 'test content'
}

fn test_filemap_get_not_found() {
	content := '===FILE:test.v===
content
===END==='
	fm := filemap_get_from_content(content)!
	_ := fm.get('nonexistent.v') or {
		assert err.msg().contains('File not found')
		return
	}
	panic('Should have returned error')
}

fn test_filemap_set_method() {
	mut fm := FileMap{}
	fm.set('new/file.v', 'new content')
	assert fm.content['new/file.v'] == 'new content'
}

fn test_filemap_delete_method() {
	mut fm := FileMap{}
	fm.set('file1.v', 'content1')
	fm.set('file2.v', 'content2')
	assert fm.content.len == 2
	fm.delete('file1.v')
	assert fm.content.len == 1
	assert 'file2.v' in fm.content
	assert 'file1.v' !in fm.content
}

fn test_filemap_find_method() {
	mut fm := FileMap{}
	fm.set('src/main.v', 'main')
	fm.set('src/utils/helper.v', 'helper')
	fm.set('test/test.v', 'test')
	results := fm.find('src/')
	assert results.len == 2
	assert 'src/main.v' in results
	assert 'src/utils/helper.v' in results
}

fn test_filemap_find_empty() {
	mut fm := FileMap{}
	fm.set('main.v', 'main')
	results := fm.find('src/')
	assert results.len == 0
}

fn test_filemap_from_path() {
	// Create a temporary test directory
	tmpdir := os.temp_dir() + '/test_filemap_${os.getpid()}'
	os.mkdir_all(tmpdir) or { panic(err) }
	defer {
		os.rmdir_all(tmpdir) or {}
	}
	// Create test files
	os.mkdir_all('${tmpdir}/src') or { panic(err) }
	os.mkdir_all('${tmpdir}/test') or { panic(err) }
	os.write_file('${tmpdir}/main.v', 'fn main() {}')!
	os.write_file('${tmpdir}/src/utils.v', 'pub fn help() {}')!
	os.write_file('${tmpdir}/test/test.v', 'fn test() {}')!
	fm := filemap_get_from_path(tmpdir, true)!
	assert fm.content.len >= 3
	assert 'main.v' in fm.content
	assert fm.content['main.v'] == 'fn main() {}'
}

fn test_filemap_from_path_no_content() {
	tmpdir := os.temp_dir() + '/test_filemap_nocontent_${os.getpid()}'
	os.mkdir_all(tmpdir) or { panic(err) }
	defer {
		os.rmdir_all(tmpdir) or {}
	}
	os.mkdir_all('${tmpdir}/src') or { panic(err) }
	os.write_file('${tmpdir}/main.v', 'fn main() {}')!
	fm := filemap_get_from_path(tmpdir, false)!
	assert fm.content.len >= 1
	assert 'main.v' in fm.content
	assert fm.content['main.v'] == ''
}

fn test_filemap_from_path_not_exists() {
	_ := filemap_get_from_path('/nonexistent/path/12345', true) or {
		assert err.msg().contains('does not exist')
		return
	}
	panic('Should have returned error for nonexistent path')
}

fn test_filemap_content_string() {
	mut fm := FileMap{}
	fm.set('file1.v', 'content1')
	fm.set('file2.v', 'content2')
	output := fm.content()
	assert output.contains('===FILE:file1.v===')
	assert output.contains('content1')
	assert output.contains('===FILE:file2.v===')
	assert output.contains('content2')
	assert output.contains('===END===')
}

fn test_filemap_export() {
	tmpdir := os.temp_dir() + '/test_filemap_export_${os.getpid()}'
	os.mkdir_all(tmpdir) or { panic(err) }
	defer {
		os.rmdir_all(tmpdir) or {}
	}
	mut fm := FileMap{}
	fm.set('main.v', 'fn main() {}')
	fm.set('src/helper.v', 'pub fn help() {}')
	fm.export(tmpdir)!
	assert os.exists('${tmpdir}/main.v')
	assert os.exists('${tmpdir}/src/helper.v')
	assert os.read_file('${tmpdir}/main.v')! == 'fn main() {}'
}

fn test_filemap_write() {
	tmpdir := os.temp_dir() + '/test_filemap_write_${os.getpid()}'
	os.mkdir_all(tmpdir) or { panic(err) }
	defer {
		os.rmdir_all(tmpdir) or {}
	}
	mut fm := FileMap{}
	fm.set('config.v', 'const version = "1.0"')
	fm.set('models/user.v', 'struct User {}')
	fm.write(tmpdir)!
	assert os.exists('${tmpdir}/config.v')
	assert os.exists('${tmpdir}/models/user.v')
}

fn test_filemap_factory_from_path() {
	tmpdir := os.temp_dir() + '/test_factory_path_${os.getpid()}'
	os.mkdir_all(tmpdir) or { panic(err) }
	defer {
		os.rmdir_all(tmpdir) or {}
	}
	os.write_file('${tmpdir}/test.v', 'fn test() {}')!
	fm := filemap(path: tmpdir, content_read: true)!
	assert 'test.v' in fm.content
}

fn test_filemap_factory_from_content() {
	content := '===FILE:sample.v===
fn main() {}
===END==='
	fm := filemap(content: content)!
	assert 'sample.v' in fm.content
}

fn test_filemap_factory_requires_input() {
	_ := filemap(path: '', content: '') or {
		assert err.msg().contains('Either path or content')
		return
	}
	panic('Should have returned error')
}

fn test_filemap_parse_errors_content_before_file() {
	content := 'Some text before file
===FILE:main.v===
content
===END==='
	fm := filemap_get_from_content(content)!
	assert fm.errors.len > 0
	assert fm.errors[0].category == 'parse'
}

fn test_filemap_parse_errors_end_without_file() {
	content := '===END==='
	fm := filemap_get_from_content(content)!
	assert fm.errors.len > 0
}

fn test_filemap_empty_content() {
	content := ''
	fm := filemap_get_from_content(content)!
	assert fm.content.len == 0
}

fn test_filemap_complex_filenames() {
	content := '===FILE:src/v_models/user_model.v===
pub struct User {}
===FILE:test/unit/user_test.v===
fn test_user() {}
===FILE:.config/settings.json===
{ "key": "value" }
===END==='
	fm := filemap_get_from_content(content)!
	assert 'src/v_models/user_model.v' in fm.content
	assert 'test/unit/user_test.v' in fm.content
	assert '.config/settings.json' in fm.content
}
fn test_filemap_whitespace_preservation() {
	content := '===FILE:formatted.txt===
Line with spaces
	Tab indented
  Spaces indented
===END==='
	fm := filemap_get_from_content(content)!
	file_content := fm.content['formatted.txt']
	assert file_content.contains(' spaces')
	assert file_content.contains('\t')
}

lib/ai/filemap/ignore.v Normal file

@@ -0,0 +1,114 @@
module filemap

import arrays
import os
import incubaid.herolib.core.pathlib
// Default ignore patterns based on .gitignore conventions
const default_gitignore = '
.git/
.svn/
.hg/
.bzr/
node_modules/
__pycache__/
*.py[cod]
*.so
.Python
build/
develop-eggs/
downloads/
eggs/
.eggs/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
.env
.venv
venv/
.tox/
.nox/
.coverage
.coveragerc
coverage.xml
*.cover
*.gem
*.pyc
.cache
.pytest_cache/
.mypy_cache/
.hypothesis/
.DS_Store
Thumbs.db
*.tmp
*.temp
*.log
'
// find_ignore_patterns collects all .gitignore patterns from current directory up to repository root
//
// Walks up the directory tree using parent_find_advanced to locate all .gitignore files,
// stopping when it encounters the .git directory (repository root).
// Patterns are collected from:
// 1. Default ignore patterns (built-in)
// 2. All .gitignore files found from current directory to repository root
// 3. Filter out comments (lines starting with '#') and empty lines
//
// Parameters:
// - start_path: Optional starting directory path (defaults to current working directory if empty)
//
// Returns:
// - Combined, sorted, unique ignore patterns from all sources
// - Error if path operations fail (file not found, permission denied, etc.)
//
// Examples:
// // Use current working directory
// patterns := find_ignore_patterns()!
//
// // Use specific project directory
// patterns := find_ignore_patterns('/home/user/myproject')!
pub fn find_ignore_patterns(start_path string) ![]string {
	mut patterns := default_gitignore.split_into_lines()
	// Use the provided path or the current working directory
	mut search_from := start_path
	if search_from == '' { // empty start_path: use the current working directory
		search_from = os.getwd()
	}
	mut current_path := pathlib.get(search_from)
	// Find all .gitignore files up the tree until we hit the .git directory (repo root)
	mut gitignore_paths := current_path.parent_find_advanced('.gitignore', '.git')!
	// Read and collect patterns from all found .gitignore files
	for mut gitignore_path in gitignore_paths {
		if gitignore_path.is_file() {
			content := gitignore_path.read() or {
				// Skip files that can't be read (permission issues, etc.)
				continue
			}
			gitignore_lines := content.split_into_lines()
			for line in gitignore_lines {
				trimmed := line.trim_space()
				// Skip empty lines and comment lines
				if trimmed != '' && !trimmed.starts_with('#') {
					patterns << trimmed
				}
			}
		}
	}
	// Sort and deduplicate the collected patterns
	patterns.sort()
	patterns = arrays.uniq(patterns)
	return patterns
}
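The returned patterns plug straight into `pathlib`'s directory listing, which is exactly how `loaders.v` below consumes them; a minimal sketch:

```v
patterns := find_ignore_patterns('')! // '' means: start from os.getwd()
mut dir := pathlib.get('.')
mut files := dir.list(recursive: true, filter_ignore: patterns)!
```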

lib/ai/filemap/loaders.v Normal file

@@ -0,0 +1,127 @@
module filemap

import incubaid.herolib.core.pathlib

// filemap_get_from_path reads a directory and creates a FileMap, respecting ignore patterns
fn filemap_get_from_path(path string, content_read bool) !FileMap {
	mut dir := pathlib.get(path)
	if !dir.exists() || !dir.is_dir() {
		return error('Directory "${path}" does not exist')
	}
	mut fm := FileMap{
		source: path
	}
	ignore_patterns := find_ignore_patterns(path)!
	// List all files using pathlib with both default and custom ignore patterns
	mut file_list := dir.list(
		recursive:     true
		filter_ignore: ignore_patterns
	)!
	for mut file in file_list.paths {
		if file.is_file() {
			relpath := file.path_relative(path)!
			if content_read {
				content := file.read()!
				fm.content[relpath] = content
			} else {
				fm.content[relpath] = ''
			}
		}
	}
	return fm
}

// filemap_get_from_content parses a FileMap from a string in ===FILE:name=== format
fn filemap_get_from_content(content string) !FileMap {
	mut fm := FileMap{}
	mut current_kind := BlockKind.end
	mut filename := ''
	mut block := []string{}
	mut had_any_block := false
	mut linenr := 0
	for line in content.split_into_lines() {
		linenr += 1
		parsed_kind, parsed_name := parse_header(line)! // classify the raw line
		mut is_a_header_line := false
		if parsed_kind == .file || parsed_kind == .filechange {
			is_a_header_line = true
		} else if parsed_kind == .end && line.trim_space().to_lower() == '===end===' {
			// This is explicitly an END header
			is_a_header_line = true
		}
		if is_a_header_line {
			if parsed_kind == .end { // it's the explicit ===END===
				if filename == '' {
					if had_any_block {
						fm.errors << FMError{
							message:  'Unexpected END marker without active block'
							linenr:   linenr
							category: 'parse'
						}
					} else {
						fm.errors << FMError{
							message:  'END found before any FILE block'
							linenr:   linenr
							category: 'parse'
						}
					}
				} else {
					// Store the current block
					match current_kind {
						.file { fm.content[filename] = block.join_lines() }
						.filechange { fm.content_change[filename] = block.join_lines() }
						else {}
					}
					filename = ''
					block = []string{}
					current_kind = .end
				}
			} else { // it's a FILE or FILECHANGE header
				// Flush the previous block, if any
				if filename != '' {
					match current_kind {
						.file { fm.content[filename] = block.join_lines() }
						.filechange { fm.content_change[filename] = block.join_lines() }
						else {}
					}
				}
				filename = parsed_name
				current_kind = parsed_kind
				block = []string{}
				had_any_block = true
			}
		} else {
			// This is a content line (parse_header returned .end, but it wasn't '===END===')
			if filename == '' && line.trim_space().len > 0 {
				fm.errors << FMError{
					message:  "Content before first FILE block: '${line}'"
					linenr:   linenr
					category: 'parse'
				}
			} else if filename != '' {
				block << line
			}
		}
	}
	// Flush the final block, if any
	if filename != '' {
		match current_kind {
			.file { fm.content[filename] = block.join_lines() }
			.filechange { fm.content_change[filename] = block.join_lines() }
			else {}
		}
	}
	return fm
}
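A round-trip sketch: parse a formatted string, then serialize it back with `content()` (which re-emits the `===FILE:...===` headers, as the tests earlier in this diff verify):

```v
mut fm := filemap_get_from_content('===FILE:hello.txt===
hi there
===END===')!
assert fm.get('hello.txt')! == 'hi there'
println(fm.content()) // re-emits ===FILE:hello.txt=== ... ===END===
```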

lib/ai/filemap/model.v Normal file

@@ -0,0 +1,16 @@
module filemap

// BlockKind defines the type of block in parsed content
pub enum BlockKind {
	file
	filechange
	end
}

pub struct FMError {
pub:
	message  string
	linenr   int
	category string
	filename string
}

lib/ai/filemap/parser.v Normal file

@@ -0,0 +1,44 @@
module filemap

// parse_header robustly extracts the block type and filename from a header line.
// It tolerates a variable `=` count, extra spaces, and mixed case.
// Example: ` ===FILE: myfile.txt ===` -> (BlockKind.file, 'myfile.txt')
fn parse_header(line string) !(BlockKind, string) {
	cleaned := line.trim_space()
	// Must contain at least one '='
	if !cleaned.contains('=') {
		return BlockKind.end, ''
	}
	// Strip leading and trailing '=' (any count), preserving spaces in between
	mut content := cleaned.trim_left('=').trim_space()
	content = content.trim_right('=').trim_space()
	if content.len == 0 {
		return BlockKind.end, ''
	}
	// Check for the END marker
	if content.to_lower() == 'end' {
		return BlockKind.end, ''
	}
	// Parse FILE or FILECHANGE
	if content.contains(':') {
		kind_str := content.all_before(':').to_lower().trim_space()
		filename := content.all_after(':').trim_space()
		if filename.len < 1 {
			return error('Invalid filename: empty after colon')
		}
		match kind_str {
			'file' { return BlockKind.file, filename }
			'filechange' { return BlockKind.filechange, filename }
			else { return BlockKind.end, '' }
		}
	}
	return BlockKind.end, ''
}
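Because `parse_header` never errors on ordinary text (it just returns `.end` with an empty name), the loader can feed it every raw line; only a `FILE:`/`FILECHANGE:` header with an empty filename is a hard error. For instance:

```v
kind, name := parse_header('== file: src/main.v ====')!
assert kind == .file
assert name == 'src/main.v'

kind2, _ := parse_header('just some content line')!
assert kind2 == .end
```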


@@ -0,0 +1,7 @@
module instructions

import incubaid.herolib.core.texttools

__global (
	instructions_cache map[string]string
)

Some files were not shown because too many files have changed in this diff.