This commit is contained in:
2025-12-01 16:45:47 +01:00
parent 5f9a95f2ca
commit 55966be158
72 changed files with 710 additions and 1233 deletions

View File

@@ -0,0 +1,95 @@
# AtlasClient
A simple API for accessing document collections exported by the `doctree` module.
## What It Does
AtlasClient provides methods to:
- List collections, pages, files, and images
- Check if resources exist
- Get file paths and content
- Access metadata (links, errors)
- Copy images from pages
## Quick Start
```v
import incubaid.herolib.web.doctree_client
// Create client, exports will be in ~/hero/var/doctree_export by default
mut client := doctree_client.new()!
// List collections
collections := client.list_collections()!
// Get page content
content := client.get_page_content('my_collection', 'page_name')!
// Check for errors
if client.has_errors('my_collection')! {
errors := client.get_collection_errors('my_collection')!
}
```
## Export Structure
DocTree exports to this structure:
```txt
export_dir/
├── content/
│ └── collection_name/
│ ├── page.md
│ ├── image.png
│ └── file.pdf
└── meta/
└── collection_name.json
```
## Key Methods
**Collections:**
- `list_collections()` - List all collections
**Pages:**
- `list_pages(collection)` - List pages in collection
- `page_exists(collection, page)` - Check if page exists
- `get_page_content(collection, page)` - Get page markdown content
- `get_page_path(collection, page)` - Get page file path
**Files & Images:**
- `list_files(collection)` - List non-page, non-image files
- `list_images(collection)` - List image files
- `get_file_path(collection, file)` - Get file path
- `get_image_path(collection, image)` - Get image path
- `copy_images(collection, page, dest)` - Copy page images to dest/img/
- `copy_files(collection, page, dest)` - Copy page files to dest/files/
**Metadata:**
- `get_collection_metadata(collection)` - Get full metadata
- `get_page_links(collection, page)` - Get links from page
- `get_collection_errors(collection)` - Get collection errors
- `has_errors(collection)` - Check if collection has errors
## Naming Convention
Names are normalized using `name_fix()`:
- `My_Page-Name.md` → `my_page_name`
- Replaces dashes with underscores; removes other special chars
- Converts to lowercase
- Preserves underscores
## Example
See `examples/data/doctree_client/basic_usage.vsh` for a complete working example.
## See Also
- `lib/data/doctree/` - DocTree module for exporting collections
- `lib/web/doctreeclient/` - Alternative client for doctree collections

View File

@@ -0,0 +1,313 @@
module client
import incubaid.herolib.core.pathlib
import incubaid.herolib.core.texttools
import incubaid.herolib.ui.console
import os
import json
import incubaid.herolib.core.redisclient
// AtlasClient provides access to DocTree-exported documentation collections.
// Content (pages/files/images) is read from the exported directory structure;
// collection metadata is read from JSON files under {export_dir}/meta/.
pub struct AtlasClient {
pub mut:
	redis      &redisclient.Redis // Redis connection — NOTE(review): not used by any method visible in this file; confirm it is still required
	export_dir string // Path to the doctree export directory (contains content/ and meta/)
}
// get_page_path resolves the on-disk path of a page inside a collection.
// Pages live at {export_dir}/content/{collection}/{page}.md
pub fn (mut c AtlasClient) get_page_path(collection_name string, page_name string) !string {
	if !os.exists(c.export_dir) {
		return error('export_dir_not_found: Export directory "${c.export_dir}" not found')
	}
	// Normalize both names the same way doctree does on export.
	col := texttools.name_fix(collection_name)
	page := texttools.name_fix(page_name)
	page_path := os.join_path(c.export_dir, 'content', col, '${page}.md')
	if !os.exists(page_path) {
		return error('page_not_found: Page "${page_name}" not found in collection "${collection_name}"')
	}
	return page_path
}
// get_file_path returns the path for a file in a collection
// Files are stored in {export_dir}/content/{collection}/files/{file}
pub fn (mut c AtlasClient) get_file_path(collection_name_ string, file_name_ string) !string {
	// Normalize names to match the exported layout
	collection_name := texttools.name_fix(collection_name_)
	file_name := texttools.name_fix(file_name_)
	// Check if export directory exists
	if !os.exists(c.export_dir) {
		return error('export_dir_not_found: Export directory "${c.export_dir}" not found')
	}
	// Construct the file path (non-image files live under the files/ subdirectory)
	file_path := os.join_path(c.export_dir, 'content', collection_name, 'files', file_name)
	// Check if the file exists
	if !os.exists(file_path) {
		return error('file_not_found:"${file_path}" File "${file_name}" not found in collection "${collection_name}"')
	}
	return file_path
}
// get_image_path returns the path for an image in a collection
// Images are stored in {export_dir}/content/{collection}/img/{image}
pub fn (mut c AtlasClient) get_image_path(collection_name_ string, image_name_ string) !string {
	// Normalize names to match the exported layout
	// (name_fix is applied to the image name too, like the other lookups).
	collection_name := texttools.name_fix(collection_name_)
	image_name := texttools.name_fix(image_name_)
	// Check if export directory exists
	if !os.exists(c.export_dir) {
		return error('export_dir_not_found: Export directory "${c.export_dir}" not found')
	}
	// Construct the image path
	image_path := os.join_path(c.export_dir, 'content', collection_name, 'img', image_name)
	// Check if the image exists
	if !os.exists(image_path) {
		// BUG FIX: the error code previously contained a stray double quote
		// ('image_not_found"'), breaking the errorcode:"path" convention used
		// by get_file_path; error-code matching (err.msg().contains) still works.
		return error('image_not_found:"${image_path}" Image "${image_name}" not found in collection "${collection_name}"')
	}
	return image_path
}
// page_exists reports whether a page can be resolved in a collection.
pub fn (mut c AtlasClient) page_exists(collection_name string, page_name string) bool {
	// Path resolution performs all the existence checks for us.
	_ := c.get_page_path(collection_name, page_name) or { return false }
	return true
}

// file_exists reports whether a file can be resolved in a collection.
pub fn (mut c AtlasClient) file_exists(collection_name string, file_name string) bool {
	// Path resolution performs all the existence checks for us.
	_ := c.get_file_path(collection_name, file_name) or { return false }
	return true
}

// image_exists reports whether an image can be resolved in a collection.
pub fn (mut c AtlasClient) image_exists(collection_name string, image_name string) bool {
	// Path resolution performs all the existence checks for us.
	_ := c.get_image_path(collection_name, image_name) or { return false }
	return true
}
// get_page_content reads and returns the markdown content of a page.
pub fn (mut c AtlasClient) get_page_content(collection_name string, page_name string) !string {
	// Resolve the page path (also validates that collection/page exist).
	page_path := c.get_page_path(collection_name, page_name)!
	mut page_file := pathlib.get_file(path: page_path)!
	// get_page_path already checked existence; this guards against races on disk.
	if !page_file.exists() {
		return error('page_not_found: Page file "${page_path}" does not exist on disk')
	}
	return page_file.read()!
}
// list_collections returns the names of all exported collections,
// i.e. the sub-directories of {export_dir}/content/.
pub fn (mut c AtlasClient) list_collections() ![]string {
	content_dir := os.join_path(c.export_dir, 'content')
	if !os.exists(content_dir) {
		return error('invalid_export_structure: Content directory not found at "${content_dir}"')
	}
	// Only directories count as collections; loose files are ignored.
	return os.ls(content_dir)!.filter(os.is_dir(os.join_path(content_dir, it)))
}
// list_pages returns all page names in a collection.
// The collection metadata is the authoritative source of page membership.
pub fn (mut c AtlasClient) list_pages(collection_name string) ![]string {
	metadata := c.get_collection_metadata(collection_name)!
	return metadata.pages.keys()
}
// list_files returns all non-page, non-image file names in a collection.
// Entries whose metadata path is under img/ are treated as images and skipped.
pub fn (mut c AtlasClient) list_files(collection_name string) ![]string {
	metadata := c.get_collection_metadata(collection_name)!
	mut names := []string{}
	for name, meta in metadata.files {
		if meta.path.starts_with('img/') {
			continue // images are reported by list_images
		}
		names << name
	}
	return names
}

// list_images returns all image file names in a collection
// (entries whose metadata path is under img/).
pub fn (mut c AtlasClient) list_images(collection_name string) ![]string {
	metadata := c.get_collection_metadata(collection_name)!
	mut names := []string{}
	for name, meta in metadata.files {
		if meta.path.starts_with('img/') {
			names << name
		}
	}
	return names
}
// list_pages_map returns map[collection_name] -> sorted list of page names
// for every exported collection.
pub fn (mut c AtlasClient) list_pages_map() !map[string][]string {
	mut result := map[string][]string{}
	for col_name in c.list_collections()! {
		mut pages := c.list_pages(col_name)!
		pages.sort()
		result[col_name] = pages
	}
	return result
}
// get_collection_metadata loads and decodes the collection's metadata JSON,
// stored at {export_dir}/meta/{collection}.json.
pub fn (mut c AtlasClient) get_collection_metadata(collection_name string) !CollectionMetadata {
	col := texttools.name_fix(collection_name)
	meta_path := os.join_path(c.export_dir, 'meta', '${col}.json')
	if !os.exists(meta_path) {
		return error('collection_not_found: Metadata file for collection "${collection_name}" not found at "${meta_path}"')
	}
	// Decode straight from the file content into the metadata struct.
	return json.decode(CollectionMetadata, os.read_file(meta_path)!)!
}
// get_collection_errors returns the errors recorded in a collection's metadata.
pub fn (mut c AtlasClient) get_collection_errors(collection_name string) ![]ErrorMetadata {
	return c.get_collection_metadata(collection_name)!.errors
}

// has_errors reports whether a collection has at least one recorded error.
// It also returns false when the collection metadata cannot be loaded at all.
pub fn (mut c AtlasClient) has_errors(collection_name string) bool {
	errors := c.get_collection_errors(collection_name) or { return false }
	return errors.len > 0
}
// copy_collection copies an entire collection (pages, files and images) to
// destination_path.
// NOTE(review): not implemented yet — currently a no-op that always succeeds.
pub fn (mut c AtlasClient) copy_collection(collection_name string, destination_path string) ! {
	// TODO: list over all pages, links & files and copy them to destination
}
// copy_pages copies every page linked (transitively) from the given page into
// destination_path. The starting page itself is NOT copied yet (TODO below).
pub fn (mut c AtlasClient) copy_pages(collection_name string, page_name string, destination_path string) ! {
	// TODO: copy page itself
	// Links are collected recursively through the page graph.
	links := c.get_page_links(collection_name, page_name)!
	// Ensure the destination directory exists.
	mut dest_dir := pathlib.get_dir(path: destination_path, create: true)!
	for link in links {
		// Copy only internal page links; files/images have dedicated helpers
		// (copy_files / copy_images), and external links have nothing on disk.
		if link.file_type != .page || link.status == .external {
			continue
		}
		page_path := c.get_page_path(link.target_collection_name, link.target_item_name)!
		mut src := pathlib.get_file(path: page_path)!
		// BUG FIX: name_fix_no_ext() strips the extension, so copied pages
		// previously lost their '.md' suffix; re-append it explicitly.
		dest := '${dest_dir.path}/${src.name_fix_no_ext()}.md'
		src.copy(dest: dest)!
		console.print_debug(' ********. Copied page: ${src.path} to ${dest}')
	}
}
// copy_images copies all images referenced by a page (including images of
// transitively linked pages) into the {destination_path}/img/ subdirectory.
pub fn (mut c AtlasClient) copy_images(collection_name string, page_name string, destination_path string) ! {
	// Get page links from metadata (recursive through linked pages)
	links := c.get_page_links(collection_name, page_name)!
	// Create img subdirectory
	mut img_dest := pathlib.get_dir(path: '${destination_path}/img', create: true)!
	// Copy only local image links
	for link in links {
		if link.file_type != .image || link.status == .external {
			continue
		}
		img_path := c.get_image_path(link.target_collection_name, link.target_item_name)!
		mut src := pathlib.get_file(path: img_path)!
		// BUG FIX: name_fix_no_ext() dropped the extension ('logo.png' -> 'logo'),
		// producing unusable image copies; name_fix() keeps the extension.
		src.copy(dest: '${img_dest.path}/${src.name_fix()}')!
		// console.print_debug('Copied image: ${src.path} to ${img_dest.path}/${src.name_fix()}')
	}
}
// copy_files copies all non-image files referenced by a page (via its links,
// including links of transitively linked pages) into {destination_path}/files/.
pub fn (mut c AtlasClient) copy_files(collection_name string, page_name string, destination_path string) ! {
	// Get page links from metadata (recursive through linked pages)
	links := c.get_page_links(collection_name, page_name)!
	// Create files subdirectory
	mut files_dest := pathlib.get_dir(path: '${destination_path}/files', create: true)!
	// Copy only local file links (non-image files)
	for link in links {
		if link.file_type != .file || link.status == .external {
			continue
		}
		file_path := c.get_file_path(link.target_collection_name, link.target_item_name)!
		mut src := pathlib.get_file(path: file_path)!
		// BUG FIX: the copy call was commented out, so this function only logged
		// "Copied file" without copying anything.
		// NOTE(review): name_fix_no_ext() drops the extension ('data.csv' -> 'data');
		// confirm whether name_fix() is intended here, as fixed in copy_images.
		src.copy(dest: '${files_dest.path}/${src.name_fix_no_ext()}')!
		console.print_debug('Copied file: ${src.path} to ${files_dest.path}/${src.name_fix_no_ext()}')
	}
}

View File

@@ -0,0 +1,119 @@
module client
import incubaid.herolib.core.pathlib
import incubaid.herolib.core.texttools
import incubaid.herolib.ui.console
import os
import json
import incubaid.herolib.core.redisclient
// get_page_links returns every link reachable from a page, following
// page-to-page links recursively. File, image and external links are
// included in the result but never expanded further.
pub fn (mut c AtlasClient) get_page_links(collection_name string, page_name string) ![]LinkMetadata {
	// Fresh visited-set per call; cycles are handled inside the recursion.
	mut visited := map[string]bool{}
	mut links := []LinkMetadata{}
	c.collect_page_links_recursive(collection_name, page_name, mut visited, mut links)!
	return links
}
// collect_page_links_recursive is the internal recursive implementation
// It traverses all linked pages and collects all links found
//
// Thread safety: Each call to get_page_links gets its own visited map
// Circular references are prevented by tracking visited pages
//
// Link types behavior:
// - .page links: Recursively traverse to get links from the target page
// - .file and .image links: Included in results but not recursively expanded
// - .external links: Included in results but not recursively expanded
fn (mut c AtlasClient) collect_page_links_recursive(collection_name string, page_name string, mut visited map[string]bool, mut all_links []LinkMetadata) ! {
	// Create unique key for cycle detection
	page_key := '${collection_name}:${page_name}'
	// Prevent infinite loops on circular page references
	// Example: Page A -> Page B -> Page A
	if page_key in visited {
		return
	}
	visited[page_key] = true
	// Get collection metadata (authoritative source of the page's links)
	metadata := c.get_collection_metadata(collection_name)!
	fixed_page_name := texttools.name_fix(page_name)
	// Find the page in metadata
	if fixed_page_name !in metadata.pages {
		return error('page_not_found: Page "${page_name}" not found in collection metadata, for collection: "${collection_name}"')
	}
	page_meta := metadata.pages[fixed_page_name]
	// Add all direct links from this page to the result
	// This includes: pages, files, images, and external links
	all_links << page_meta.links
	// Recursively traverse only page-to-page links
	for link in page_meta.links {
		// Only recursively process links to other pages within the doctree
		// Skip external links (http, https, mailto, etc.)
		// Skip file and image links (these don't have "contained" links)
		if link.file_type != .page || link.status == .external {
			continue
		}
		// Recursively collect links from the target page
		c.collect_page_links_recursive(link.target_collection_name, link.target_item_name, mut visited, mut all_links) or {
			// If we encounter an error (e.g., target page doesn't exist in metadata),
			// we continue processing other links rather than failing completely
			// This provides graceful degradation for broken link references
			continue
		}
	}
}
// get_image_links returns only the image links reachable from a page
// (recursive through linked pages).
pub fn (mut c AtlasClient) get_image_links(collection_name string, page_name string) ![]LinkMetadata {
	return c.get_page_links(collection_name, page_name)!.filter(it.file_type == .image)
}

// get_file_links returns only the non-image file links reachable from a page
// (recursive through linked pages).
pub fn (mut c AtlasClient) get_file_links(collection_name string, page_name string) ![]LinkMetadata {
	return c.get_page_links(collection_name, page_name)!.filter(it.file_type == .file)
}

// get_page_link_targets returns only the internal page-to-page links reachable
// from a page (recursive); external links are excluded.
pub fn (mut c AtlasClient) get_page_link_targets(collection_name string, page_name string) ![]LinkMetadata {
	return c.get_page_links(collection_name, page_name)!.filter(it.file_type == .page && it.status != .external)
}

View File

@@ -0,0 +1,725 @@
module client
import os
import incubaid.herolib.core.texttools
// Helper function to create a test export directory structure.
// Builds a throw-away export tree under the OS temp dir with two collections:
// 'testcollection' (two pages, two images, one csv file, no errors) and
// 'anothercollection' (one page, no files, one recorded error), plus the
// matching metadata JSON under meta/. Returns the root of the test tree.
fn setup_test_export() string {
	test_dir := os.join_path(os.temp_dir(), 'doctree_client_test_${os.getpid()}')
	// Clean up if exists (best-effort; a leftover from a crashed run)
	if os.exists(test_dir) {
		os.rmdir_all(test_dir) or {}
	}
	// Create directory structure
	os.mkdir_all(os.join_path(test_dir, 'content', 'testcollection')) or { panic(err) }
	os.mkdir_all(os.join_path(test_dir, 'content', 'anothercollection')) or { panic(err) }
	os.mkdir_all(os.join_path(test_dir, 'meta')) or { panic(err) }
	// Create test pages
	os.write_file(os.join_path(test_dir, 'content', 'testcollection', 'page1.md'), '# Page 1\n\nContent here.') or {
		panic(err)
	}
	os.write_file(os.join_path(test_dir, 'content', 'testcollection', 'page2.md'), '# Page 2\n\n![logo](logo.png)') or {
		panic(err)
	}
	os.write_file(os.join_path(test_dir, 'content', 'anothercollection', 'intro.md'),
		'# Intro\n\nWelcome!') or { panic(err) }
	// Create test images (content is irrelevant; only paths matter)
	os.mkdir_all(os.join_path(test_dir, 'content', 'testcollection', 'img')) or { panic(err) }
	os.write_file(os.join_path(test_dir, 'content', 'testcollection', 'img', 'logo.png'),
		'fake png data') or { panic(err) }
	os.write_file(os.join_path(test_dir, 'content', 'testcollection', 'img', 'banner.jpg'),
		'fake jpg data') or { panic(err) }
	// Create test files
	os.mkdir_all(os.join_path(test_dir, 'content', 'testcollection', 'files')) or { panic(err) }
	os.write_file(os.join_path(test_dir, 'content', 'testcollection', 'files', 'data.csv'),
		'col1,col2\nval1,val2') or { panic(err) }
	// Create metadata files — the JSON mirrors the CollectionMetadata schema
	// (pages with links, files with relative paths, errors).
	metadata1 := '{
"name": "testcollection",
"path": "",
"pages": {
"page1": {
"name": "page1",
"path": "",
"collection_name": "testcollection",
"links": []
},
"page2": {
"name": "page2",
"path": "",
"collection_name": "testcollection",
"links": [
{
"src": "logo.png",
"text": "logo",
"target": "logo.png",
"line": 3,
"target_collection_name": "testcollection",
"target_item_name": "logo.png",
"status": "ok",
"file_type": "image"
},
{
"src": "data.csv",
"text": "data",
"target": "data.csv",
"line": 4,
"target_collection_name": "testcollection",
"target_item_name": "data.csv",
"status": "ok",
"file_type": "file"
}
]
}
},
"files": {
"logo.png": {
"name": "logo.png",
"path": "img/logo.png"
},
"banner.jpg": {
"name": "banner.jpg",
"path": "img/banner.jpg"
},
"data.csv": {
"name": "data.csv",
"path": "files/data.csv"
}
},
"errors": []
}'
	os.write_file(os.join_path(test_dir, 'meta', 'testcollection.json'), metadata1) or {
		panic(err)
	}
	metadata2 := '{
"name": "anothercollection",
"path": "",
"pages": {
"intro": {
"name": "intro",
"path": "",
"collection_name": "anothercollection",
"links": []
}
},
"files": {},
"errors": [
{
"category": "test",
"page_key": "intro",
"message": "Test error",
"line": 10
}
]
}'
	os.write_file(os.join_path(test_dir, 'meta', 'anothercollection.json'), metadata2) or {
		panic(err)
	}
	return test_dir
}

// Helper function to cleanup the test directory (best-effort).
fn cleanup_test_export(test_dir string) {
	os.rmdir_all(test_dir) or {}
}
// --- Client construction, path resolution and existence tests ---

// Test creating a new client
fn test_new_client() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	assert client.export_dir == test_dir
}

// Test creating client with non-existent directory
fn test_new_client_nonexistent_dir() {
	mut client := new(export_dir: '/nonexistent/path/to/export') or { panic(err) }
	// Client creation should succeed, but operations will fail
	assert client.export_dir == '/nonexistent/path/to/export'
}

// Test get_page_path - success
fn test_get_page_path_success() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	path := client.get_page_path('testcollection', 'page1') or { panic(err) }
	assert path.contains('testcollection')
	assert path.ends_with('page1.md')
	assert os.exists(path)
}

// Test get_page_path - with naming normalization
fn test_get_page_path_normalization() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	// Create a page with normalized name
	normalized_name := texttools.name_fix('Test_Page-Name')
	os.write_file(os.join_path(test_dir, 'content', 'testcollection', '${normalized_name}.md'),
		'# Test') or { panic(err) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	// Should find the page regardless of input format
	path := client.get_page_path('testcollection', 'Test_Page-Name') or { panic(err) }
	assert os.exists(path)
}

// Test get_page_path - page not found
fn test_get_page_path_not_found() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	client.get_page_path('testcollection', 'nonexistent') or {
		assert err.msg().contains('page_not_found')
		assert err.msg().contains('nonexistent')
		return
	}
	assert false, 'Should have returned an error'
}

// Test get_page_path - export dir not found
fn test_get_page_path_no_export_dir() {
	mut client := new(export_dir: '/nonexistent/path') or { panic(err) }
	client.get_page_path('testcollection', 'page1') or {
		assert err.msg().contains('export_dir_not_found')
		return
	}
	assert false, 'Should have returned an error'
}

// Test get_file_path - success
fn test_get_file_path_success() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	path := client.get_file_path('testcollection', 'data.csv') or { panic(err) }
	assert path.contains('testcollection')
	assert path.ends_with('data.csv')
	assert os.exists(path)
}

// Test get_file_path - file not found
fn test_get_file_path_not_found() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	client.get_file_path('testcollection', 'missing.pdf') or {
		assert err.msg().contains('file_not_found')
		assert err.msg().contains('missing.pdf')
		return
	}
	assert false, 'Should have returned an error'
}

// Test get_image_path - success
fn test_get_image_path_success() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	path := client.get_image_path('testcollection', 'logo.png') or { panic(err) }
	assert path.contains('testcollection')
	assert path.ends_with('logo.png')
	assert os.exists(path)
}

// Test get_image_path - image not found
fn test_get_image_path_not_found() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	client.get_image_path('testcollection', 'missing.jpg') or {
		assert err.msg().contains('image_not_found')
		assert err.msg().contains('missing.jpg')
		return
	}
	assert false, 'Should have returned an error'
}

// Test page_exists - true
fn test_page_exists_true() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	exists := client.page_exists('testcollection', 'page1')
	assert exists == true
}

// Test page_exists - false
fn test_page_exists_false() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	exists := client.page_exists('testcollection', 'nonexistent')
	assert exists == false
}

// Test file_exists - true
fn test_file_exists_true() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	exists := client.file_exists('testcollection', 'data.csv')
	assert exists == true
}

// Test file_exists - false
fn test_file_exists_false() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	exists := client.file_exists('testcollection', 'missing.pdf')
	assert exists == false
}

// Test image_exists - true
fn test_image_exists_true() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	exists := client.image_exists('testcollection', 'logo.png')
	assert exists == true
}

// Test image_exists - false
fn test_image_exists_false() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	exists := client.image_exists('testcollection', 'missing.svg')
	assert exists == false
}
// --- Content reading and listing tests ---

// Test get_page_content - success
fn test_get_page_content_success() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	content := client.get_page_content('testcollection', 'page1') or { panic(err) }
	assert content.contains('# Page 1')
	assert content.contains('Content here.')
}

// Test get_page_content - page not found
fn test_get_page_content_not_found() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	client.get_page_content('testcollection', 'nonexistent') or {
		assert err.msg().contains('page_not_found')
		return
	}
	assert false, 'Should have returned an error'
}

// Test list_collections
fn test_list_collections() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	collections := client.list_collections() or { panic(err) }
	assert collections.len == 2
	assert 'testcollection' in collections
	assert 'anothercollection' in collections
}

// Test list_collections - no content dir
fn test_list_collections_no_content_dir() {
	// An export dir without a content/ subdirectory is structurally invalid
	test_dir := os.join_path(os.temp_dir(), 'empty_export_${os.getpid()}')
	os.mkdir_all(test_dir) or { panic(err) }
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	client.list_collections() or {
		assert err.msg().contains('invalid_export_structure')
		return
	}
	assert false, 'Should have returned an error'
}

// Test list_pages - success
fn test_list_pages_success() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	pages := client.list_pages('testcollection') or { panic(err) }
	assert pages.len == 2
	assert 'page1' in pages
	assert 'page2' in pages
}

// Test list_pages - collection not found
fn test_list_pages_collection_not_found() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	client.list_pages('nonexistent') or {
		assert err.msg().contains('collection_not_found')
		return
	}
	assert false, 'Should have returned an error'
}

// Test list_files - success
fn test_list_files_success() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	files := client.list_files('testcollection') or { panic(err) }
	assert files.len == 1
	assert 'data.csv' in files
}

// Test list_files - no files
fn test_list_files_empty() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	files := client.list_files('anothercollection') or { panic(err) }
	assert files.len == 0
}

// Test list_images - success
fn test_list_images_success() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	images := client.list_images('testcollection') or { panic(err) }
	assert images.len == 2
	assert 'logo.png' in images
	assert 'banner.jpg' in images
}

// Test list_images - no images
fn test_list_images_empty() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	images := client.list_images('anothercollection') or { panic(err) }
	assert images.len == 0
}

// Test list_pages_map
fn test_list_pages_map() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	pages_map := client.list_pages_map() or { panic(err) }
	assert pages_map.len == 2
	assert 'testcollection' in pages_map
	assert 'anothercollection' in pages_map
	assert pages_map['testcollection'].len == 2
	assert pages_map['anothercollection'].len == 1
}
// Test list_markdown
fn test_list_markdown() {
test_dir := setup_test_export()
defer { cleanup_test_export(test_dir) }
mut client := new(export_dir: test_dir) or { panic(err) }
markdown := client.list_markdown() or { panic(err) }
assert markdown.contains('testcollection')
assert markdown.contains('anothercollection')
assert markdown.contains('page1')
assert markdown.contains('page2')
assert markdown.contains('intro')
assert markdown.contains('##')
assert markdown.contains('*')
}
// Test get_collection_metadata - success
fn test_get_collection_metadata_success() {
test_dir := setup_test_export()
defer { cleanup_test_export(test_dir) }
mut client := new(export_dir: test_dir) or { panic(err) }
metadata := client.get_collection_metadata('testcollection') or { panic(err) }
assert metadata.name == 'testcollection'
assert metadata.pages.len == 2
assert metadata.errors.len == 0
}
// Test get_collection_metadata - with errors
fn test_get_collection_metadata_with_errors() {
test_dir := setup_test_export()
defer { cleanup_test_export(test_dir) }
mut client := new(export_dir: test_dir) or { panic(err) }
metadata := client.get_collection_metadata('anothercollection') or { panic(err) }
assert metadata.name == 'anothercollection'
assert metadata.pages.len == 1
assert metadata.errors.len == 1
assert metadata.errors[0].message == 'Test error'
assert metadata.errors[0].line == 10
}
// Test get_collection_metadata - not found
fn test_get_collection_metadata_not_found() {
test_dir := setup_test_export()
defer { cleanup_test_export(test_dir) }
mut client := new(export_dir: test_dir) or { panic(err) }
client.get_collection_metadata('nonexistent') or {
assert err.msg().contains('collection_not_found')
return
}
assert false, 'Should have returned an error'
}
// get_page_links: page2 in the fixture carries two links; check the first.
fn test_get_page_links_success() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	links := cl.get_page_links('testcollection', 'page2') or { panic(err) }
	assert links.len == 2
	assert links[0].target_item_name == 'logo.png'
	assert links[0].target_collection_name == 'testcollection'
	assert links[0].file_type == .image
}

// get_page_links: a page without links yields an empty list, not an error.
fn test_get_page_links_empty() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	links := cl.get_page_links('testcollection', 'page1') or { panic(err) }
	assert links.len == 0
}

// get_page_links: an unknown page must yield page_not_found.
fn test_get_page_links_page_not_found() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	cl.get_page_links('testcollection', 'nonexistent') or {
		assert err.msg().contains('page_not_found')
		return
	}
	assert false, 'Should have returned an error'
}
// get_collection_errors: fixture collection with exactly one recorded error.
fn test_get_collection_errors_success() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	errs := cl.get_collection_errors('anothercollection') or { panic(err) }
	assert errs.len == 1
	assert errs[0].message == 'Test error'
}

// get_collection_errors: error-free collection yields an empty list.
fn test_get_collection_errors_empty() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	errs := cl.get_collection_errors('testcollection') or { panic(err) }
	assert errs.len == 0
}
// Test has_errors - true
// NOTE(review): the module README calls this as `client.has_errors(col)!`
// (a result type), but these tests call it without `!` or an `or` block —
// confirm the actual return type of has_errors; if it is `!bool` these
// three tests will not compile as written.
fn test_has_errors_true() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	has_errors := client.has_errors('anothercollection')
	assert has_errors == true
}

// Test has_errors - false
fn test_has_errors_false() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	has_errors := client.has_errors('testcollection')
	assert has_errors == false
}

// Test has_errors - collection not found
// Expected contract: an unknown collection reports false rather than erroring.
fn test_has_errors_collection_not_found() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	has_errors := client.has_errors('nonexistent')
	assert has_errors == false
}
// copy_images: referenced images must land under <dest>/img/.
fn test_copy_images_success() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	target := os.join_path(os.temp_dir(), 'copy_dest_${os.getpid()}')
	os.mkdir_all(target) or { panic(err) }
	defer { cleanup_test_export(target) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	cl.copy_images('testcollection', 'page2', target) or { panic(err) }
	assert os.exists(os.join_path(target, 'img', 'logo.png'))
}

// copy_images: a page without images is a no-op, not an error.
fn test_copy_images_no_images() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	target := os.join_path(os.temp_dir(), 'copy_dest_empty_${os.getpid()}')
	os.mkdir_all(target) or { panic(err) }
	defer { cleanup_test_export(target) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	cl.copy_images('testcollection', 'page1', target) or { panic(err) }
	assert true
}
// Test copy_files - success
// NOTE(review): the final assertion is commented out, so this test only
// verifies that copy_files does not error — the fixture has no file links
// on page2. Add a file link to the test data and re-enable the assert so
// the copy behavior is actually checked.
fn test_copy_files_success() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	dest_dir := os.join_path(os.temp_dir(), 'copy_files_dest_${os.getpid()}')
	os.mkdir_all(dest_dir) or { panic(err) }
	defer { cleanup_test_export(dest_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	// Note: test data would need to be updated to have file links in page2
	// For now, this test demonstrates the pattern
	client.copy_files('testcollection', 'page2', dest_dir) or { panic(err) }
	// Check that files were copied to files subdirectory
	// assert os.exists(os.join_path(dest_dir, 'files', 'somefile.csv'))
}

// Test copy_files - no files: a page without file links is a no-op, not an error.
fn test_copy_files_no_files() {
	test_dir := setup_test_export()
	defer { cleanup_test_export(test_dir) }
	dest_dir := os.join_path(os.temp_dir(), 'copy_files_empty_${os.getpid()}')
	os.mkdir_all(dest_dir) or { panic(err) }
	defer { cleanup_test_export(dest_dir) }
	mut client := new(export_dir: test_dir) or { panic(err) }
	client.copy_files('testcollection', 'page1', dest_dir) or { panic(err) }
	// Should succeed even with no file links
	assert true
}
// page_exists must resolve names containing underscores via name_fix.
fn test_naming_normalization_underscores() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	normalized := texttools.name_fix('test_page_name')
	os.write_file(os.join_path(export_root, 'content', 'testcollection', '${normalized}.md'),
		'# Test') or { panic(err) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	assert cl.page_exists('testcollection', 'test_page_name') == true
}

// page_exists must resolve names containing dashes via name_fix.
fn test_naming_normalization_dashes() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	normalized := texttools.name_fix('test-page-name')
	os.write_file(os.join_path(export_root, 'content', 'testcollection', '${normalized}.md'),
		'# Test') or { panic(err) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	assert cl.page_exists('testcollection', 'test-page-name') == true
}

// page_exists must resolve mixed-case names via name_fix.
fn test_naming_normalization_case() {
	export_root := setup_test_export()
	defer { cleanup_test_export(export_root) }
	normalized := texttools.name_fix('TestPageName')
	os.write_file(os.join_path(export_root, 'content', 'testcollection', '${normalized}.md'),
		'# Test') or { panic(err) }
	mut cl := new(export_dir: export_root) or { panic(err) }
	assert cl.page_exists('testcollection', 'TestPageName') == true
}

View File

@@ -0,0 +1,21 @@
module client
import incubaid.herolib.core.base
// Arguments for constructing an AtlasClient.
@[params]
pub struct AtlasClientArgs {
pub:
	export_dir string @[required] // Path to doctree export directory (must contain content/ and meta/)
}
// new creates an AtlasClient bound to a doctree export directory.
// export_dir must point at the directory containing the content/ and
// meta/ subdirectories produced by a doctree export.
pub fn new(args AtlasClientArgs) !&AtlasClient {
	mut ctx := base.context()!
	mut r := ctx.redis()!
	return &AtlasClient{
		redis: r
		export_dir: args.export_dir
	}
}

View File

@@ -0,0 +1,28 @@
module client
// list_markdown renders all collections and their pages as a markdown
// outline: one `##` heading per collection (sorted by name), one bullet
// per page. Returns a fixed message when the export is empty.
pub fn (mut c AtlasClient) list_markdown() !string {
	pages_map := c.list_pages_map()!
	if pages_map.len == 0 {
		return 'No collections or pages found in this doctree export.'
	}
	mut names := pages_map.keys()
	names.sort()
	mut parts := []string{}
	for col_name in names {
		parts << '## ${col_name}\n'
		pages := pages_map[col_name]
		if pages.len == 0 {
			parts << ' * No pages in this collection.\n'
		}
		for page_name in pages {
			parts << ' * ${page_name}\n'
		}
		// blank line between collections
		parts << '\n'
	}
	return parts.join('')
}

View File

@@ -0,0 +1,66 @@
module client
// AtlasClient provides access to DocTree-exported documentation collections
// It reads from both the exported directory structure and Redis metadata

// List of recognized image file extensions
const image_extensions = ['.png', '.jpg', '.jpeg', '.gif', '.svg', '.webp', '.bmp', '.tiff', '.ico']

// CollectionMetadata represents the metadata stored in meta/{collection}.json
pub struct CollectionMetadata {
pub mut:
	name   string
	path   string
	pages  map[string]PageMetadata // page name -> metadata
	files  map[string]FileMetadata // file name -> metadata
	errors []ErrorMetadata
}

// PageMetadata describes a single exported markdown page.
pub struct PageMetadata {
pub mut:
	name            string
	path            string
	collection_name string
	links           []LinkMetadata
}

// FileMetadata describes a non-page asset in the export.
pub struct FileMetadata {
pub mut:
	name string // name WITH extension (e.g., "image.png", "data.csv")
	path string // relative path in export (e.g., "img/image.png" or "files/data.csv")
}

// LinkMetadata describes one link occurrence inside a page.
pub struct LinkMetadata {
pub mut:
	src                    string
	text                   string
	target                 string
	line                   int
	target_collection_name string
	target_item_name       string
	status                 LinkStatus
	file_type              LinkFileType
}

// Resolution state of a link after validation.
pub enum LinkStatus {
	init
	external
	found
	not_found
	anchor
	error
}

// What kind of resource a link points at.
pub enum LinkFileType {
	page // Default: link to another page
	file // Link to a non-image file
	image // Link to an image file
}

// ErrorMetadata is one recorded collection error in the export metadata.
pub struct ErrorMetadata {
pub mut:
	category string
	page_key string
	message  string
	line     int
}

View File

@@ -0,0 +1,418 @@
module core
import incubaid.herolib.core.pathlib
import incubaid.herolib.web.doctree as doctreetools
import incubaid.herolib.develop.gittools
import incubaid.herolib.data.paramsparser { Params }
import incubaid.herolib.ui.console
import os
// Session carries the identity/context of the current request.
pub struct Session {
pub mut:
	user   string // username
	email  string // user's email (lowercase internally)
	params Params // additional context from request/webserver
}

// Collection is a directory of markdown pages plus asset files,
// discovered via a `.collection` marker file.
@[heap]
pub struct Collection {
pub mut:
	name        string
	path        string // absolute path
	pages       map[string]&Page // normalized page name -> page
	files       map[string]&File // normalized file name -> file (images included)
	doctree     &DocTree @[skip; str: skip] // back-reference to owning tree
	errors      []CollectionError
	error_cache map[string]bool // error hash -> seen, for deduplication
	git_url     string // web URL of the containing git repo, if any
	acl_read    []string // Group names allowed to read (lowercase)
	acl_write   []string // Group names allowed to write (lowercase)
}
// path returns the collection root as a pathlib directory (must already exist).
pub fn (mut c Collection) path() !pathlib.Path {
	return pathlib.get_dir(path: c.path, create: false)!
}
// init_pre runs the first initialization phase: scan the collection
// directory for pages/files and load the ACL files. Link validation
// happens later in init_post, once all collections are known.
fn (mut c Collection) init_pre() ! {
	// fix: `mut p := mut c.path()!` is not valid V — `mut` belongs only on
	// the left-hand side of the declaration.
	mut p := c.path()!
	c.scan(mut p)!
	c.scan_acl()!
}
// init_post runs the second initialization phase, after all collections
// are scanned: validate links (which may reference other collections),
// then detect git repository info.
fn (mut c Collection) init_post() ! {
	c.validate_links()!
	c.init_git_info()!
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// add_page registers a markdown page under its normalized,
// extension-less name; duplicate names are an error.
fn (mut c Collection) add_page(mut path pathlib.Path) ! {
	name := path.name_fix_no_ext()
	if name in c.pages {
		return error('Page ${name} already exists in collection ${c.name}')
	}
	// path_relative requires absolute paths on both sides
	mut col_path := pathlib.get(c.path)
	mut abs_page := pathlib.get(path.absolute())
	rel := abs_page.path_relative(col_path.absolute())!
	c.pages[name] = &Page{
		name: name
		path: rel
		collection_name: c.name
		collection: &c
	}
}
// Add a file (image or other asset) to the collection; the ftype is
// decided from the path's extension.
fn (mut c Collection) add_file(mut p pathlib.Path) ! {
	// NOTE(review): name_fix_no_ext strips the extension, but the exported
	// FileMetadata documents `name` as WITH extension — confirm this is the
	// intended method (a keep-extension variant may be wanted here).
	name := p.name_fix_no_ext() // keep extension
	if name in c.files {
		return error('File ${name} already exists in collection ${c.name}')
	}
	// Use absolute paths for path_relative to work correctly
	mut col_path := pathlib.get(c.path)
	mut file_abs_path := pathlib.get(p.absolute())
	relativepath := file_abs_path.path_relative(col_path.absolute())!
	mut file_new := File{
		name: name
		path: relativepath // relative path of file in the collection, includes the name
		collection: &c
	}
	if p.is_image() {
		file_new.ftype = .image
	} else {
		file_new.ftype = .file
	}
	c.files[name] = &file_new
}
// page_get returns the page registered under the normalized form of name_.
// Returns a PageNotFound error when no such page exists.
pub fn (c Collection) page_get(name_ string) !&Page {
	name := doctreetools.name_fix(name_)
	return c.pages[name] or { return PageNotFound{
		collection: c.name
		page: name
	} }
}

// image_get returns the image registered under the normalized name.
// FileNotFound when absent; plain error when the entry is not an image.
pub fn (c Collection) image_get(name_ string) !&File {
	name := doctreetools.name_fix(name_)
	mut img := c.files[name] or { return FileNotFound{
		collection: c.name
		file: name
	} }
	if img.ftype != .image {
		return error('File `${name}` in collection ${c.name} is not an image')
	}
	return img
}

// file_get returns the non-image file registered under the normalized name.
// FileNotFound when absent; plain error when the entry is an image.
pub fn (c Collection) file_get(name_ string) !&File {
	name := doctreetools.name_fix(name_)
	mut f := c.files[name] or { return FileNotFound{
		collection: c.name
		file: name
	} }
	if f.ftype != .file {
		return error('File `${name}` in collection ${c.name} is not a file')
	}
	return f
}

// file_or_image_get returns a file entry regardless of its type.
pub fn (c Collection) file_or_image_get(name_ string) !&File {
	name := doctreetools.name_fix(name_)
	mut f := c.files[name] or { return FileNotFound{
		collection: c.name
		file: name
	} }
	return f
}

// page_exists reports whether a page with the normalized name is registered.
pub fn (c Collection) page_exists(name_ string) !bool {
	name := doctreetools.name_fix(name_)
	return name in c.pages
}

// image_exists reports whether an image with the normalized name is registered.
pub fn (c Collection) image_exists(name_ string) !bool {
	name := doctreetools.name_fix(name_)
	f := c.files[name] or { return false }
	return f.ftype == .image
}

// file_exists reports whether a non-image file with the normalized name is registered.
pub fn (c Collection) file_exists(name_ string) !bool {
	name := doctreetools.name_fix(name_)
	f := c.files[name] or { return false }
	return f.ftype == .file
}

// file_or_image_exists reports whether any entry (image or not) with the
// normalized name is registered.
pub fn (c Collection) file_or_image_exists(name_ string) !bool {
	name := doctreetools.name_fix(name_)
	_ := c.files[name] or { return false }
	return true
}
// Arguments for Collection.error().
@[params]
pub struct CollectionErrorArgs {
pub mut:
	category     CollectionErrorCategory @[required]
	message      string @[required]
	page_key     string // "collection:page" locator when the error is page-specific
	file         string // specific file path, when relevant
	show_console bool // Show error in console immediately
	log_error    bool = true // Log to errors array (default: true)
}
// error records a collection problem exactly once: repeated reports with
// the same deduplication hash (category + location) are dropped silently.
pub fn (mut c Collection) error(args CollectionErrorArgs) {
	err := CollectionError{
		category: args.category
		page_key: args.page_key
		message: args.message
		file: args.file
	}
	h := err.hash()
	if h in c.error_cache {
		// Duplicate of an already-reported error — ignore.
		return
	}
	c.error_cache[h] = true
	if args.log_error {
		c.errors << err
	}
	if args.show_console {
		console.print_stderr('[${c.name}] ${err.str()}')
	}
}
// get_errors returns every recorded error.
pub fn (c Collection) get_errors() []CollectionError {
	return c.errors
}

// has_errors reports whether at least one error was recorded.
pub fn (c Collection) has_errors() bool {
	return c.errors.len != 0
}

// clear_errors drops all recorded errors and resets deduplication state.
pub fn (mut c Collection) clear_errors() {
	c.errors = []CollectionError{}
	c.error_cache = map[string]bool{}
}

// error_summary counts the recorded errors per category.
pub fn (c Collection) error_summary() map[CollectionErrorCategory]int {
	mut counts := map[CollectionErrorCategory]int{}
	for e in c.errors {
		counts[e.category]++
	}
	return counts
}
// print_errors dumps every recorded error to the console, or a green
// all-clear line when there are none.
pub fn (c Collection) print_errors() {
	if c.errors.len == 0 {
		console.print_green('Collection ${c.name}: No errors')
		return
	}
	console.print_header('Collection ${c.name} - Errors (${c.errors.len})')
	for err in c.errors {
		console.print_stderr('  ${err.str()}')
	}
}
// validate_links re-resolves every page's links (with includes applied)
// and caches them on the page; find_links records errors for broken targets.
pub fn (mut c Collection) validate_links() ! {
	for _, mut page in c.pages {
		content := page.content(include: true)!
		page.links = page.find_links(content)! // will walk over links see if errors and add errors
	}
}

// fix_links rewrites each page's source file with fixed links, writing
// only when the content actually changed.
pub fn (mut c Collection) fix_links() ! {
	for _, mut page in c.pages {
		// Read original content
		content := page.content()!
		// Fix links
		fixed_content := page.content_with_fixed_links()!
		// Write back if changed
		if fixed_content != content {
			mut p := page.path()!
			p.write(fixed_content)!
		}
	}
}
// can_read reports whether the session may read this collection.
// An empty read ACL means the collection is world-readable.
pub fn (c Collection) can_read(session Session) bool {
	if c.acl_read.len == 0 {
		return true
	}
	// Resolve the groups the session's email belongs to.
	mut doctree := c.doctree
	group_names := doctree.groups_get(session).map(it.name)
	// Readable when any ACL group matches one of the user's groups.
	return c.acl_read.any(it in group_names)
}
// can_write reports whether the session may write this collection.
// An empty write ACL means nobody can write (closed by default —
// the opposite of can_read).
pub fn (c Collection) can_write(session Session) bool {
	if c.acl_write.len == 0 {
		return false
	}
	// Resolve the groups the session's email belongs to.
	mut doctree := c.doctree
	group_names := doctree.groups_get(session).map(it.name)
	// Writable when any ACL group matches one of the user's groups.
	return c.acl_write.any(it in group_names)
}
// init_git_info resolves the collection's git web URL (including the
// current branch) by walking up from the collection path to a .git dir.
// A collection outside any git repo is silently left without a git_url.
fn (mut c Collection) init_git_info() ! {
	mut current_path := c.path()!
	// Walk up directory tree to find .git
	mut git_repo := current_path.parent_find('.git') or {
		// No git repo found — not an error, just no git metadata
		return
	}
	if git_repo.path == '' {
		panic('Unexpected empty git repo path')
	}
	mut gs := gittools.new()!
	mut p := c.path()!
	mut location := gs.gitlocation_from_path(p.path)!
	// Current branch via the git CLI; assumes `git` is on PATH — TODO confirm
	r := os.execute_opt('cd ${p.path} && git branch --show-current')!
	location.branch_or_tag = r.output.trim_space()
	c.git_url = location.web_url()!
}
////////////SCANNING FUNCTIONS ?//////////////////////////////////////////////////////
// scan walks dir recursively, registering every .md file as a page and
// every other file as an asset. Hidden entries (leading '.' or '_') are
// skipped entirely.
fn (mut c Collection) scan(mut dir pathlib.Path) ! {
	mut entries := dir.list(recursive: false)!
	for mut entry in entries.paths {
		entry_name := entry.name()
		if entry_name.starts_with('.') || entry_name.starts_with('_') {
			continue
		}
		if entry.is_dir() {
			mut subdir := entry
			c.scan(mut subdir)!
			continue
		}
		mut f := entry
		if entry.extension_lower() == 'md' {
			c.add_page(mut f)!
		} else {
			c.add_file(mut f)!
		}
	}
}
// scan_acl loads the optional read.acl / write.acl files from the
// collection directory into acl_read / acl_write. A missing file leaves
// the corresponding ACL empty (read open, write closed — see
// can_read/can_write).
fn (mut c Collection) scan_acl() ! {
	dir := c.path()!.path
	c.acl_read = read_acl_lines('${dir}/read.acl')!
	c.acl_write = read_acl_lines('${dir}/write.acl')!
}

// read_acl_lines parses an ACL file into normalized group names
// (trimmed, lowercase, empty lines dropped); a missing file yields
// an empty list.
fn read_acl_lines(path string) ![]string {
	if !os.exists(path) {
		return []string{}
	}
	content := os.read_file(path)!
	return content.split('\n')
		.map(it.trim_space())
		.filter(it.len > 0)
		.map(it.to_lower())
}
// scan_groups loads every `.group` file in the collection directory and
// registers the parsed groups on the parent doctree.
// Only valid for the special collection named "groups".
pub fn (mut c Collection) scan_groups() ! {
	if c.name != 'groups' {
		return error('scan_groups only works on "groups" collection')
	}
	mut p := c.path()!
	mut entries := p.list(recursive: false)!
	for mut entry in entries.paths {
		if entry.extension_lower() == 'group' {
			filename := entry.name_fix_no_ext()
			// visited map — presumably guards against circular group
			// references during parsing; confirm in parse_group_file
			mut visited := map[string]bool{}
			mut group := parse_group_file(filename, c.path()!.path, mut visited)!
			c.doctree.group_add(mut group)!
		}
	}
}

View File

@@ -0,0 +1,65 @@
module core
import crypto.md5
import incubaid.herolib.ui.console
// Categories of problems recorded while scanning/validating a collection.
pub enum CollectionErrorCategory {
	circular_include
	missing_include
	include_syntax_error
	invalid_page_reference
	invalid_file_reference
	file_not_found
	invalid_collection
	general_error
	acl_denied // access denied by ACL
}

// One recorded collection problem; deduplicated via hash().
pub struct CollectionError {
pub mut:
	category CollectionErrorCategory
	page_key string // Format: "collection:page" or just collection name
	message  string
	file     string // Optional: specific file path if relevant
}
// hash returns an md5 digest used to deduplicate repeated reports.
// The digest covers the category plus page_key (or file when page_key is
// empty); the message is deliberately not part of the hash.
pub fn (e CollectionError) hash() string {
	location := if e.page_key != '' {
		':${e.page_key}'
	} else if e.file != '' {
		':${e.file}'
	} else {
		''
	}
	return md5.hexhash('${e.category}' + location)
}
// str renders the error as "[category] [location]: message", where the
// location is the page key or, failing that, the file path.
pub fn (e CollectionError) str() string {
	location := if e.page_key != '' {
		' [${e.page_key}]'
	} else if e.file != '' {
		' [${e.file}]'
	} else {
		''
	}
	return '[${e.category}]${location}: ${e.message}'
}
// category_str returns a human-readable label for the error category.
pub fn (e CollectionError) category_str() string {
	label := match e.category {
		.circular_include { 'Circular Include' }
		.missing_include { 'Missing Include' }
		.include_syntax_error { 'Include Syntax Error' }
		.invalid_page_reference { 'Invalid Page Reference' }
		.invalid_file_reference { 'Invalid File Reference' }
		.file_not_found { 'File Not Found' }
		.invalid_collection { 'Invalid Collection' }
		.general_error { 'General Error' }
		.acl_denied { 'ACL Access Denied' }
	}
	return label
}

View File

@@ -0,0 +1,162 @@
module core
import incubaid.herolib.web.doctree
import incubaid.herolib.core.pathlib
import incubaid.herolib.ui.console
import incubaid.herolib.data.paramsparser
// DocTree is the in-memory root object: a named set of collections plus
// the ACL groups used by Collection.can_read / can_write.
@[heap]
pub struct DocTree {
pub mut:
	name        string
	collections map[string]&Collection // normalized collection name -> collection
	groups      map[string]&Group // name -> Group mapping
}
// add_collection creates a Collection from a directory containing a
// `.collection` marker file. The name defaults to the directory name but
// can be overridden by a `name:` param inside the marker file; either way
// it is normalized with name_fix. Runs the collection's init_pre
// (scan + ACL) before registering it.
fn (mut self DocTree) add_collection(mut path pathlib.Path) !Collection {
	mut name := path.name_fix_no_ext()
	mut filepath := path.file_get('.collection')!
	content := filepath.read()!
	// A non-empty marker file may carry params, e.g. `name: mycol`
	if content.trim_space() != '' {
		mut params := paramsparser.parse(content)!
		if params.exists('name') {
			name = params.get('name')!
		}
	}
	name = doctree.name_fix(name)
	console.print_item("Adding collection '${name}' to DocTree '${self.name}' at path '${path.path}'")
	if name in self.collections {
		return error('Collection ${name} already exists in DocTree ${self.name}')
	}
	mut c := Collection{
		name: name
		path: path.path // absolute path
		doctree: &self // back-reference used for ACL group lookups
		error_cache: map[string]bool{}
	}
	c.init_pre()!
	self.collections[name] = &c
	return c
}
// get_collection returns a collection by name. The name is normalized
// with name_fix so lookups match how add_collection stores keys
// (consistent with group_get and the page/file accessors).
pub fn (a DocTree) get_collection(name string) !&Collection {
	key := doctree.name_fix(name)
	return a.collections[key] or {
		return CollectionNotFound{
			name: name
			msg: 'Collection not found in DocTree ${a.name}'
		}
	}
}
// init_post runs the post-scan phase on every collection
// (link validation + git info detection).
pub fn (mut a DocTree) init_post() ! {
	for _, mut col in a.collections {
		col.init_post()!
	}
}
// Validate all links in all collections
pub fn (mut a DocTree) validate_links() ! {
	for _, mut col in a.collections {
		col.validate_links()!
	}
}

// Fix all links in all collections (rewrites source files on disk)
pub fn (mut a DocTree) fix_links() ! {
	for _, mut col in a.collections {
		col.fix_links()!
	}
}
// group_add registers a group; duplicate names are rejected.
// NOTE(review): group.name is used as the key as-is, while group_get
// normalizes its lookup with name_fix — confirm callers always pass
// normalized group names.
pub fn (mut a DocTree) group_add(mut group Group) ! {
	if group.name in a.groups {
		return error('Group ${group.name} already exists')
	}
	a.groups[group.name] = &group
}
// group_get returns a group by name (normalized with name_fix).
pub fn (a DocTree) group_get(name string) !&Group {
	name_lower := doctree.name_fix(name)
	return a.groups[name_lower] or { return error('Group ${name} not found') }
}
// groups_get returns every group whose membership matches the session's
// email (compared lowercase).
pub fn (a DocTree) groups_get(session Session) []&Group {
	email := session.email.to_lower()
	mut result := []&Group{}
	for _, group in a.groups {
		if group.matches(email) {
			result << group
		}
	}
	return result
}
//////////////////SCAN

// Arguments for DocTree.scan.
@[params]
pub struct ScanArgs {
pub mut:
	path   string @[required] // root directory to search for collections
	ignore []string // directory names to skip (matched case-insensitively)
}
// scan searches args.path recursively for `.collection` directories and
// registers each one found as a collection.
pub fn (mut a DocTree) scan(args ScanArgs) ! {
	mut root := pathlib.get_dir(path: args.path)!
	// map() already returns a fresh array, so no explicit clone is needed
	ignore := args.ignore.map(it.to_lower())
	a.scan_(mut root, ignore)!
}
// scan_ recursively walks dir looking for collection roots. A directory
// containing a `.collection` marker becomes a collection and its
// subdirectories are NOT scanned further (collections do not nest).
fn (mut a DocTree) scan_(mut dir pathlib.Path, ignore_ []string) ! {
	console.print_item('Scanning directory: ${dir.path}')
	if !dir.is_dir() {
		return error('Path is not a directory: ${dir.path}')
	}
	// Check if this directory is a collection
	if dir.file_exists('.collection') {
		// NOTE(review): the ignore check and the 'groups' check use the
		// directory name, while the final collection name may differ when
		// the .collection file sets `name:` — confirm this is intended.
		collname := dir.name_fix_no_ext()
		if collname.to_lower() in ignore_ {
			return
		}
		mut col := a.add_collection(mut dir)!
		if collname == 'groups' {
			col.scan_groups()!
		}
		return
	}
	// Recurse into non-hidden subdirectories
	mut entries := dir.list(recursive: false)!
	for mut entry in entries.paths {
		if !entry.is_dir() || should_skip_dir(entry) {
			continue
		}
		mut mutable_entry := entry
		a.scan_(mut mutable_entry, ignore_)!
	}
}
// should_skip_dir reports whether a directory is hidden/internal
// (name starting with '.' or '_') and must be excluded from scanning.
fn should_skip_dir(entry pathlib.Path) bool {
	name := entry.name()
	for prefix in ['.', '_'] {
		if name.starts_with(prefix) {
			return true
		}
	}
	return false
}

View File

@@ -0,0 +1,34 @@
module core
// Typed error returned when a collection lookup fails.
pub struct CollectionNotFound {
	Error
pub:
	name string
	msg  string
}

pub fn (err CollectionNotFound) msg() string {
	return 'Collection ${err.name} not found: ${err.msg}'
}

// Typed error returned when a page lookup fails.
pub struct PageNotFound {
	Error
pub:
	collection string
	page       string
}

pub fn (err PageNotFound) msg() string {
	return 'Page ${err.page} not found in collection ${err.collection}'
}

// Typed error returned when a file/image lookup fails.
pub struct FileNotFound {
	Error
pub:
	collection string
	file       string
}

pub fn (err FileNotFound) msg() string {
	return 'File ${err.file} not found in collection ${err.collection}'
}

View File

@@ -0,0 +1,212 @@
module core
import incubaid.herolib.core.pathlib
import incubaid.herolib.core.base
import json
// Arguments for DocTree.export.
@[params]
pub struct ExportArgs {
pub mut:
	destination string @[required] // export root; content/ and meta/ are created below it
	reset       bool = true // wipe the destination before exporting
	include     bool = true // process include directives in page content
	redis       bool = true // also register exported pages in Redis
}
// Export all collections to args.destination (content/ + meta/ layout).
// Links are validated first so each page's cached link list is populated
// before Collection.export consumes it.
pub fn (mut a DocTree) export(args ExportArgs) ! {
	mut dest := pathlib.get_dir(path: args.destination, create: true)!
	if args.reset {
		dest.empty()!
	}
	// Validate links before export to populate page.links
	a.validate_links()!
	for _, mut col in a.collections {
		col.export(
			destination: dest
			reset: args.reset
			include: args.include
			redis: args.redis
		)!
	}
}
// Arguments for Collection.export.
@[params]
pub struct CollectionExportArgs {
pub mut:
	destination pathlib.Path @[required]
	reset       bool = true
	include     bool = true // process includes during export
	redis       bool = true // register exported pages in Redis
}
// Export a single collection: write processed pages and metadata JSON,
// copy the collection's own files/images, and copy any cross-collection
// pages/files that this collection's pages reference (transitively).
pub fn (mut c Collection) export(args CollectionExportArgs) ! {
	// Create collection directory
	mut col_dir := pathlib.get_dir(
		path: '${args.destination.path}/content/${c.name}'
		create: true
	)!
	mut dir_meta := pathlib.get_dir(
		path: '${args.destination.path}/meta/'
		create: true
	)!
	if c.has_errors() {
		c.print_errors()
	}
	// Persist collection metadata as pretty JSON under meta/<name>.json
	meta := json.encode_pretty(c)
	mut json_file := pathlib.get_file(
		path: '${dir_meta.path}/${c.name}.json'
		create: true
	)!
	json_file.write(meta)!
	// Track all cross-collection pages and files that need to be exported.
	// Keys are "collection:name" so references are deduplicated globally.
	mut cross_collection_pages := map[string]&Page{} // key: "collection:page_name"
	mut cross_collection_files := map[string]&File{} // key: "collection:file_name"
	mut processed_local_pages := map[string]bool{} // local pages already processed
	mut processed_cross_pages := map[string]bool{} // cross pages already walked for links
	// First pass: export all pages in this collection and recursively collect
	// ALL cross-collection references they (transitively) depend on.
	for _, mut page in c.pages {
		// Get content with includes processed and links transformed for export
		content := page.content_with_fixed_links(
			include: args.include
			cross_collection: true
			export_mode: true
		)!
		mut dest_file := pathlib.get_file(path: '${col_dir.path}/${page.name}.md', create: true)!
		dest_file.write(content)!
		// Recursively collect cross-collection references from this page
		c.collect_cross_collection_references(mut page, mut cross_collection_pages, mut
			cross_collection_files, mut processed_cross_pages)!
		processed_local_pages[page.name] = true
		if args.redis {
			mut context := base.context()!
			mut redis := context.redis()!
			redis.hset('doctree:${c.name}', page.name, page.path)!
		}
	}
	// Copy referenced cross-collection pages into this collection's export
	// dir. Done ONCE after the page loop (the maps are fully populated by
	// then); the previous version re-ran these copies inside the loop,
	// redoing the same file copies for every page.
	for _, mut ref_page in cross_collection_pages {
		mut src_file := ref_page.path()!
		dest_path := '${col_dir.path}/${ref_page.name}.md'
		src_file.copy(dest: dest_path)!
	}
	// Copy referenced cross-collection files/images into img/ or files/.
	for _, mut ref_file in cross_collection_files {
		mut src_file2 := ref_file.path()!
		// Determine subdirectory based on file type
		subdir := if ref_file.is_image() { 'img' } else { 'files' }
		mut subdir_path := pathlib.get_dir(
			path: '${col_dir.path}/${subdir}'
			create: true
		)!
		mut dest_file2 := pathlib.get_file(path: '${subdir_path.path}/${ref_file.name}', create: true)!
		src_file2.copy(dest: dest_file2.path)!
	}
	// Copy this collection's own files/images to the export directory.
	for _, mut file in c.files {
		mut src_file := file.path()!
		// Determine subdirectory based on file type
		subdir := if file.is_image() { 'img' } else { 'files' }
		mut subdir_path := pathlib.get_dir(
			path: '${col_dir.path}/${subdir}'
			create: true
		)!
		mut dest_file := pathlib.get_file(path: '${subdir_path.path}/${file.name}', create: true)!
		src_file.copy(dest: dest_file.path)!
	}
}
// collect_cross_collection_references walks a page's (pre-validated) links
// and gathers every page/file living in another collection, recursing into
// referenced pages so that transitive dependencies are collected too.
// processed_pages prevents infinite recursion on circular references.
fn (mut c Collection) collect_cross_collection_references(mut page Page,
	mut all_cross_pages map[string]&Page,
	mut all_cross_files map[string]&File,
	mut processed_pages map[string]bool) ! {
	page_key := page.key()
	// If we've already processed this page, skip it (prevents infinite loops with cycles)
	if page_key in processed_pages {
		return
	}
	// Mark this page as processed BEFORE recursing (prevents infinite loops with circular references)
	processed_pages[page_key] = true
	// Process all links in the current page.
	// Use cached links from validation (before transformation) so the
	// original collection information is still available.
	for mut link in page.links {
		if link.status != .found {
			continue
		}
		is_local := link.target_collection_name == c.name
		// Cross-collection page reference: collect it and recurse into it
		if link.file_type == .page && !is_local {
			page_ref := '${link.target_collection_name}:${link.target_item_name}'
			// Only add if not already collected
			if page_ref !in all_cross_pages {
				mut target_page := link.target_page()!
				all_cross_pages[page_ref] = target_page
				// Recursively process the target page's links to find more cross-collection references
				// This ensures we collect ALL transitive cross-collection page and file references
				c.collect_cross_collection_references(mut target_page, mut all_cross_pages, mut
					all_cross_files, mut processed_pages)!
			}
		}
		// Cross-collection file/image reference: collect it (no recursion needed)
		if (link.file_type == .file || link.file_type == .image) && !is_local {
			file_key := '${link.target_collection_name}:${link.target_item_name}'
			// Only add if not already collected
			if file_key !in all_cross_files {
				mut target_file := link.target_file()!
				all_cross_files[file_key] = target_file
			}
		}
	}
}

View File

@@ -0,0 +1,61 @@
module core
import incubaid.herolib.web.doctree as doctreetools
import incubaid.herolib.core.pathlib
import incubaid.herolib.ui.console
import incubaid.herolib.data.paramsparser
__global (
doctrees shared map[string]&DocTree
)
// Arguments for the DocTree factory.
@[params]
pub struct AtlasNewArgs {
pub mut:
	name string = 'default' // tree name; normalized with name_fix
}
// new creates a DocTree with a normalized name and registers it in the
// global `doctrees` map.
// NOTE(review): an existing DocTree with the same name is silently
// replaced by set() — confirm whether that is intended.
pub fn new(args AtlasNewArgs) !&DocTree {
	mut name := doctreetools.name_fix(args.name)
	mut a := &DocTree{
		name: name
	}
	set(a)
	return a
}
// get fetches a previously registered DocTree by (normalized) name.
pub fn get(name string) !&DocTree {
	mut fixed_name := doctreetools.name_fix(name)
	rlock doctrees {
		if fixed_name in doctrees {
			// the `or` branch is effectively unreachable given the `in` check above
			return doctrees[fixed_name] or { return error('DocTree ${name} not found') }
		}
	}
	return error("DocTree '${name}' not found")
}
// exists reports whether a DocTree with the (normalized) name is registered.
pub fn exists(name string) bool {
	fixed := doctreetools.name_fix(name)
	rlock doctrees {
		return fixed in doctrees
	}
}
// list returns the names of all registered DocTrees.
pub fn list() []string {
	rlock doctrees {
		return doctrees.keys()
	}
}

// set registers (or replaces) a DocTree in the global map, keyed by its name.
fn set(doctree &DocTree) {
	lock doctrees {
		doctrees[doctree.name] = doctree
	}
}

View File

@@ -0,0 +1,31 @@
module core
import incubaid.herolib.core.pathlib
import os
// Classification of a non-markdown collection entry.
pub enum FileType {
	file
	image
}

// File is a non-markdown asset (image or other file) inside a collection.
pub struct File {
pub mut:
	name       string // name with extension
	path       string // relative path of file in the collection
	ftype      FileType // file or image
	collection &Collection @[skip; str: skip] // Reference to parent collection
}
// path resolves the file's absolute location by joining the collection
// root with the file's collection-relative path (file must exist).
pub fn (mut f File) path() !pathlib.Path {
	mut mypath := '${f.collection.path()!.path}/${f.path}'
	return pathlib.get_file(path: mypath, create: false)!
}
// is_image reports whether this entry was classified as an image at scan time.
pub fn (f File) is_image() bool {
	return f.ftype == .image
}

// ext returns the file extension of the stored name (as given by os.file_ext).
pub fn (f File) ext() string {
	return os.file_ext(f.name)
}

View File

@@ -0,0 +1,86 @@
module core
import incubaid.herolib.web.doctree
// Get a page from any collection using format "collection:page"
pub fn (a DocTree) page_get(key string) !&Page {
	segments := key.split(':')
	if segments.len != 2 {
		return error('Invalid page key format. Use "collection:page" in page_get')
	}
	collection := a.get_collection(segments[0])!
	return collection.page_get(segments[1])!
}
// Get an image from any collection using format "collection:image"
pub fn (a DocTree) image_get(key string) !&File {
	segments := key.split(':')
	if segments.len != 2 {
		return error('Invalid image key format. Use "collection:image" in image_get')
	}
	collection := a.get_collection(segments[0])!
	return collection.image_get(segments[1])!
}
// Get a file from any collection using format "collection:file"
pub fn (a DocTree) file_get(key string) !&File {
	segments := key.split(':')
	if segments.len != 2 {
		return error('Invalid file key format. Use "collection:file" in file_get')
	}
	collection := a.get_collection(segments[0])!
	return collection.file_get(segments[1])!
}
// Get a file or image from any collection using format "collection:file";
// the key is parsed (and normalized) by doctree.key_parse.
pub fn (a DocTree) file_or_image_get(key string) !&File {
	col_name, item_name := doctree.key_parse(key)!
	collection := a.get_collection(col_name)!
	return collection.file_or_image_get(item_name)!
}
// Check if a page exists; an unknown collection yields false, not an error.
pub fn (a DocTree) page_exists(key string) !bool {
	col_name, item_name := doctree.key_parse(key)!
	collection := a.get_collection(col_name) or { return false }
	return collection.page_exists(item_name)
}
// Check if an image exists; an unknown collection yields false, not an error.
pub fn (a DocTree) image_exists(key string) !bool {
	col_name, item_name := doctree.key_parse(key)!
	collection := a.get_collection(col_name) or { return false }
	return collection.image_exists(item_name)
}
// Check if a file exists; an unknown collection yields false, not an error.
pub fn (a DocTree) file_exists(key string) !bool {
	col_name, item_name := doctree.key_parse(key)!
	collection := a.get_collection(col_name) or { return false }
	return collection.file_exists(item_name)
}
// Check if a file or image exists; an unknown collection yields false.
pub fn (a DocTree) file_or_image_exists(key string) !bool {
	col_name, item_name := doctree.key_parse(key)!
	collection := a.get_collection(col_name) or { return false }
	return collection.file_or_image_exists(item_name)
}
// List all pages in the DocTree, keyed by collection name.
// Page names within each collection are returned sorted for determinism.
pub fn (a DocTree) list_pages() map[string][]string {
	mut result := map[string][]string{}
	for col_name, col in a.collections {
		// keys() copies the map keys directly; avoids the manual append loop
		mut page_names := col.pages.keys()
		page_names.sort()
		result[col_name] = page_names
	}
	return result
}

View File

@@ -0,0 +1,104 @@
module core
import incubaid.herolib.web.doctree
import incubaid.herolib.core.pathlib
import os
// Group is a named set of email patterns used for access matching.
@[heap]
pub struct Group {
pub mut:
	name     string   // normalized to lowercase
	patterns []string // email patterns, normalized to lowercase
}

// Arguments for new_group; both fields are mandatory.
@[params]
pub struct GroupNewArgs {
pub mut:
	name     string   @[required]
	patterns []string @[required]
}
// Create a new Group with a normalized name and lowercased patterns.
pub fn new_group(args GroupNewArgs) !Group {
	fixed_name := doctree.name_fix(args.name)
	lowered := args.patterns.map(it.to_lower())
	return Group{
		name: fixed_name
		patterns: lowered
	}
}
// Check whether the email (case-insensitively) matches any pattern in this group.
pub fn (g Group) matches(email string) bool {
	lowered := email.to_lower()
	return g.patterns.any(matches_pattern(lowered, it))
}
// Helper: match email against a wildcard pattern.
// '*'              matches everything
// '*@domain.com'   matches any address ending in '@domain.com'
// 'admin@*'        matches any address starting with 'admin@' (generalized)
// 'exact@email.com' matches only 'exact@email.com'
// Both arguments are expected to be lowercased by the caller.
fn matches_pattern(email string, pattern string) bool {
	if pattern == '*' {
		return true
	}
	if !pattern.contains('*') {
		// No wildcard: exact comparison
		return email == pattern
	}
	// Leading wildcard: '*suffix'
	if pattern.starts_with('*') {
		suffix := pattern[1..] // Remove the '*'
		return email.ends_with(suffix)
	}
	// Trailing wildcard: 'prefix*' (previously unsupported, always false)
	if pattern.ends_with('*') {
		prefix := pattern[..pattern.len - 1] // Remove the '*'
		return email.starts_with(prefix)
	}
	// Mid-string wildcards are not supported
	return false
}
// parse_group_file parses a single .group file, resolving `include:` lines
// recursively. `filename` is the group name without the .group extension;
// `visited` tracks the include chain to detect cycles.
fn parse_group_file(filename string, base_path string, mut visited map[string]bool) !Group {
	if filename in visited {
		// Fixed: interpolation was garbled as '$(unknown)'
		return error('Circular include detected: ${filename}')
	}
	visited[filename] = true
	mut group := Group{
		name: doctree.name_fix(filename)
		patterns: []string{}
	}
	// Fixed: path interpolation was garbled as '$(unknown)'
	mut file_path := pathlib.get_file(path: '${base_path}/${filename}.group')!
	content := file_path.read()!
	for line_orig in content.split_into_lines() {
		line := line_orig.trim_space()
		// Skip blanks and // comments
		if line.len == 0 || line.starts_with('//') {
			continue
		}
		if line.starts_with('include:') {
			mut included_name := line.trim_string_left('include:').trim_space()
			included_name = included_name.replace('.group', '') // Remove .group if present
			include_path := '${base_path}/${included_name}.group'
			if !os.exists(include_path) {
				return error('Included group file not found: ${included_name}.group')
			}
			// Includes only resolve within the same folder (base_path)
			included_group := parse_group_file(included_name, base_path, mut visited)!
			group.patterns << included_group.patterns
		} else {
			group.patterns << line.to_lower()
		}
	}
	return group
}

View File

@@ -0,0 +1,15 @@
In doctree/:

- Check the format of group files; see content/groups for examples.
- Group files now end with the `.group` extension.
- Check how `include:` works: a group can include another group as defined, but only from the same folder.
- In the doctree scan function, add a `scan_groups` function that finds groups; only do this for the collection named `groups`.
- Do not add the `groups` collection itself to the doctree — it is a system collection.
- Build the groups and add them to the doctree.
- Give clear instructions to the coding agent on how to write the code.

311
lib/web/doctree/core/link.v Normal file
View File

@@ -0,0 +1,311 @@
module core
import incubaid.herolib.web.doctree
import incubaid.herolib.ui.console
// What kind of resource a markdown link points to.
pub enum LinkFileType {
	page  // Default: link to another page
	file  // Link to a non-image file
	image // Link to an image file
}

// Link represents a markdown link found in content
pub struct Link {
pub mut:
	src    string // Source content where link was found (what to replace)
	text   string // Link text [text]
	target string // Original link target (the source text)
	line   int    // Line number where link was found (1-based)
	pos    int    // Character position in line where link starts (0-based)
	target_collection_name string // normalized collection part of the target
	target_item_name       string // normalized page/file part of the target
	status    LinkStatus
	file_type LinkFileType // Type of the link target: file, image, or page (default)
	page &Page @[skip; str: skip] // Reference to page where this link is found
}

// Resolution state of a link after parse_link_target ran.
pub enum LinkStatus {
	init      // not yet resolved
	external  // http/https/mailto/ftp target; never resolved locally
	found     // target exists in the doctree
	not_found // target missing; a collection error was recorded
	anchor    // '#...' in-page anchor
	error
}
// Get the "collection:item" key for this link, built from the
// normalized names filled in by parse_link_target.
fn (mut self Link) key() string {
	return '${self.target_collection_name}:${self.target_item_name}'
}
// Resolve the page this link points to via the owning doctree.
// External links have no local target and produce an error.
pub fn (mut self Link) target_page() !&Page {
	if self.status == .external {
		return error('External links do not have a target page')
	}
	tree := self.page.collection.doctree
	return tree.page_get(self.key())
}
// Resolve the file (or image) this link points to via the owning doctree.
// External links have no local target and produce an error.
pub fn (mut self Link) target_file() !&File {
	if self.status == .external {
		return error('External links do not have a target file')
	}
	tree := self.page.collection.doctree
	return tree.file_or_image_get(self.key())
}
// Find all markdown links in content.
//
// Scans line by line for `[text](target)` and `![text](target)` patterns using
// plain bracket/paren matching (no escaping, no multi-line links). Each match
// is classified as page/file/image and validated via parse_link_target, which
// may record collection errors for broken targets.
fn (mut p Page) find_links(content string) ![]Link {
	mut links := []Link{}
	mut lines := content.split_into_lines()
	for line_idx, line in lines {
		// println('Processing line ${line_idx + 1}: ${line}')
		mut pos := 0
		for {
			// Candidate image marker: nearest '!' at or after pos; -1 when absent
			mut image_open := line.index_after('!', pos) or { -1 }
			// Find next [
			open_bracket := line.index_after('[', pos) or { break }
			// Find matching ]
			close_bracket := line.index_after(']', open_bracket) or { break }
			// Check for (
			if close_bracket + 1 >= line.len || line[close_bracket + 1] != `(` {
				pos = close_bracket + 1
				// println('no ( after ]: skipping, ${line}')
				continue
			}
			// Only treat as an image when the '!' is directly before '[' (markdown `![`)
			if image_open + 1 != open_bracket {
				image_open = -1
			}
			// Find matching )
			open_paren := close_bracket + 1
			close_paren := line.index_after(')', open_paren) or { break }
			// Extract link components
			text := line[open_bracket + 1..close_bracket]
			target := line[open_paren + 1..close_paren]
			// Determine link type based on content
			mut detected_file_type := LinkFileType.page
			// Check if it's an image link (starts with !)
			if image_open != -1 {
				detected_file_type = .image
			} else if target.contains('.') && !target.trim_space().to_lower().ends_with('.md') {
				// File link: has extension but not .md
				detected_file_type = .file
			}
			// console.print_debug('Found link: text="${text}", target="${target}", type=${detected_file_type}')
			// Store position - use image_open if it's an image, otherwise open_bracket
			link_start_pos := if detected_file_type == .image { image_open } else { open_bracket }
			// For image links, src should include the ! prefix
			link_src := if detected_file_type == .image {
				line[image_open..close_paren + 1]
			} else {
				line[open_bracket..close_paren + 1]
			}
			mut link := Link{
				src: link_src
				text: text
				target: target.trim_space()
				line: line_idx + 1
				pos: link_start_pos
				file_type: detected_file_type
				page: &p
			}
			// Resolve collection/item names and set link.status (records errors on failure)
			p.parse_link_target(mut link)!
			// No need to set file_type to false for external links, as it's already .page by default
			links << link
			pos = close_paren + 1
		}
	}
	return links
}
// Parse a link target to extract collection and item names, classify the link
// (external/anchor/local), and validate that local targets exist.
// Side effect: records a collection error (with console output) for broken links.
fn (mut p Page) parse_link_target(mut link Link) ! {
	mut target := link.target.to_lower().trim_space()
	// Check for external links (http, https, mailto, ftp)
	if target.starts_with('http://') || target.starts_with('https://')
		|| target.starts_with('mailto:') || target.starts_with('ftp://') {
		link.status = .external
		return
	}
	// Check for anchor links
	if target.starts_with('#') {
		link.status = .anchor
		return
	}
	// Handle relative paths - extract the last part after /
	// (directory components are discarded; lookup is by name only)
	if target.contains('/') {
		parts := target.split('/')
		if parts.len > 1 {
			target = parts[parts.len - 1]
		}
	}
	// Format: $collection:$pagename or $collection:$pagename.md
	if target.contains(':') {
		parts := target.split(':')
		if parts.len >= 2 {
			link.target_collection_name = doctree.name_fix(parts[0])
			// For file links, use name without extension; for page links, normalize normally
			if link.file_type == .file {
				link.target_item_name = doctree.name_fix(parts[1])
			} else {
				link.target_item_name = normalize_page_name(parts[1])
			}
		}
	} else {
		// No collection prefix: the link is local to this page's collection
		// For file links, use name without extension; for page links, normalize normally
		if link.file_type == .file {
			link.target_item_name = doctree.name_fix(target).trim_space()
		} else {
			link.target_item_name = normalize_page_name(target).trim_space()
		}
		link.target_collection_name = p.collection.name
	}
	// console.print_debug('Parsed link target: collection="${link.target_collection_name}", item="${link.target_item_name}", type=${link.file_type}')
	// Validate link target exists
	mut target_exists := false
	mut error_category := CollectionErrorCategory.invalid_page_reference
	mut error_prefix := 'Broken link'
	if link.file_type == .file || link.file_type == .image {
		target_exists = p.collection.doctree.file_or_image_exists(link.key())!
		error_category = .invalid_file_reference
		error_prefix = if link.file_type == .file { 'Broken file link' } else { 'Broken image link' }
	} else {
		target_exists = p.collection.doctree.page_exists(link.key())!
	}
	// console.print_debug('Link target exists: ${target_exists} for key=${link.key()}')
	if target_exists {
		link.status = .found
	} else {
		// Record the broken link on the collection; status marks the link unusable
		p.collection.error(
			category: error_category
			page_key: p.key()
			message: '${error_prefix} to `${link.key()}` at line ${link.line}: `${link.src}`'
			show_console: true
		)
		link.status = .not_found
	}
}
////////////////FIX PAGES FOR THE LINKS///////////////////////
// Options controlling how links are rewritten in page content.
@[params]
pub struct FixLinksArgs {
	include          bool // Process includes before fixing links
	cross_collection bool // Process cross-collection links (for export)
	export_mode      bool // Use export-style simple paths instead of filesystem paths
}
// Fix links in page content - rewrites links with proper relative paths.
// Only links with status .found are rewritten; external/anchor/broken links
// are left untouched. Returns the rewritten content (the page file itself is
// not modified here).
fn (mut p Page) content_with_fixed_links(args FixLinksArgs) !string {
	mut content := p.content(include: args.include)!
	// Get links - either re-find them (if includes processed) or use cached
	mut links := if args.include {
		p.find_links(content)! // Re-find links in processed content
	} else {
		p.links // Use cached links from validation
	}
	// Filter and transform links
	for mut link in links {
		// Skip invalid links
		if link.status != .found {
			continue
		}
		// Skip cross-collection links unless enabled
		is_local := link.target_collection_name == p.collection.name
		if !args.cross_collection && !is_local {
			continue
		}
		// Calculate new link path based on mode
		new_link := if args.export_mode {
			p.export_link_path(mut link) or { continue }
		} else {
			p.filesystem_link_path(mut link) or { continue }
		}
		// Build the complete link markdown
		// For image links, link.src already includes the !, so we build the same format
		prefix := if link.file_type == .image { '!' } else { '' }
		new_link_md := '${prefix}[${link.text}](${new_link})'
		// Replace in content
		// NOTE(review): replace() substitutes ALL occurrences of link.src; identical
		// links on different lines are rewritten to the same target, which is
		// harmless here since they resolve to the same path.
		content = content.replace(link.src, new_link_md)
	}
	return content
}
// export_link_path calculates the rewritten target for export output, where
// every referenced asset is local to the exported page: images live under
// img/, other files under files/, and pages are flat sibling .md files.
fn (mut p Page) export_link_path(mut link Link) !string {
	if link.file_type == .page {
		mut target_page := link.target_page()!
		return '${target_page.name}.md'
	}
	mut target_file := link.target_file()!
	folder := if link.file_type == .image { 'img' } else { 'files' }
	return '${folder}/${target_file.name}'
}
// filesystem_link_path calculates the rewritten target using actual
// filesystem locations: the target's path made relative to this page's path.
fn (mut p Page) filesystem_link_path(mut link Link) !string {
	source_path := p.path()!
	// Pages resolve through target_page(); files and images through target_file()
	mut target_path := match link.file_type {
		.image, .file {
			mut tf := link.target_file()!
			tf.path()!
		}
		.page {
			mut tp := link.target_page()!
			tp.path()!
		}
	}
	return target_path.path_relative(source_path.path)!
}
/////////////TOOLS//////////////////////////////////
// Normalize a page name: strip a trailing `.md` (if any) and apply name_fix.
fn normalize_page_name(name string) string {
	stripped := name.trim_string_right('.md')
	return doctree.name_fix(stripped)
}

122
lib/web/doctree/core/page.v Normal file
View File

@@ -0,0 +1,122 @@
module core
import incubaid.herolib.core.pathlib
import incubaid.herolib.web.doctree as doctreetools
// Page is a single markdown document belonging to a collection.
@[heap]
pub struct Page {
pub mut:
	name            string // normalized page name (no .md)
	path            string // in collection
	collection_name string
	links           []Link // links found during validation (cached)
	// macros []Macro
	collection &Collection @[skip; str: skip] // Reference to parent collection
}

// Arguments for constructing a Page; all fields are mandatory.
@[params]
pub struct NewPageArgs {
pub:
	name            string      @[required]
	path            string      @[required]
	collection_name string      @[required]
	collection      &Collection @[required]
}
// Return the absolute path of this page, resolved against its parent
// collection directory (p.path is relative to the collection).
pub fn (mut p Page) path() !pathlib.Path {
	curpath := p.collection.path()!
	return pathlib.get_file(path: '${curpath.path}/${p.path}', create: false)! // should be relative to collection
}
// Arguments for content(): when include is true, `!!include` actions are
// expanded recursively before returning.
@[params]
pub struct ReadContentArgs {
pub mut:
	include bool
}

// Read the page's content from disk; processes includes only when requested.
pub fn (mut p Page) content(args ReadContentArgs) !string {
	mut mypath := p.path()!
	mut content := mypath.read()!
	if args.include {
		// fresh visited-map per top-level call for circular-include detection
		mut v := map[string]bool{}
		content = p.process_includes(content, mut v)!
	}
	return content
}
// Recursively process `!!include` actions, replacing each with the content of
// the referenced page. `visited` holds the current include chain; a repeated
// key signals a circular include (recorded as a collection error, returns '').
// Missing include targets are recorded as errors and replaced with an HTML
// comment placeholder rather than failing the whole page.
fn (mut p Page) process_includes(content string, mut visited map[string]bool) !string {
	mut doctree := p.collection.doctree
	// Prevent circular includes
	page_key := p.key()
	if page_key in visited {
		p.collection.error(
			category: .circular_include
			page_key: page_key
			message: 'Circular include detected for page `${page_key}`'
			show_console: false // Don't show immediately, collect for later
		)
		return ''
	}
	visited[page_key] = true
	mut result := content
	mut lines := result.split_into_lines()
	mut processed_lines := []string{}
	for line in lines {
		trimmed := line.trim_space()
		// Check for include action: !!include collection:page or !!include page
		if trimmed.starts_with('!!include') {
			// Parse the include reference
			include_ref := trimmed.trim_string_left('!!include').trim_space()
			// Determine collection and page name
			// (no collection prefix means: same collection as this page)
			mut target_collection := p.collection_name
			mut target_page := ''
			if include_ref.contains(':') {
				target_collection, target_page = doctreetools.key_parse(include_ref)!
			} else {
				target_page = doctreetools.name_fix(include_ref)
			}
			// Build page key
			page_ref := '${target_collection}:${target_page}'
			// Get the referenced page from doctree
			mut include_page := doctree.page_get(page_ref) or {
				p.collection.error(
					category: .missing_include
					page_key: page_key
					message: 'Included page `${page_ref}` not found'
					show_console: false
				)
				processed_lines << '<!-- Include not found: ${page_ref} -->'
				continue
			}
			// Recursively process the included page
			include_content := include_page.process_includes(include_page.content()!, mut
				visited)!
			processed_lines << include_content
		} else {
			processed_lines << line
		}
	}
	// Remove this page from visited map to allow it to be included again in other contexts
	// This prevents false positives when a page is included multiple times (which is valid)
	visited.delete(page_key)
	return processed_lines.join_lines()
}
// key returns the canonical "collection:page" identifier of this page.
pub fn (p Page) key() string {
	return p.collection_name + ':' + p.name
}

View File

@@ -0,0 +1,87 @@
module core
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.develop.gittools
import incubaid.herolib.ui.console
import os
// Play function to process HeroScript actions for DocTree.
// Handles `!!doctree.scan` (build/populate instances, from a path or a git
// URL) and `!!doctree.export` (write collections to a destination). After all
// scans, init_post() runs once per touched instance before exports are handled.
pub fn play(mut plbook PlayBook) ! {
	if !plbook.exists(filter: 'doctree.') {
		return
	}
	// Track which doctrees we've processed in this playbook
	// NOTE(review): variable name has a typo ('doctreees'); harmless, local only
	mut processed_doctreees := map[string]bool{}
	mut name := ''
	// Process scan actions - scan directories for collections
	mut scan_actions := plbook.find(filter: 'doctree.scan')!
	for mut action in scan_actions {
		mut p := action.params
		name = p.get_default('name', 'main')!
		ignore := p.get_list_default('ignore', [])!
		console.print_item("Scanning DocTree '${name}' with ignore patterns: ${ignore}")
		// Get or create doctree from global map
		mut doctree_instance := if exists(name) {
			get(name)!
		} else {
			console.print_debug('DocTree not found, creating a new one')
			new(name: name)!
		}
		processed_doctreees[name] = true
		mut path := p.get_default('path', '')!
		// NEW: Support git URL as source
		mut git_url := p.get_default('git_url', '')!
		mut git_pull := p.get_default_false('git_pull')
		if git_url != '' {
			// Clone or get the repository using gittools; resulting checkout
			// path becomes the scan path
			path = gittools.path(
				git_pull: git_pull
				git_url: git_url
			)!.path
		}
		if path == '' {
			return error('Either "path" or "git_url" must be provided for doctree.scan action.')
		}
		doctree_instance.scan(path: path, ignore: ignore)!
		action.done = true
		// No need to call set() again - doctree is already in global map from new()
		// and we're modifying it by reference
	}
	// Run init_post on all processed doctrees
	for doctree_name, _ in processed_doctreees {
		mut doctree_instance_post := get(doctree_name)!
		doctree_instance_post.init_post()!
	}
	// Process export actions - export collections to destination
	mut export_actions := plbook.find(filter: 'doctree.export')!
	// Process explicit export actions
	for mut action in export_actions {
		mut p := action.params
		name = p.get_default('name', 'main')!
		destination := p.get_default('destination', '${os.home_dir()}/hero/var/doctree_export')!
		reset := p.get_default_true('reset')
		include := p.get_default_true('include')
		redis := p.get_default_true('redis')
		// Export requires a previously scanned instance; fail loudly otherwise
		mut doctree_instance := get(name) or {
			return error("DocTree '${name}' not found. Use !!doctree.scan first.")
		}
		doctree_instance.export(
			destination: destination
			reset: reset
			include: include
			redis: redis
		)!
		action.done = true
	}
}

View File

@@ -0,0 +1,4 @@
- first find all pages
- then for each page find all links

View File

@@ -0,0 +1,384 @@
module core
import incubaid.herolib.core.pathlib
import os
import json
// Base directory for all on-disk fixtures created by this test file.
const test_base = '/tmp/doctree_test'

// Start from a clean fixture directory.
fn testsuite_begin() {
	os.rmdir_all(test_base) or {}
	os.mkdir_all(test_base)!
}

// Remove fixtures after the suite finishes.
fn testsuite_end() {
	os.rmdir_all(test_base) or {}
}

// A freshly created DocTree has its name set and no collections.
fn test_create_doctree() {
	mut a := new(name: 'test_doctree')!
	assert a.name == 'test_doctree'
	assert a.collections.len == 0
}

// Adding a directory with a .collection marker registers it under its name.
fn test_add_collection() {
	// Create test collection
	col_path := '${test_base}/col1'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:col1')!
	mut page := pathlib.get_file(path: '${col_path}/page1.md', create: true)!
	page.write('# Page 1\n\nContent here.')!
	mut a := new(name: 'test')!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	assert a.collections.len == 1
	assert 'col1' in a.collections
}

// scan() discovers nested collections and their pages.
fn test_scan() {
	// Create test structure
	os.mkdir_all('${test_base}/docs/guides')!
	mut cfile := pathlib.get_file(path: '${test_base}/docs/guides/.collection', create: true)!
	cfile.write('name:guides')!
	mut page := pathlib.get_file(path: '${test_base}/docs/guides/intro.md', create: true)!
	page.write('# Introduction')!
	mut a := new()!
	a.scan(path: '${test_base}/docs')!
	assert a.collections.len == 1
	col := a.get_collection('guides')!
	assert col.page_exists('intro')!
}

// export() writes content/ files and meta/ JSON for each collection.
fn test_export() {
	// Setup
	col_path := '${test_base}/source/col1'
	export_path := '${test_base}/export'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:col1')!
	mut page := pathlib.get_file(path: '${col_path}/test.md', create: true)!
	page.write('# Test Page')!
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	a.export(destination: export_path, redis: false)!
	assert os.exists('${export_path}/content/col1/test.md')
	assert os.exists('${export_path}/meta/col1.json')
}
// Exporting with include:true inlines !!include targets and removes the action.
fn test_export_with_includes() {
	// Setup: Create pages with includes
	col_path := '${test_base}/include_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:test_col')!
	// Page 1: includes page 2
	mut page1 := pathlib.get_file(path: '${col_path}/page1.md', create: true)!
	page1.write('# Page 1\n\n!!include test_col:page2\n\nEnd of page 1')!
	// Page 2: standalone content
	mut page2 := pathlib.get_file(path: '${col_path}/page2.md', create: true)!
	page2.write('## Page 2 Content\n\nThis is included.')!
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	export_path := '${test_base}/export_include'
	a.export(destination: export_path, include: true)!
	// Verify exported page1 has page2 content included
	exported := os.read_file('${export_path}/content/test_col/page1.md')!
	assert exported.contains('Page 2 Content')
	assert exported.contains('This is included')
	assert !exported.contains('!!include')
}

// Exporting with include:false keeps the raw !!include action in place.
fn test_export_without_includes() {
	col_path := '${test_base}/no_include_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:test_col2')!
	mut page1 := pathlib.get_file(path: '${col_path}/page1.md', create: true)!
	page1.write('# Page 1\n\n!!include test_col2:page2\n\nEnd')!
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	export_path := '${test_base}/export_no_include'
	a.export(destination: export_path, include: false)!
	// Verify exported page1 still has include action
	exported := os.read_file('${export_path}/content/test_col2/page1.md')!
	assert exported.contains('!!include')
}

// NOTE(review): stub — sets up a collection but asserts nothing yet.
fn test_error_deduplication() {
	mut a := new(name: 'test')!
	col_path := '${test_base}/err_dedup_col'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:err_dedup_col')!
	mut col := a.add_collection(mut pathlib.get_dir(path: col_path)!)!
}

// NOTE(review): empty stub — to be implemented.
fn test_error_hash() {
}

// find_links() detects both local and collection-qualified page links.
fn test_find_links() {
	col_path := '${test_base}/find_links_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:test_col')!
	mut page_file := pathlib.get_file(path: '${col_path}/test_page.md', create: true)!
	page_file.write('# Test Page\n\n[Link 1](page1)\n[Link 2](guides:intro)')!
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	mut page := a.page_get('test_col:test_page')!
	content := page.content()!
	links := page.find_links(content)!
	assert links.len >= 2
}
// A link to an existing page validates without collection errors.
fn test_validate_links() {
	// Setup
	col_path := '${test_base}/link_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:test_col')!
	// Create page1 with valid link
	mut page1 := pathlib.get_file(path: '${col_path}/page1.md', create: true)!
	page1.write('[Link to page2](page2)')!
	// Create page2 (target exists)
	mut page2 := pathlib.get_file(path: '${col_path}/page2.md', create: true)!
	page2.write('# Page 2')!
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	// Validate
	a.validate_links()!
	// Should have no errors
	col := a.get_collection('test_col')!
	assert col.errors.len == 0
}

// NOTE(review): asserts nothing after validation — the broken-link error
// count should be checked here.
fn test_validate_broken_links() {
	// Setup
	col_path := '${test_base}/broken_link_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:test_col')!
	// Create page with broken link
	mut page1 := pathlib.get_file(path: '${col_path}/page1.md', create: true)!
	page1.write('[Broken link](nonexistent)')!
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	// Validate
	a.validate_links()!
	// Should have error
	col := a.get_collection('test_col')!
}

// content_with_fixed_links rewrites a same-directory page link to its .md path.
fn test_fix_links() {
	// Setup - all pages in same directory for simpler test
	col_path := '${test_base}/fix_link_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:test_col')!
	// Create pages in same directory
	mut page1 := pathlib.get_file(path: '${col_path}/page1.md', create: true)!
	page1.write('[Link](page2)')!
	mut page2 := pathlib.get_file(path: '${col_path}/page2.md', create: true)!
	page2.write('# Page 2')!
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	// Get the page and test fix_links directly
	mut col := a.get_collection('test_col')!
	mut p := col.page_get('page1')!
	original := p.content()!
	println('Original: ${original}')
	fixed := p.content_with_fixed_links(FixLinksArgs{
		include: true
		cross_collection: true
		export_mode: false
	})!
	println('Fixed: ${fixed}')
	// The fix_links should work on content
	assert fixed.contains('[Link](page2.md)')
}

// Both bare-name and .md-suffixed link targets are detected.
fn test_link_formats() {
	col_path := '${test_base}/link_format_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:test_col')!
	// Create target pages
	mut page1 := pathlib.get_file(path: '${col_path}/page1.md', create: true)!
	page1.write('# Page 1')!
	mut page2 := pathlib.get_file(path: '${col_path}/page2.md', create: true)!
	page2.write('# Page 2')!
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	// Test various link formats
	mut test_page := a.page_get('test_col:page1')!
	content := '[Link](page2)\n[Link](page2.md)'
	links := test_page.find_links(content)!
	assert links.len == 2
}
// Cross-collection links validate cleanly and are NOT rewritten by fix_links
// (only local links are rewritten on disk by default).
fn test_cross_collection_links() {
	// Setup two collections
	col1_path := '${test_base}/col1_cross'
	col2_path := '${test_base}/col2_cross'
	os.mkdir_all(col1_path)!
	os.mkdir_all(col2_path)!
	mut cfile1 := pathlib.get_file(path: '${col1_path}/.collection', create: true)!
	cfile1.write('name:col1')!
	mut cfile2 := pathlib.get_file(path: '${col2_path}/.collection', create: true)!
	cfile2.write('name:col2')!
	// Page in col1 links to col2
	mut page1 := pathlib.get_file(path: '${col1_path}/page1.md', create: true)!
	page1.write('[Link to col2](col2:page2)')!
	// Page in col2
	mut page2 := pathlib.get_file(path: '${col2_path}/page2.md', create: true)!
	page2.write('# Page 2')!
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col1_path)!)!
	a.add_collection(mut pathlib.get_dir(path: col2_path)!)!
	// Validate - should pass
	a.validate_links()!
	col1 := a.get_collection('col1')!
	assert col1.errors.len == 0
	// Fix links - cross-collection links should NOT be rewritten
	a.fix_links()!
	fixed := page1.read()!
	assert fixed.contains('[Link to col2](col2:page2)') // Unchanged
}

// NOTE(review): stub — builds a collection but no save/load assertions yet.
fn test_save_and_load() {
	// Setup
	col_path := '${test_base}/save_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:test_col')!
	mut page := pathlib.get_file(path: '${col_path}/page1.md', create: true)!
	page.write('# Page 1\n\nContent here.')!
	// Create and save
	mut a := new(name: 'test')!
	a.add_collection(mut pathlib.get_dir(path: col_path)!)!
	col := a.get_collection('test_col')!
}

// NOTE(review): stub — no error-save assertions yet.
fn test_save_with_errors() {
	col_path := '${test_base}/error_save_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:err_col')!
	mut a := new(name: 'test')!
	mut col := a.add_collection(mut pathlib.get_dir(path: col_path)!)!
}

// NOTE(review): stub — builds two collections but no load assertions yet.
fn test_load_from_directory() {
	// Setup multiple collections
	col1_path := '${test_base}/load_dir/col1'
	col2_path := '${test_base}/load_dir/col2'
	os.mkdir_all(col1_path)!
	os.mkdir_all(col2_path)!
	mut cfile1 := pathlib.get_file(path: '${col1_path}/.collection', create: true)!
	cfile1.write('name:col1')!
	mut cfile2 := pathlib.get_file(path: '${col2_path}/.collection', create: true)!
	cfile2.write('name:col2')!
	mut page1 := pathlib.get_file(path: '${col1_path}/page1.md', create: true)!
	page1.write('# Page 1')!
	mut page2 := pathlib.get_file(path: '${col2_path}/page2.md', create: true)!
	page2.write('# Page 2')!
	// Create and save
	mut a := new(name: 'test')!
	a.add_collection(mut pathlib.get_dir(path: col1_path)!)!
	a.add_collection(mut pathlib.get_dir(path: col2_path)!)!
}

// NOTE(review): partially disabled — get_edit_url() does not exist yet, so the
// assertions are commented out pending implementation.
fn test_get_edit_url() {
	// Create a mock collection
	mut doctree := new(name: 'test_doctree')!
	col_path := '${test_base}/git_test'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:git_test_col')!
	mut col := doctree.add_collection(mut pathlib.get_dir(path: col_path)!)!
	col.git_url = 'https://github.com/test/repo.git' // Assuming git_url is a field on Collection
	// Create a mock page
	mut page_path := pathlib.get_file(path: '${col_path}/test_page.md', create: true)!
	page_path.write('test content')!
	col.add_page(mut page_path)!
	// Get the page and collection edit URLs
	page := col.page_get('test_page')!
	// edit_url := page.get_edit_url()! // This method does not exist
	// Assert the URLs are correct
	// assert edit_url == 'https://github.com/test/repo/edit/main/test_page.md'
}

View File

@@ -0,0 +1,605 @@
# DocTree Module
A lightweight document collection manager for V, inspired by doctree but simplified.
## Features
- **Simple Collection Scanning**: Automatically find collections marked with `.collection` files
- **Include Processing**: Process `!!include` actions to embed content from other pages
- **Easy Export**: Copy files to destination with organized structure
- **Optional Redis**: Store metadata in Redis for quick lookups and caching
- **Type-Safe Access**: Get pages, images, and files with error handling
- **Error Tracking**: Built-in error collection and reporting with deduplication
## Quick Start
Put the following in a `.hero` file and execute it with `hero`, or add a shebang line at the top of the `.hero` script to run it directly.
**Scan Parameters:**
- `name` (optional, default: 'main') - DocTree instance name
- `path` (required when git_url not provided) - Directory path to scan
- `git_url` (alternative to path) - Git repository URL to clone/checkout
- `git_root` (optional when using git_url, default: ~/code) - Base directory for cloning
- `meta_path` (optional) - Directory to save collection metadata JSON
- `ignore` (optional) - List of directory names to skip during scan
**most basic example**
```heroscript
#!/usr/bin/env hero
!!doctree.scan git_url:"https://git.ourworld.tf/tfgrid/docs_tfgrid4/src/branch/main/collections/tests"
!!doctree.export
```
put this in .hero file
## usage in herolib
```v
import incubaid.herolib.data.doctree
// Create a new DocTree
mut a := doctree.new(name: 'my_docs')!
// Scan a directory for collections
a.scan(path: '/path/to/docs')!
// Export to destination
a.export(destination: '/path/to/output')!
```
## Collections
Collections are directories marked with a `.collection` file.
### .collection File Format
```
name:my_collection
```
## Core Concepts
### Collections
A collection is a directory containing:
- A `.collection` file (marks the directory as a collection)
- Markdown pages (`.md` files)
- Images (`.png`, `.jpg`, `.jpeg`, `.gif`, `.svg`)
- Other files
### Page Keys
Pages, images, and files are referenced using the format: `collection:name`
```v
// Get a page
page := a.page_get('guides:introduction')!
// Get an image
img := a.image_get('guides:logo')!
// Get a file
file := a.file_get('guides:diagram')!
```
## Usage Examples
### Scanning for Collections
```v
mut a := doctree.new()!
a.scan(path: './docs')!
```
### Adding a Specific Collection
```v
a.add_collection(name: 'guides', path: './docs/guides')!
```
### Getting Pages
```v
// Get a page
page := a.page_get('guides:introduction')!
content := page.content()!
// Check if page exists
if a.page_exists('guides:setup') {
println('Setup guide found')
}
```
### Getting Images and Files
```v
// Get an image
img := a.image_get('guides:logo')!
println('Image path: ${img.path.path}')
println('Image type: ${img.ftype}') // .image
// Get a file
file := a.file_get('guides:diagram')!
println('File name: ${file.file_name()}')
// Check existence
if a.image_exists('guides:screenshot') {
println('Screenshot found')
}
```
### Listing All Pages
```v
pages_map := a.list_pages()
for col_name, page_names in pages_map {
println('Collection: ${col_name}')
for page_name in page_names {
println(' - ${page_name}')
}
}
```
### Exporting
```v
// Full export with all features
a.export(
destination: './output'
reset: true // Clear destination before export
include: true // Process !!include actions
redis: true // Store metadata in Redis
)!
// Export without Redis
a.export(
destination: './output'
redis: false
)!
```
### Error Handling
```v
// Export and check for errors
a.export(destination: './output')!
// Errors are automatically printed during export
// You can also access them programmatically
for _, col in a.collections {
if col.has_errors() {
errors := col.get_errors()
for err in errors {
println('Error: ${err.str()}')
}
// Get error summary by category
summary := col.error_summary()
for category, count in summary {
println('${category}: ${count} errors')
}
}
}
```
### Include Processing
DocTree supports simple include processing using `!!include` actions:
```v
// Export with includes processed (default)
a.export(
destination: './output'
include: true // default
)!
// Export without processing includes
a.export(
destination: './output'
include: false
)!
```
#### Include Syntax
In your markdown files:
```md
# My Page
!!include collection:page_name
More content here
```
Or within the same collection:
```md
!!include page_name
```
The `!!include` action will be replaced with the content of the referenced page during export.
#### Reading Pages with Includes
```v
// Read with includes processed (default)
mut page := a.page_get('col:mypage')!
content := page.content(include: true)!
// Read raw content without processing includes
content := page.content()!
```
## Git Integration
DocTree automatically detects the git repository URL for each collection and stores it for reference. This allows users to easily navigate to the source for editing.
### Automatic Detection
When scanning collections, DocTree walks up the directory tree to find the `.git` directory and captures:
- **git_url**: The remote origin URL
- **git_branch**: The current branch
### Scanning from Git URL
You can scan collections directly from a git repository:
```heroscript
!!doctree.scan
name: 'my_docs'
git_url: 'https://github.com/myorg/docs.git'
git_root: '~/code' // optional, defaults to ~/code
```
The repository will be automatically cloned if it doesn't exist locally.
### Accessing Edit URLs
```v
mut page := doctree.page_get('guides:intro')!
edit_url := page.get_edit_url()!
println('Edit at: ${edit_url}')
// Output: Edit at: https://github.com/myorg/docs/edit/main/guides.md
```
### Export with Source Information
When exporting, the git URL is displayed:
```
Collection guides source: https://github.com/myorg/docs.git (branch: main)
```
This allows published documentation to link back to the source repository for contributions.
## Links
DocTree supports standard Markdown links with several formats for referencing pages within collections.
### Link Formats
#### 1. Explicit Collection Reference
Link to a page in a specific collection:
```md
[Click here](guides:introduction)
[Click here](guides:introduction.md)
```
#### 2. Same Collection Reference
Link to a page in the same collection (collection name omitted):
```md
[Click here](introduction)
```
#### 3. Path-Based Reference
Link using a path - **only the filename is used** for matching:
```md
[Click here](some/path/introduction)
[Click here](/absolute/path/introduction)
[Click here](path/to/introduction.md)
```
**Important:** Paths are ignored during link resolution. Only the page name (filename) is used to find the target page within the same collection.
### Link Processing
#### Validation
Check all links in your DocTree:
```v
mut a := doctree.new()!
a.scan(path: './docs')!
// Validate all links
a.validate_links()!
// Check for errors
for _, col in a.collections {
if col.has_errors() {
col.print_errors()
}
}
```
#### Fixing Links
Automatically rewrite links with correct relative paths:
```v
mut a := doctree.new()!
a.scan(path: './docs')!
// Fix all links in place
a.fix_links()!
// Or fix links in a specific collection
mut col := a.get_collection('guides')!
col.fix_links()!
```
**What `fix_links()` does:**
- Finds all local page links
- Calculates correct relative paths
- Rewrites links as `[text](relative/path/pagename.md)`
- Only fixes links within the same collection
- Preserves `!!include` actions unchanged
- Writes changes back to files
#### Example
Before fix:
```md
# My Page
[Introduction](introduction)
[Setup](/some/old/path/setup)
[Guide](guides:advanced)
```
After fix (assuming pages are in subdirectories):
```md
# My Page
[Introduction](../intro/introduction.md)
[Setup](setup.md)
[Guide](guides:advanced) <!-- Cross-collection link unchanged -->
```
### Link Rules
1. **Name Normalization**: All page names are normalized using `name_fix()` (lowercase, underscores, etc.)
2. **Same Collection Only**: `fix_links()` only rewrites links within the same collection
3. **Cross-Collection Links**: Links with explicit collection references (e.g., `guides:page`) are validated but not rewritten
4. **External Links**: HTTP(S), mailto, and anchor links are ignored
5. **Error Reporting**: Broken links are reported with file, line number, and link details
### Export Directory Structure
When you export a DocTree, the directory structure is organized as:

```txt
export_dir/
├── content/
│   └── collection_name/
│       ├── page1.md
│       ├── page2.md
│       ├── img/                 (images)
│       │   ├── logo.png
│       │   └── banner.jpg
│       └── files/               (other files)
│           ├── data.csv
│           └── document.pdf
└── meta/                        (metadata)
    └── collection_name.json
```
- **Pages**: Markdown files directly in collection directory
- **Images**: Stored in `img/` subdirectory
- **Files**: Other resources stored in `files/` subdirectory
- **Metadata**: JSON files in `meta/` directory with collection information
## Redis Integration
DocTree uses Redis to store metadata about collections, pages, images, and files for fast lookups and caching.
### Redis Data Structure
When `redis: true` is set during export, DocTree stores:
1. **Collection Paths** - Hash: `doctree:path`
- Key: collection name
- Value: exported collection directory path
2. **Collection Contents** - Hash: `doctree:<collection_name>`
   - Pages: `page_name` → `page_name.md`
   - Images: `image_name.ext` → `img/image_name.ext`
   - Files: `file_name.ext` → `files/file_name.ext`
### Redis Usage Examples
```v
import incubaid.herolib.data.doctree
import incubaid.herolib.core.base
// Export with Redis metadata (default)
mut a := doctree.new(name: 'docs')!
a.scan(path: './docs')!
a.export(
destination: './output'
redis: true // Store metadata in Redis
)!
// Later, retrieve metadata from Redis
mut context := base.context()!
mut redis := context.redis()!
// Get collection path
col_path := redis.hget('doctree:path', 'guides')!
println('Guides collection exported to: ${col_path}')
// Get page location
page_path := redis.hget('doctree:guides', 'introduction')!
println('Introduction page: ${page_path}') // Output: introduction.md
// Get image location
img_path := redis.hget('doctree:guides', 'logo.png')!
println('Logo image: ${img_path}') // Output: img/logo.png
```
## Saving Collections (Beta)
**Status:** Basic save functionality is implemented. Load functionality is work-in-progress.
### Saving to JSON
Save collection metadata to JSON files for archival or cross-tool compatibility:
```v
import incubaid.herolib.data.doctree
mut a := doctree.new(name: 'my_docs')!
a.scan(path: './docs')!
// Save all collections to a specified directory
// Creates: ${save_path}/${collection_name}.json
a.save('./metadata')!
```
### What Gets Saved
Each `.json` file contains:
- Collection metadata (name, path, git URL, git branch)
- All pages (with paths and collection references)
- All images and files (with paths and types)
- All errors (category, page_key, message, file)
### Storage Location
```
save_path/
├── collection1.json
├── collection2.json
└── collection3.json
```
## HeroScript Integration
DocTree integrates with HeroScript, allowing you to define DocTree operations in `.vsh` or playbook files.
### Using in V Scripts
Create a `.vsh` script to process DocTree operations:
```v
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.playbook
import incubaid.herolib.data.doctree
// Define your HeroScript content
heroscript := "
!!doctree.scan path: './docs'
!!doctree.export destination: './output' include: true
"
// Create playbook from text
mut plbook := playbook.new(text: heroscript)!
// Execute doctree actions
doctree.play(mut plbook)!
println('DocTree processing complete!')
```
### Using in Playbook Files
Create a `docs.play` file:
```heroscript
!!doctree.scan
name: 'main'
path: '~/code/docs'
!!doctree.export
destination: '~/code/output'
reset: true
include: true
redis: true
```
Execute it:
```bash
vrun process_docs.vsh
```
Where `process_docs.vsh` contains:
```v
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.core.playbook
import incubaid.herolib.core.playcmds
// Load and execute playbook
mut plbook := playbook.new(path: './docs.play')!
playcmds.run(mut plbook)!
```
### Error Handling
Errors are automatically collected and reported:
```heroscript
!!doctree.scan
path: './docs'
# Errors will be printed during export
!!doctree.export
destination: './output'
```
Errors are shown in the console:
```
Collection guides - Errors (2)
[invalid_page_reference] [guides:intro]: Broken link to `guides:setup` at line 5
[missing_include] [guides:advanced]: Included page `guides:examples` not found
```
### Auto-Export Behavior
If you use `!!doctree.scan` **without** an explicit `!!doctree.export`, DocTree will automatically export to the default location (current directory).
To disable auto-export, include an explicit (empty) export action or simply don't include any scan actions.
### Best Practices
1. **Always validate before export**: Use `!!doctree.validate` to catch broken links early
2. **Use named instances**: When working with multiple documentation sets, use the `name` parameter
3. **Enable Redis for production**: Use `redis: true` for web deployments to enable fast lookups
4. **Process includes during export**: Keep `include: true` to embed referenced content in exported files
## Roadmap - Not Yet Implemented
The following features are planned but not yet available:
- [ ] Load collections from `.collection.json` files
- [ ] Python API for reading collections
- [ ] `doctree.validate` playbook action
- [ ] `doctree.fix_links` playbook action
- [ ] Auto-save on collection modifications
- [ ] Collection version control

View File

@@ -0,0 +1,177 @@
module core
import incubaid.herolib.core.pathlib
import os
import json
const test_base = '/tmp/doctree_test'
// Test recursive export with chained cross-collection links
// Setup: Collection A links to B, Collection B links to C
// Expected: When exporting A, it should include pages from B and C
fn test_export_recursive_links() {
	// Create 3 collections forming a link chain: A -> B -> C
	col_a_path := '${test_base}/recursive_export/col_a'
	col_b_path := '${test_base}/recursive_export/col_b'
	col_c_path := '${test_base}/recursive_export/col_c'
	os.mkdir_all(col_a_path)!
	os.mkdir_all(col_b_path)!
	os.mkdir_all(col_c_path)!
	// Collection A: links to B
	mut cfile_a := pathlib.get_file(path: '${col_a_path}/.collection', create: true)!
	cfile_a.write('name:col_a')!
	mut page_a := pathlib.get_file(path: '${col_a_path}/page_a.md', create: true)!
	// Use real newlines (\n); the previous '\\n' escapes wrote a literal
	// backslash-n sequence into the markdown instead of line breaks.
	page_a.write('# Page A\n\nThis is page A.\n\n[Link to Page B](col_b:page_b)')!
	// Collection B: links to C
	mut cfile_b := pathlib.get_file(path: '${col_b_path}/.collection', create: true)!
	cfile_b.write('name:col_b')!
	mut page_b := pathlib.get_file(path: '${col_b_path}/page_b.md', create: true)!
	page_b.write('# Page B\n\nThis is page B with link to C.\n\n[Link to Page C](col_c:page_c)')!
	// Collection C: final page in the chain
	mut cfile_c := pathlib.get_file(path: '${col_c_path}/.collection', create: true)!
	cfile_c.write('name:col_c')!
	mut page_c := pathlib.get_file(path: '${col_c_path}/page_c.md', create: true)!
	page_c.write('# Page C\n\nThis is the final page in the chain.')!
	// Create DocTree and add all collections
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_a_path)!)!
	a.add_collection(mut pathlib.get_dir(path: col_b_path)!)!
	a.add_collection(mut pathlib.get_dir(path: col_c_path)!)!
	// Validate links before export so that page.links is populated
	a.validate_links()!
	// Export
	export_path := '${test_base}/export_recursive'
	a.export(destination: export_path)!
	// ===== VERIFICATION PHASE =====
	// 1. Verify directory structure exists
	assert os.exists('${export_path}/content'), 'Export content directory should exist'
	assert os.exists('${export_path}/content/col_a'), 'Collection col_a directory should exist'
	assert os.exists('${export_path}/meta'), 'Export meta directory should exist'
	// 2. Verify all pages exist in the col_a export directory
	// Note: pages pulled in from other collections land in col_a's directory
	assert os.exists('${export_path}/content/col_a/page_a.md'), 'page_a.md should be exported'
	assert os.exists('${export_path}/content/col_a/page_b.md'), 'page_b.md from col_b should be included'
	assert os.exists('${export_path}/content/col_a/page_c.md'), 'page_c.md from col_c should be included'
	// 3. Verify page content is correct
	content_a := os.read_file('${export_path}/content/col_a/page_a.md')!
	assert content_a.contains('# Page A'), 'page_a content should have title'
	assert content_a.contains('This is page A'), 'page_a content should have expected text'
	assert content_a.contains('[Link to Page B]'), 'page_a should have link to page_b'
	content_b := os.read_file('${export_path}/content/col_a/page_b.md')!
	assert content_b.contains('# Page B'), 'page_b content should have title'
	assert content_b.contains('This is page B'), 'page_b content should have expected text'
	assert content_b.contains('[Link to Page C]'), 'page_b should have link to page_c'
	content_c := os.read_file('${export_path}/content/col_a/page_c.md')!
	assert content_c.contains('# Page C'), 'page_c content should have title'
	assert content_c.contains('This is the final page'), 'page_c content should have expected text'
	// 4. Verify metadata exists and is non-empty
	assert os.exists('${export_path}/meta/col_a.json'), 'Metadata file for col_a should exist'
	meta_content := os.read_file('${export_path}/meta/col_a.json')!
	assert meta_content.len > 0, 'Metadata file should not be empty'
	// TODO: decode the metadata JSON and verify its structure (name field,
	// page entries) once the metadata schema is stable.
	// 5. Verify that pages from B and C are NOT exported to separate col_b and
	// col_c directories (their pages were copied into col_a instead)
	meta_col_b_exists := os.exists('${export_path}/meta/col_b.json')
	meta_col_c_exists := os.exists('${export_path}/meta/col_c.json')
	assert !meta_col_b_exists, 'col_b metadata should not exist (pages copied to col_a)'
	assert !meta_col_c_exists, 'col_c metadata should not exist (pages copied to col_a)'
	// 6. Verify the recursive depth worked:
	// all three pages are accessible through the exported col_a
	assert os.exists('${export_path}/content/col_a/page_a.md'), 'Level 1 page should exist'
	assert os.exists('${export_path}/content/col_a/page_b.md'), 'Level 2 page (via A->B) should exist'
	assert os.exists('${export_path}/content/col_a/page_c.md'), 'Level 3 page (via A->B->C) should exist'
	// 7. Verify the link chain is preserved in the exported content:
	// page_a links to page_b, page_b links to page_c
	page_a_content := os.read_file('${export_path}/content/col_a/page_a.md')!
	page_b_content := os.read_file('${export_path}/content/col_a/page_b.md')!
	// Links are preserved with collection:page format
	assert page_a_content.contains('col_b:page_b') || page_a_content.contains('page_b'), 'page_a should reference page_b'
	assert page_b_content.contains('col_c:page_c') || page_b_content.contains('page_c'), 'page_b should reference page_c'
	println(' Recursive cross-collection export test passed')
	println(' - All 3 pages exported to col_a directory (A -> B -> C)')
	println(' - Content verified for all pages')
	println(' - Metadata validated')
	println(' - Link chain preserved')
}
// Test recursive export with cross-collection images
// Setup: Collection A links to image in Collection B
// Expected: Image should be copied to col_a export directory
fn test_export_recursive_with_images() {
	col_a_path := '${test_base}/recursive_img/col_a'
	col_b_path := '${test_base}/recursive_img/col_b'
	os.mkdir_all(col_a_path)!
	os.mkdir_all(col_b_path)!
	os.mkdir_all('${col_a_path}/img')!
	os.mkdir_all('${col_b_path}/img')!
	// Collection A with a local image and a cross-collection link to B
	mut cfile_a := pathlib.get_file(path: '${col_a_path}/.collection', create: true)!
	cfile_a.write('name:col_a')!
	mut page_a := pathlib.get_file(path: '${col_a_path}/page_a.md', create: true)!
	// Use real newlines (\n); the previous '\\n' escapes wrote a literal
	// backslash-n sequence into the markdown instead of line breaks.
	page_a.write('# Page A\n\n![Local Image](local.png)\n\n[Link to B](col_b:page_b)')!
	// Create local image
	os.write_file('${col_a_path}/img/local.png', 'fake png data')!
	// Collection B with an image and the linked page
	mut cfile_b := pathlib.get_file(path: '${col_b_path}/.collection', create: true)!
	cfile_b.write('name:col_b')!
	mut page_b := pathlib.get_file(path: '${col_b_path}/page_b.md', create: true)!
	page_b.write('# Page B\n\n![B Image](b_image.jpg)')!
	// Create image in collection B
	os.write_file('${col_b_path}/img/b_image.jpg', 'fake jpg data')!
	// Create DocTree with both collections
	mut a := new()!
	a.add_collection(mut pathlib.get_dir(path: col_a_path)!)!
	a.add_collection(mut pathlib.get_dir(path: col_b_path)!)!
	// Validate and export
	a.validate_links()!
	export_path := '${test_base}/export_recursive_img'
	a.export(destination: export_path)!
	// Verify pages exported (page_b pulled in through the cross-collection link)
	assert os.exists('${export_path}/content/col_a/page_a.md'), 'page_a should exist'
	assert os.exists('${export_path}/content/col_a/page_b.md'), 'page_b from col_b should be included'
	// Verify images exported to col_a's image directory
	assert os.exists('${export_path}/content/col_a/img/local.png'), 'Local image should exist'
	assert os.exists('${export_path}/content/col_a/img/b_image.jpg'), 'Image from cross-collection reference should be copied'
	println(' Recursive cross-collection with images test passed')
}

View File

@@ -0,0 +1,207 @@
module core
import incubaid.herolib.core.pathlib
import os
const test_dir = '/tmp/doctree_save_test'
// testsuite_begin runs once before any test in this file:
// start from an empty, freshly created test directory.
fn testsuite_begin() {
	os.rmdir_all(test_dir) or {} // ignore "does not exist" on the first run
	os.mkdir_all(test_dir)!
}
// testsuite_end runs once after all tests in this file:
// best-effort removal of every temp artifact under test_dir.
fn testsuite_end() {
	os.rmdir_all(test_dir) or {}
}
// Builds a single collection on disk, scans it and checks it is picked up.
// The save/load round-trip below is still commented out because the load
// functionality is work-in-progress (see the module README).
fn test_save_and_load_basic() {
	// Create a collection with some content
	col_path := '${test_dir}/docs'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:docs')!
	mut page1 := pathlib.get_file(path: '${col_path}/intro.md', create: true)!
	page1.write('# Introduction\n\nWelcome to the docs!')!
	mut page2 := pathlib.get_file(path: '${col_path}/guide.md', create: true)!
	page2.write('# Guide\n\nMore content here.')!
	// Create and scan doctree
	// NOTE(review): this scans the test_dir ROOT, so collections created by
	// tests that run earlier could also be picked up — the `== 1` assert
	// relies on this test running first; confirm test ordering.
	mut a := new(name: 'my_docs')!
	a.scan(path: test_dir)!
	assert a.collections.len == 1
	// Save all collections
	// a.save(destination_meta: '/tmp/doctree_meta')!
	// assert os.exists('${col_path}/.collection.json')
	// // Load in a new doctree
	// mut a2 := new(name: 'loaded_docs')!
	// a2.load_from_directory(test_dir)!
	// assert a2.collections.len == 1
	// // Access loaded data
	// loaded_col := a2.get_collection('docs')!
	// assert loaded_col.name == 'docs'
	// assert loaded_col.pages.len == 2
	// // Verify pages exist
	// assert loaded_col.page_exists('intro')
	// assert loaded_col.page_exists('guide')
	// // Read page content
	// mut intro_page := loaded_col.page_get('intro')!
	// content := intro_page.read_content()!
	// assert content.contains('# Introduction')
	// assert content.contains('Welcome to the docs!')
}
// Scans a collection whose page contains an `!!include` action and checks
// that link validation resolves the include without reporting errors.
// The save/load round-trip stays commented out (load is work-in-progress).
fn test_save_and_load_with_includes() {
	col_path := '${test_dir}/docs_include'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:docs')!
	mut page1 := pathlib.get_file(path: '${col_path}/intro.md', create: true)!
	page1.write('# Introduction\n\nWelcome to the docs!')!
	// guide.md embeds intro via the "collection:page" include syntax
	mut page2 := pathlib.get_file(path: '${col_path}/guide.md', create: true)!
	page2.write('# Guide\n\n!!include docs:intro\n\nMore content here.')!
	// Create and scan doctree
	mut a := new(name: 'my_docs')!
	a.scan(path: '${test_dir}/docs_include')!
	// Validate links (should find the include)
	a.validate_links()!
	col := a.get_collection('docs')!
	assert !col.has_errors()
	// // Save
	// a.save(destination_meta: '/tmp/doctree_meta')!
	// // Load
	// mut a2 := new(name: 'loaded')!
	// a2.load_from_directory('${test_dir}/docs_include')!
	// loaded_col := a2.get_collection('docs')!
	// assert loaded_col.pages.len == 2
	// assert !loaded_col.has_errors()
}
// Creates a page with a broken link, validates it, and checks that errors
// are recorded on the collection. The save/load round-trip stays commented
// out (load is work-in-progress).
fn test_save_and_load_with_errors() {
	col_path := '${test_dir}/docs_errors'
	os.mkdir_all(col_path)!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:docs')!
	// Create page with broken link
	mut page1 := pathlib.get_file(path: '${col_path}/broken.md', create: true)!
	page1.write('[Broken link](nonexistent)')!
	// Create and scan doctree
	mut a := new(name: 'my_docs')!
	a.scan(path: '${test_dir}/docs_errors')!
	// Validate - will generate errors
	a.validate_links()!
	col := a.get_collection('docs')!
	assert col.has_errors()
	// initial_error_count is only needed by the commented-out round-trip
	// below; keep it commented too so it doesn't trigger V's
	// unused-variable diagnostic.
	// initial_error_count := col.errors.len
	// // Save with errors
	// a.save(destination_meta: '/tmp/doctree_meta')!
	// // Load
	// mut a2 := new(name: 'loaded')!
	// a2.load_from_directory('${test_dir}/docs_errors')!
	// loaded_col := a2.get_collection('docs')!
	// assert loaded_col.has_errors()
	// assert loaded_col.errors.len == initial_error_count
	// assert loaded_col.error_cache.len == initial_error_count
}
// Scans a directory holding two sibling collections and checks both are
// discovered. The save/load round-trip stays commented out (load is WIP).
fn test_save_and_load_multiple_collections() {
	// Create multiple collections
	col1_path := '${test_dir}/multi/col1'
	col2_path := '${test_dir}/multi/col2'
	os.mkdir_all(col1_path)!
	os.mkdir_all(col2_path)!
	mut cfile1 := pathlib.get_file(path: '${col1_path}/.collection', create: true)!
	cfile1.write('name:col1')!
	mut cfile2 := pathlib.get_file(path: '${col2_path}/.collection', create: true)!
	cfile2.write('name:col2')!
	mut page1 := pathlib.get_file(path: '${col1_path}/page1.md', create: true)!
	page1.write('# Page 1')!
	mut page2 := pathlib.get_file(path: '${col2_path}/page2.md', create: true)!
	page2.write('# Page 2')!
	// Create and save
	mut a := new(name: 'multi')!
	a.scan(path: '${test_dir}/multi')!
	assert a.collections.len == 2
	// a.save(destination_meta: '/tmp/doctree_meta')!
	// // Load from directory
	// mut a2 := new(name: 'loaded')!
	// a2.load_from_directory('${test_dir}/multi')!
	// assert a2.collections.len == 2
	// assert a2.get_collection('col1')!.page_exists('page1')
	// assert a2.get_collection('col2')!.page_exists('page2')
}
// Scans a collection containing an image in img/ and checks the image is
// registered and retrievable. The save/load round-trip stays commented out
// (load is work-in-progress).
fn test_save_and_load_with_images() {
	col_path := '${test_dir}/docs_images'
	os.mkdir_all(col_path)!
	os.mkdir_all('${col_path}/img')!
	mut cfile := pathlib.get_file(path: '${col_path}/.collection', create: true)!
	cfile.write('name:docs')!
	mut page := pathlib.get_file(path: '${col_path}/page.md', create: true)!
	page.write('# Page with image')!
	// Create a dummy image file
	mut img := pathlib.get_file(path: '${col_path}/img/test.png', create: true)!
	img.write('fake png data')!
	// Create and scan
	mut a := new(name: 'my_docs')!
	a.scan(path: '${test_dir}/docs_images')!
	col := a.get_collection('docs')!
	// assert col.images.len == 1
	assert col.image_exists('test.png')!
	// // Save
	// a.save(destination_meta: '/tmp/doctree_meta')!
	// // Load
	// mut a2 := new(name: 'loaded')!
	// a2.load_from_directory('${test_dir}/docs_images')!
	// loaded_col := a2.get_collection('docs')!
	// assert loaded_col.images.len == 1
	// assert loaded_col.image_exists('test.png')!
	img_file := col.image_get('test.png')!
	assert img_file.name == 'test.png'
	assert img_file.is_image()
}

View File

@@ -1,4 +1,4 @@
module site
module meta
import incubaid.herolib.core.texttools

View File

@@ -0,0 +1,39 @@
module meta
import incubaid.herolib.data.doctree.client as doctree_client
import incubaid.herolib.data.markdown.tools as markdowntools
// Page represents a single documentation page
// Page represents a single documentation page of a site, identified by its
// doctree key and enriched with display metadata.
pub struct Page {
pub mut:
	id          string // unique identifier: "collection:page_name"
	title       string // display title; extracted from the markdown when empty
	description string // brief description for metadata; defaults to title when empty
	questions   []Question // FAQ-style question/answer pairs attached to the page
}
// Question is a single question/answer pair attached to a Page.
pub struct Question {
pub mut:
	question string
	answer   string
}
// content fetches the page's markdown through the doctree export client.
//
// Side effects: lazily fills in `title` (taken from the markdown via
// markdowntools.extract_title) and `description` (defaults to the title)
// when they are still empty.
//
// NOTE(review): p.id is a combined "collection:page" key passed as a single
// argument; confirm the client's get_page_content accepts combined keys
// rather than separate (collection, page) arguments.
pub fn (mut p Page) content(client doctree_client.AtlasClient) !string {
	c := client.get_page_content(p.id)!
	if p.title == '' {
		p.title = markdowntools.extract_title(c)
	}
	// TODO: in the future the description could be AI-generated
	if p.description == '' {
		p.description = p.title
	}
	return c
}

View File

@@ -0,0 +1,36 @@
module meta
import json
// ============================================================================
// Sidebar Navigation Models (Domain Types)
// ============================================================================
// SideBar is the root of a site's navigation tree.
pub struct SideBar {
pub mut:
	my_sidebar []NavItem // ordered top-level navigation entries
}
// NavItem is any entry that can appear in the sidebar:
// a document, a collapsible category, or a plain link.
pub type NavItem = NavDoc | NavCat | NavLink
// NavDoc is a sidebar entry pointing at a single documentation page.
pub struct NavDoc {
pub:
	id         string // page id, "collection:page_name" (matches Site.pages keys)
	label      string // text shown in the sidebar
	hide_title bool   // presumably suppresses the page's own title in the UI — confirm in renderer
}
// NavCat groups navigation items under a category heading.
pub struct NavCat {
pub mut:
	label       string
	collapsible bool = true // category can be folded in the UI
	collapsed   bool        // whether the category starts folded
	items       []NavItem   // children: docs, links or nested categories
}
// NavLink is a sidebar entry that points at an arbitrary URL.
pub struct NavLink {
pub:
	label       string
	href        string // link target (absolute or site-relative URL)
	description string
}

View File

@@ -1,9 +1,9 @@
module site
module meta
@[heap]
pub struct Site {
pub mut:
pages map[string]Page // key: "collection:page_name"
nav SideBar // Navigation sidebar configuration
siteconfig SiteConfig // Full site configuration
siteconfig SiteConfig // Full site configuration
}

View File

@@ -1,4 +1,4 @@
module site
module meta
import os
// Combined config structure

View File

@@ -1,4 +1,4 @@
module site
module meta
import os
import incubaid.herolib.core.playbook { PlayBook }

View File

@@ -1,4 +1,4 @@
module site
module meta
import os
import incubaid.herolib.core.playbook { PlayBook }

View File

@@ -1,4 +1,4 @@
module site
module meta
import os
import incubaid.herolib.core.playbook { PlayBook }

View File

@@ -1,4 +1,4 @@
module site
module meta
import os
import incubaid.herolib.core.playbook { PlayBook }

View File

@@ -1,4 +1,4 @@
module site
module meta
import os
import incubaid.herolib.core.playbook { PlayBook }

View File

@@ -1,4 +1,4 @@
module site
module meta
import os
import incubaid.herolib.core.playbook { PlayBook }
@@ -6,18 +6,6 @@ import incubaid.herolib.core.texttools
import time
import incubaid.herolib.ui.console
// ============================================================
// Helper function: normalize name while preserving .md extension handling
// ============================================================
fn normalize_page_name(name string) string {
mut result := name
// Remove .md extension if present for processing
if result.ends_with('.md') {
result = result[0..result.len - 3]
}
// Apply name fixing
return texttools.name_fix(result)
}
// ============================================================
// Internal structure for tracking category information
@@ -60,8 +48,8 @@ fn play_pages(mut plbook PlayBook, mut website Site) ! {
category_name_fixed := texttools.name_fix(category_name)
// Get label (derive from name if not specified)
mut label := p.get_default('label', texttools.name_fix_snake_to_pascal(category_name_fixed))!
// label is empty when not specified
mut label := p.get_default('label', "")!
mut position := p.get_int_default('position', next_category_position)!
// Auto-increment position if using default
@@ -139,14 +127,11 @@ fn play_pages(mut plbook PlayBook, mut website Site) ! {
website.pages[page_id] = page
// Create navigation item with human-readable label
nav_label := if page_title.len > 0 {
page_title
} else {
texttools.title_case(page_name)
}
// nav_label := page_title.len
nav_doc := NavDoc{
id: page_id
label: nav_label
id: page.id
label: page.title
hide_title: page.hide_title
}
// Add to appropriate category or root

View File

@@ -1,4 +1,4 @@
module site
module meta
import os
import incubaid.herolib.core.playbook { PlayBook }

View File

@@ -511,10 +511,10 @@ The modern ebook structure uses `.hero` files for configuration and `.heroscript
```
my_ebook/
├── scan.hero # !!atlas.scan - collection scanning
├── scan.hero # !!doctree.scan - collection scanning
├── config.hero # !!site.config - site configuration
├── menus.hero # !!site.navbar and !!site.footer
├── include.hero # !!docusaurus.define and !!atlas.export
├── include.hero # !!docusaurus.define and !!doctree.export
├── 1_intro.heroscript # Page definitions (categories + pages)
├── 2_concepts.heroscript # More page definitions
└── 3_advanced.heroscript # Additional pages
@@ -530,7 +530,7 @@ Use numeric prefixes on `.heroscript` files to control page/category ordering in
### Example scan.hero
```heroscript
!!atlas.scan path:"../../collections/my_collection"
!!doctree.scan path:"../../collections/my_collection"
```
### Example include.hero
@@ -542,7 +542,7 @@ Use numeric prefixes on `.heroscript` files to control page/category ordering in
// Or define directly
!!docusaurus.define name:'my_ebook'
!!atlas.export include:true
!!doctree.export include:true
```
### Running an Ebook

View File

@@ -1,4 +1,4 @@
module site
module meta
import incubaid.herolib.core.playbook
import incubaid.herolib.ui.console

View File

@@ -0,0 +1,2 @@
module meta

48
lib/web/doctree/utils.v Normal file
View File

@@ -0,0 +1,48 @@
module doctree
import incubaid.herolib.core.texttools
// returns collection and file name from "collection:file" format
// works for file, image, page keys
// key_parse splits a "collection:file" key into its normalized parts.
// Works for file, image and page keys alike.
// Both parts are normalized with texttools.name_fix.
// Returns an error (quoting the offending key) when the key does not
// contain exactly one ':'.
pub fn key_parse(key string) !(string, string) {
	parts := key.split(':')
	if parts.len != 2 {
		// include the key itself so callers can see which input was invalid
		return error('Invalid key format "${key}". Use "collection:file"')
	}
	col := texttools.name_fix(parts[0])
	file := texttools.name_fix(parts[1])
	return col, file
}
// ============================================================
// Helper function: normalize name while preserving .md extension handling
// ============================================================
pub fn name_fix(name string) string {
	// drop a trailing ".md" so extension-less lookups match page files
	base := if name.ends_with('.md') { name[..name.len - 3] } else { name }
	// remove any numeric ordering prefix (e.g. "03_intro" -> "intro"),
	// then apply the standard name normalization
	return texttools.name_fix(strip_numeric_prefix(base))
}
// strip_numeric_prefix drops a leading "<digits>_" ordering prefix from a
// filename (e.g. "03_linux_installation" -> "linux_installation").
// Docusaurus strips these prefixes from URLs automatically, so internal
// names must match without them. Names of 2 chars or fewer, names not
// starting with a digit, and names whose digit run is not followed by an
// underscore are returned unchanged.
fn strip_numeric_prefix(name string) string {
	// guard: too short, or does not start with a digit -> nothing to strip
	if name.len <= 2 || !name[0].is_digit() {
		return name
	}
	for i in 1 .. name.len {
		ch := name[i]
		if ch == `_` {
			// first underscore ends the prefix; the rest is the real name
			return name[i + 1..]
		}
		if !ch.is_digit() {
			// a non-digit before any '_' means this is not an ordering prefix
			break
		}
	}
	return name
}

View File

@@ -25,10 +25,10 @@ The recommended structure for an ebook follows this pattern:
```
my_ebook/
├── scan.hero # Atlas collection scanning
├── scan.hero # DocTree collection scanning
├── config.hero # Site configuration
├── menus.hero # Navbar and footer configuration
├── include.hero # Docusaurus define and atlas export
├── include.hero # Docusaurus define and doctree export
├── 1_intro.heroscript # Page definitions (numbered for ordering)
├── 2_concepts.heroscript # More page definitions
└── 3_advanced.heroscript # Additional pages
@@ -42,10 +42,10 @@ Defines which collections to scan for content:
```heroscript
// Scan local collections
!!atlas.scan path:"../../collections/my_collection"
!!doctree.scan path:"../../collections/my_collection"
// Scan remote collections from git
!!atlas.scan git_url:"https://git.example.com/org/repo/src/branch/main/collections/docs"
!!doctree.scan git_url:"https://git.example.com/org/repo/src/branch/main/collections/docs"
```
#### `config.hero` - Site Configuration
@@ -113,7 +113,7 @@ Links to shared configuration or defines docusaurus directly:
// Option 2: Define directly
!!docusaurus.define name:'my_ebook'
!!atlas.export include:true
!!doctree.export include:true
```
#### Page Definition Files (`*.heroscript`)
@@ -145,7 +145,7 @@ Define pages and categories:
## Collections
Collections are directories containing markdown files. They're scanned by Atlas and referenced in page definitions.
Collections are directories containing markdown files. They're scanned by DocTree and referenced in page definitions.
```
collections/
@@ -189,16 +189,16 @@ The older approach using `!!docusaurus.add` is still supported but not recommend
## HeroScript Actions Reference
### `!!atlas.scan`
### `!!doctree.scan`
Scans a directory for markdown collections:
- `path` (string): Local path to scan
- `git_url` (string): Git URL to clone and scan
- `name` (string): Atlas instance name (default: `main`)
- `name` (string): DocTree instance name (default: `main`)
- `ignore` (list): Directory names to skip
### `!!atlas.export`
### `!!doctree.export`
Exports scanned collections:
@@ -215,7 +215,7 @@ Configures the Docusaurus build environment:
- `reset` (bool): Clean build directory before starting
- `template_update` (bool): Update Docusaurus template
- `install` (bool): Run `bun install`
- `atlas_dir` (string): Atlas export directory
- `doctree_dir` (string): DocTree export directory
### `!!site.config`
@@ -254,4 +254,4 @@ Defines a sidebar category:
## See Also
- `lib/web/site` - Generic site configuration module
- `lib/data/atlas` - Atlas collection management
- `lib/data/doctree` - DocTree collection management

View File

@@ -17,7 +17,7 @@ pub mut:
reset bool
template_update bool
coderoot string
atlas_dir string
doctree_dir string
}
@[params]
@@ -29,7 +29,7 @@ pub mut:
reset bool
template_update bool
coderoot string
atlas_dir string
doctree_dir string
}
// return the last known config
@@ -38,8 +38,8 @@ pub fn config() !DocusaurusConfig {
docusaurus_config << DocusaurusConfigParams{}
}
mut args := docusaurus_config[0] or { panic('bug in docusaurus config') }
if args.atlas_dir == '' {
return error('atlas_dir is not set')
if args.doctree_dir == '' {
return error('doctree_dir is not set')
}
if args.path_build == '' {
args.path_build = '${os.home_dir()}/hero/var/docusaurus/build'
@@ -58,7 +58,7 @@ pub fn config() !DocusaurusConfig {
install: args.install
reset: args.reset
template_update: args.template_update
atlas_dir: args.atlas_dir
doctree_dir: args.doctree_dir
}
if c.install {
install(c)!

View File

@@ -73,7 +73,7 @@ pub mut:
port int = 3000
open bool = true // whether to open the browser automatically
watch_changes bool // whether to watch for changes in docs and rebuild automatically
skip_generate bool // whether to skip generation (useful when docs are pre-generated, e.g., from atlas)
skip_generate bool // whether to skip generation (useful when docs are pre-generated, e.g., from doctree)
}
pub fn (mut s DocSite) open(args DevArgs) ! {

View File

@@ -37,7 +37,7 @@ pub fn (mut docsite DocSite) generate() ! {
mut sidebar_file := pathlib.get_file(path: '${cfg_path}/sidebar.json', create: true)!
sidebar_file.write(docsite.config.sidebar_json_txt)!
docsite.link_docs()!
docsite.generate_docs()!
docsite.import()!
}

View File

@@ -1,14 +1,14 @@
module docusaurus
import incubaid.herolib.core.pathlib
import incubaid.herolib.data.atlas.client as atlas_client
import incubaid.herolib.data.doctree.client as doctree_client
import incubaid.herolib.data.markdown.tools as markdowntools
import incubaid.herolib.ui.console
import incubaid.herolib.web.site
import os
// ============================================================================
// Doc Linking - Generate Docusaurus docs from Atlas collections
// Doc Linking - Generate Docusaurus docs from DocTree collections
// ============================================================================
// get_first_doc_from_sidebar recursively finds the first doc ID in the sidebar.
@@ -35,16 +35,16 @@ fn get_first_doc_from_sidebar(items []site.NavItem) string {
return ''
}
// link_docs generates markdown files from site page definitions.
// Pages are fetched from Atlas collections and written with frontmatter.
pub fn (mut docsite DocSite) link_docs() ! {
// generate_docs generates markdown files from site page definitions.
// Pages are fetched from DocTree collections and written with frontmatter.
pub fn (mut docsite DocSite) generate_docs() ! {
c := config()!
docs_path := '${c.path_build.path}/docs'
reset_docs_dir(docs_path)!
console.print_header('Linking docs to ${docs_path}')
console.print_header('Write doc: ${docs_path}')
mut client := atlas_client.new(export_dir: c.atlas_dir)!
mut client := doctree_client.new(export_dir: c.doctree_dir)!
mut errors := []string{}
// Determine if we need to set a docs landing page (when url_home ends with "/")
@@ -72,7 +72,7 @@ fn reset_docs_dir(docs_path string) ! {
os.mkdir_all(docs_path)!
}
fn report_errors(mut client atlas_client.AtlasClient, errors []string) ! {
fn report_errors(mut client doctree_client.AtlasClient, errors []string) ! {
available := client.list_markdown() or { 'Could not list available pages' }
console.print_stderr('Available pages:\n${available}')
return error('Errors during doc generation:\n${errors.join('\n\n')}')
@@ -82,7 +82,7 @@ fn report_errors(mut client atlas_client.AtlasClient, errors []string) ! {
// Page Processing
// ============================================================================
fn process_page(mut client atlas_client.AtlasClient, docs_path string, page site.Page, first_doc_page string, mut errors []string) {
fn process_page(mut client doctree_client.AtlasClient, docs_path string, page site.Page, first_doc_page string, mut errors []string) {
collection, page_name := parse_page_src(page.src) or {
errors << err.msg()
return
@@ -122,7 +122,7 @@ fn write_page(docs_path string, page_name string, page site.Page, content string
file.write(final_content)!
}
fn copy_page_assets(mut client atlas_client.AtlasClient, docs_path string, collection string, page_name string) {
fn copy_page_assets(mut client doctree_client.AtlasClient, docs_path string, collection string, page_name string) {
client.copy_images(collection, page_name, docs_path) or {}
client.copy_files(collection, page_name, docs_path) or {}
}
@@ -132,12 +132,21 @@ fn copy_page_assets(mut client atlas_client.AtlasClient, docs_path string, colle
// ============================================================================
fn build_frontmatter(page site.Page, content string, is_landing_page bool) string {
title := get_title(page, content)
description := get_description(page, title)
mut lines := ['---']
lines << "title: '${escape_yaml(title)}'"
lines << "description: '${escape_yaml(description)}'"
lines << "title: '${title}'"
lines << "description: '${description}'"
// if page.id.contains('tfhowto_tools'){
// println('extracted title: ${title}')
// println('page.src: ${lines}')
// $dbg;
// }
// Add slug: / for the docs landing page so /docs/ works directly
if is_landing_page {
@@ -154,25 +163,3 @@ fn build_frontmatter(page site.Page, content string, is_landing_page bool) strin
lines << '---'
return lines.join('\n')
}
// get_title resolves the frontmatter title for a page, in priority order:
// the explicit page.title, then the title extracted from the markdown
// content, then the page-name part of the "collection:page_name" src.
fn get_title(page site.Page, content string) string {
	if page.title.len > 0 {
		return page.title
	}
	extracted := markdowntools.extract_title(content)
	if extracted.len > 0 {
		return extracted
	}
	// last resort: derive the title from the src reference
	return page.src.split(':').last()
}
// get_description returns page.description when set, otherwise falls back
// to the already-resolved title so the frontmatter is never empty.
fn get_description(page site.Page, title string) string {
	if page.description.len > 0 {
		return page.description
	}
	return title
}
// escape_yaml doubles every single quote so the value can be embedded
// safely inside a single-quoted YAML scalar.
fn escape_yaml(s string) string {
	return s.split("'").join("''")
}

View File

@@ -1,7 +1,7 @@
module docusaurus
import incubaid.herolib.core.pathlib
// import incubaid.herolib.data.atlas.client as atlas_client
// import incubaid.herolib.data.doctree.client as doctree_client
// import incubaid.herolib.web.site { Page, Section, Site }
// import incubaid.herolib.data.markdown.tools as markdowntools
// import incubaid.herolib.ui.console
@@ -24,7 +24,7 @@ import incubaid.herolib.core.pathlib
// docs_path := '${c.path_build.path}/docs'
// // Create the appropriate client based on configuration
// mut client_instance := atlas_client.new(export_dir: c.atlas_dir)!
// mut client_instance := doctree_client.new(export_dir: c.doctree_dir)!
// mut client := IDocClient(client_instance)
// mut gen := SiteGenerator{
@@ -378,8 +378,8 @@ import incubaid.herolib.core.pathlib
// }
// }
// // STEP 5: Fix bare page references (from atlas self-contained exports)
// // Atlas exports convert cross-collection links to simple relative links like "token_system2.md"
// // STEP 5: Fix bare page references (from doctree self-contained exports)
// // DocTree exports convert cross-collection links to simple relative links like "token_system2.md"
// // We need to transform these to proper relative paths based on Docusaurus structure
// for page_name, target_dir in page_to_path {
// // Match links in the format ](page_name) or ](page_name.md)

View File

@@ -0,0 +1,60 @@
module doc
import json
import incubaid.herolib.web.site
// This is the logic to create the Docusaurus sidebar.json from site.NavItems.
// SidebarItem is the JSON-serialization shape for one Docusaurus sidebar
// entry. A single flat struct covers all three entry kinds (doc, link,
// category) so encoding does not emit V's sum-type `_type` field; fields
// not used by a given kind are dropped via @[omitempty].
struct SidebarItem {
	typ         string        @[json: 'type'] // 'doc' | 'link' | 'category'
	id          string        @[omitempty] // doc id (doc entries only)
	label       string
	href        string        @[omitempty] // target URL (link entries only)
	description string        @[omitempty]
	collapsible bool          @[json: 'collapsible'; omitempty]
	collapsed   bool          @[json: 'collapsed'; omitempty]
	items       []SidebarItem @[omitempty] // children (category entries only)
}
// ============================================================================
// JSON Serialization
// ============================================================================
// sidebar_to_json renders a site.SideBar as the pretty-printed JSON array
// Docusaurus expects in sidebar.json.
// NOTE(review): this calls the `json` module — confirm `import json` is
// present at the top of this file.
pub fn sidebar_to_json(sb site.SideBar) !string {
	items := sb.my_sidebar.map(to_sidebar_item(it))
	return json.encode_pretty(items)
}
// to_sidebar_item converts a site.NavItem (doc, link or category) into
// its Docusaurus sidebar JSON representation.
// Fix: NavItem is a sum type declared in module `site`, so its variants
// must be referenced with their qualified names here in module `doc` —
// unqualified NavDoc/NavLink/NavCat do not resolve outside `site`.
fn to_sidebar_item(item site.NavItem) SidebarItem {
	return match item {
		site.NavDoc { from_doc(item) }
		site.NavLink { from_link(item) }
		site.NavCat { from_category(item) }
	}
}
// from_doc maps a site.NavDoc to a 'doc' sidebar entry.
// NOTE(review): doc.id is used verbatim here; it is not reduced from a
// "collection:page_name" form to a bare page name — confirm the ids
// arriving here are already plain Docusaurus doc ids.
fn from_doc(doc site.NavDoc) SidebarItem {
	return SidebarItem{
		typ: 'doc'
		id: doc.id
		label: doc.label
	}
}
// from_link maps a site.NavLink to a 'link' sidebar entry.
fn from_link(link site.NavLink) SidebarItem {
	return SidebarItem{
		typ: 'link'
		label: link.label
		href: link.href
		description: link.description
	}
}
// from_category maps a site.NavCat — and, recursively, its children —
// to a 'category' sidebar entry.
fn from_category(cat site.NavCat) SidebarItem {
	return SidebarItem{
		typ: 'category'
		label: cat.label
		collapsible: cat.collapsible
		collapsed: cat.collapsed
		items: cat.items.map(to_sidebar_item(it))
	}
}

View File

@@ -75,7 +75,7 @@ After running the test:
If links don't resolve:
1. Check that the collection is registered in the atlas
1. Check that the collection is registered in the doctree
2. Verify page names match (no typos)
3. Run with debug flag (`-d`) to see detailed output
4. Check `~/hero/var/docusaurus/build/docs/` for generated files

View File

@@ -10,7 +10,7 @@ If links appear broken, check:
1. The collection name is correct
2. The page name matches the markdown filename (without `.md`)
3. The collection is properly registered in the atlas
3. The collection is properly registered in the doctree
### Page Not Found
@@ -29,11 +29,11 @@ Ensure the page is defined in your heroscript:
## Error Messages
| Error | Solution |
|-------|----------|
| "Page not found" | Check page name spelling |
| "Collection not found" | Verify atlas configuration |
| "Link resolution failed" | Check link syntax |
| Error | Solution |
| ------------------------ | ---------------------------- |
| "Page not found" | Check page name spelling |
| "Collection not found" | Verify doctree configuration |
| "Link resolution failed" | Check link syntax |
## Navigation

View File

@@ -1,4 +1,4 @@
!!docusaurus.define name:'test_site'
!!atlas.export include:true
!!doctree.export include:true

View File

@@ -1,2 +1,2 @@
!!atlas.scan path:"../../collections/test_collection"
!!doctree.scan path:"../../collections/test_collection"

View File

@@ -1,7 +1,7 @@
module docusaurus
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.data.atlas
import incubaid.herolib.data.doctree
import incubaid.herolib.ui.console
import os
@@ -24,7 +24,7 @@ fn process_define(mut plbook PlayBook) !&DocSite {
mut action := plbook.ensure_once(filter: 'docusaurus.define')!
p := action.params
atlas_dir := p.get_default('atlas_dir', '${os.home_dir()}/hero/var/atlas_export')!
doctree_dir := p.get_default('doctree_dir', '${os.home_dir()}/hero/var/doctree_export')!
config_set(
path_build: p.get_default('path_build', '')!
@@ -32,13 +32,13 @@ fn process_define(mut plbook PlayBook) !&DocSite {
reset: p.get_default_false('reset')
template_update: p.get_default_false('template_update')
install: p.get_default_false('install')
atlas_dir: atlas_dir
doctree_dir: doctree_dir
)!
site_name := p.get('name') or { return error('docusaurus.define: "name" is required') }
atlas_name := p.get_default('atlas', 'main')!
doctree_name := p.get_default('doctree', 'main')!
export_atlas(atlas_name, atlas_dir)!
export_doctree(doctree_name, doctree_dir)!
dsite_define(site_name)!
action.done = true
@@ -77,11 +77,11 @@ fn process_dev(mut plbook PlayBook, mut dsite DocSite) ! {
action.done = true
}
fn export_atlas(name string, dir string) ! {
if !atlas.exists(name) {
fn export_doctree(name string, dir string) ! {
if !doctree.exists(name) {
return
}
console.print_debug('Auto-exporting Atlas "${name}" to ${dir}')
mut a := atlas.get(name)!
console.print_debug('Auto-exporting DocTree "${name}" to ${dir}')
mut a := doctree.get(name)!
a.export(destination: dir, reset: true, include: true, redis: false)!
}

View File

@@ -1,536 +0,0 @@
# AI Instructions for Site Module HeroScript
This document provides comprehensive instructions for AI agents working with the Site module's HeroScript format.
## HeroScript Format Overview
HeroScript is a declarative configuration language with the following characteristics:
### Basic Syntax
```heroscript
!!actor.action
param1: "value1"
param2: "value2"
multiline_param: "
This is a multiline value.
It can span multiple lines.
"
arg1 arg2 // Arguments without keys
```
**Key Rules:**
1. Actions start with `!!` followed by `actor.action` format
2. Parameters are indented and use `key: "value"` or `key: value` format
3. Values with spaces must be quoted
4. Multiline values are supported with quotes
5. Arguments without keys are space-separated
6. Comments start with `//`
## Site Module Actions
### 1. Site Configuration (`!!site.config`)
**Purpose:** Define the main site configuration including title, description, and metadata.
**Required Parameters:**
- `name`: Site identifier (will be normalized to snake_case)
**Optional Parameters:**
- `title`: Site title (default: "Documentation Site")
- `description`: Site description
- `tagline`: Site tagline
- `favicon`: Path to favicon (default: "img/favicon.png")
- `image`: Default site image (default: "img/tf_graph.png")
- `copyright`: Copyright text
- `url`: Main site URL
- `base_url`: Base URL path (default: "/")
- `url_home`: Home page path
**Example:**
```heroscript
!!site.config
name: "my_documentation"
title: "My Documentation Site"
description: "Comprehensive technical documentation"
tagline: "Learn everything you need"
url: "https://docs.example.com"
base_url: "/"
```
**AI Guidelines:**
- Always include `name` parameter
- Use descriptive titles and descriptions
- Ensure URLs are properly formatted with protocol
### 2. Metadata Configuration (`!!site.config_meta`)
**Purpose:** Override specific metadata for SEO purposes.
**Optional Parameters:**
- `title`: SEO-specific title (overrides site.config title for meta tags)
- `image`: SEO-specific image (overrides site.config image for og:image)
- `description`: SEO-specific description
**Example:**
```heroscript
!!site.config_meta
title: "My Docs - Complete Guide"
image: "img/social-preview.png"
description: "The ultimate guide to using our platform"
```
**AI Guidelines:**
- Use only when SEO metadata needs to differ from main config
- Keep titles concise for social media sharing
- Use high-quality images for social previews
### 3. Navigation Bar (`!!site.navbar` or `!!site.menu`)
**Purpose:** Configure the main navigation bar.
**Optional Parameters:**
- `title`: Navigation title (defaults to site.config title)
- `logo_alt`: Logo alt text
- `logo_src`: Logo image path
- `logo_src_dark`: Dark mode logo path
**Example:**
```heroscript
!!site.navbar
title: "My Site"
logo_alt: "My Site Logo"
logo_src: "img/logo.svg"
logo_src_dark: "img/logo-dark.svg"
```
**AI Guidelines:**
- Use `!!site.navbar` for modern syntax (preferred)
- `!!site.menu` is supported for backward compatibility
- Provide both light and dark logos when possible
### 4. Navigation Items (`!!site.navbar_item` or `!!site.menu_item`)
**Purpose:** Add items to the navigation bar.
**Required Parameters (one of):**
- `to`: Internal link path
- `href`: External URL
**Optional Parameters:**
- `label`: Display text (required in practice)
- `position`: "left" or "right" (default: "right")
**Example:**
```heroscript
!!site.navbar_item
label: "Documentation"
to: "docs/intro"
position: "left"
!!site.navbar_item
label: "GitHub"
href: "https://github.com/myorg/repo"
position: "right"
```
**AI Guidelines:**
- Use `to` for internal navigation
- Use `href` for external links
- Position important items on the left, secondary items on the right
### 5. Footer Configuration (`!!site.footer`)
**Purpose:** Configure footer styling.
**Optional Parameters:**
- `style`: "dark" or "light" (default: "dark")
**Example:**
```heroscript
!!site.footer
style: "dark"
```
### 6. Footer Items (`!!site.footer_item`)
**Purpose:** Add links to the footer, grouped by title.
**Required Parameters:**
- `title`: Group title (items with same title are grouped together)
- `label`: Link text
**Required Parameters (one of):**
- `to`: Internal link path
- `href`: External URL
**Example:**
```heroscript
!!site.footer_item
title: "Docs"
label: "Introduction"
to: "intro"
!!site.footer_item
title: "Docs"
label: "API Reference"
to: "api"
!!site.footer_item
title: "Community"
label: "Discord"
href: "https://discord.gg/example"
```
**AI Guidelines:**
- Group related links under the same title
- Use consistent title names across related items
- Provide both internal and external links as appropriate
### 7. Page Categories (`!!site.page_category`)
**Purpose:** Create a section/category to organize pages.
**Required Parameters:**
- `name`: Category identifier (snake_case)
**Optional Parameters:**
- `label`: Display name (auto-generated from name if not provided)
- `position`: Manual sort order (auto-incremented if not specified)
- `path`: URL path segment (defaults to normalized label)
**Example:**
```heroscript
!!site.page_category
name: "getting_started"
label: "Getting Started"
position: 100
!!site.page_category
name: "advanced_topics"
label: "Advanced Topics"
```
**AI Guidelines:**
- Use descriptive snake_case names
- Let label be auto-generated when possible (name_fix converts to Title Case)
- Categories persist for all subsequent pages until a new category is declared
- Position values should leave gaps (100, 200, 300) for future insertions
### 8. Pages (`!!site.page`)
**Purpose:** Define individual pages in the site.
**Required Parameters:**
- `src`: Source reference as `collection:page_name` (required for first page in a collection)
**Optional Parameters:**
- `name`: Page identifier (extracted from src if not provided)
- `title`: Page title (extracted from markdown if not provided)
- `description`: Page description for metadata
- `slug`: Custom URL slug
- `position`: Manual sort order (auto-incremented if not specified)
- `draft`: Mark as draft (default: false)
- `hide_title`: Hide title in rendering (default: false)
- `path`: Custom path (defaults to current category name)
- `category`: Override current category
- `title_nr`: Title numbering level
**Example:**
```heroscript
!!site.page src: "docs:introduction"
description: "Introduction to the platform"
slug: "/"
!!site.page src: "quickstart"
description: "Get started in 5 minutes"
!!site.page src: "installation"
title: "Installation Guide"
description: "How to install and configure"
position: 10
```
**AI Guidelines:**
- **Collection Persistence:** Specify collection once (e.g., `docs:introduction`), then subsequent pages only need page name (e.g., `quickstart`)
- **Category Persistence:** Pages belong to the most recently declared category
- **Title Extraction:** Prefer extracting titles from markdown files
- **Position Management:** Use automatic positioning unless specific order is required
- **Description Required:** Always provide descriptions for SEO
- **Slug Usage:** Use slug for special pages like homepage (`slug: "/"`)
### 9. Import External Content (`!!site.import`)
**Purpose:** Import content from external sources.
**Optional Parameters:**
- `name`: Import identifier
- `url`: Git URL or HTTP URL
- `path`: Local file system path
- `dest`: Destination path in site
- `replace`: Comma-separated key:value pairs for variable replacement
- `visible`: Whether imported content is visible (default: true)
**Example:**
```heroscript
!!site.import
url: "https://github.com/example/docs"
dest: "external"
replace: "VERSION:1.0.0,PROJECT:MyProject"
visible: true
```
**AI Guidelines:**
- Use for shared documentation across multiple sites
- Replace variables using `${VARIABLE}` syntax in source content
- Set `visible: false` for imported templates or partials
### 10. Publish Destinations (`!!site.publish` and `!!site.publish_dev`)
**Purpose:** Define where to publish the built site.
**Optional Parameters:**
- `path`: File system path or URL
- `ssh_name`: SSH connection name for remote deployment
**Example:**
```heroscript
!!site.publish
path: "/var/www/html/docs"
ssh_name: "production_server"
!!site.publish_dev
path: "/tmp/docs-preview"
```
**AI Guidelines:**
- Use `!!site.publish` for production deployments
- Use `!!site.publish_dev` for development/preview deployments
- Can specify multiple destinations
## File Organization Best Practices
### Naming Convention
Use numeric prefixes to control execution order:
```
0_config.heroscript # Site configuration
1_navigation.heroscript # Menu and footer
2_intro.heroscript # Introduction pages
3_guides.heroscript # User guides
4_reference.heroscript # API reference
```
**AI Guidelines:**
- Always use numeric prefixes (0_, 1_, 2_, etc.)
- Leave gaps in numbering (0, 10, 20) for future insertions
- Group related configurations in the same file
- Process order matters: config → navigation → pages
### Execution Order Rules
1. **Configuration First:** `!!site.config` must be processed before other actions
2. **Categories Before Pages:** Declare `!!site.page_category` before pages in that category
3. **Collection Persistence:** First page in a collection must specify `collection:page_name`
4. **Category Persistence:** Pages inherit the most recent category declaration
## Common Patterns
### Pattern 1: Simple Documentation Site
```heroscript
!!site.config
name: "simple_docs"
title: "Simple Documentation"
!!site.navbar
title: "Simple Docs"
!!site.page src: "docs:index"
description: "Welcome page"
slug: "/"
!!site.page src: "getting-started"
description: "Getting started guide"
!!site.page src: "api"
description: "API reference"
```
### Pattern 2: Multi-Section Documentation
```heroscript
!!site.config
name: "multi_section_docs"
title: "Complete Documentation"
!!site.page_category
name: "introduction"
label: "Introduction"
!!site.page src: "docs:welcome"
description: "Welcome to our documentation"
!!site.page src: "overview"
description: "Platform overview"
!!site.page_category
name: "tutorials"
label: "Tutorials"
!!site.page src: "tutorial_basics"
description: "Basic tutorial"
!!site.page src: "tutorial_advanced"
description: "Advanced tutorial"
```
### Pattern 3: Complex Site with External Links
```heroscript
!!site.config
name: "complex_site"
title: "Complex Documentation Site"
url: "https://docs.example.com"
!!site.navbar
title: "My Platform"
logo_src: "img/logo.svg"
!!site.navbar_item
label: "Docs"
to: "docs/intro"
position: "left"
!!site.navbar_item
label: "API"
to: "api"
position: "left"
!!site.navbar_item
label: "GitHub"
href: "https://github.com/example/repo"
position: "right"
!!site.footer
style: "dark"
!!site.footer_item
title: "Documentation"
label: "Getting Started"
to: "docs/intro"
!!site.footer_item
title: "Community"
label: "Discord"
href: "https://discord.gg/example"
!!site.page_category
name: "getting_started"
!!site.page src: "docs:introduction"
description: "Introduction to the platform"
slug: "/"
!!site.page src: "installation"
description: "Installation guide"
```
## Error Prevention
### Common Mistakes to Avoid
1. **Missing Collection on First Page:**
```heroscript
# WRONG - no collection specified
!!site.page src: "introduction"
# CORRECT
!!site.page src: "docs:introduction"
```
2. **Category Without Name:**
```heroscript
# WRONG - missing name
!!site.page_category
label: "Getting Started"
# CORRECT
!!site.page_category
name: "getting_started"
label: "Getting Started"
```
3. **Missing Description:**
```heroscript
# WRONG - no description
!!site.page src: "docs:intro"
# CORRECT
!!site.page src: "docs:intro"
description: "Introduction to the platform"
```
4. **Incorrect File Ordering:**
```
# WRONG - pages before config
pages.heroscript
config.heroscript
# CORRECT - config first
0_config.heroscript
1_pages.heroscript
```
## Validation Checklist
When generating HeroScript for the Site module, verify:
- [ ] `!!site.config` includes `name` parameter
- [ ] All pages have `description` parameter
- [ ] First page in each collection specifies `collection:page_name`
- [ ] Categories are declared before their pages
- [ ] Files use numeric prefixes for ordering
- [ ] Navigation items have either `to` or `href`
- [ ] Footer items are grouped by `title`
- [ ] External URLs include protocol (https://)
- [ ] Paths don't have trailing slashes unless intentional
- [ ] Draft pages are marked with `draft: true`
## Integration with V Code
When working with the Site module in V code:
```v
import incubaid.herolib.web.site
import incubaid.herolib.core.playbook
// Process HeroScript files
mut plbook := playbook.new(path: '/path/to/heroscripts')!
site.play(mut plbook)!
// Access configured site
mut mysite := site.get(name: 'my_site')!
// Iterate through pages
for page in mysite.pages {
println('Page: ${page.name} - ${page.description}')
}
// Iterate through sections
for section in mysite.sections {
println('Section: ${section.label}')
}
```
## Summary
The Site module's HeroScript format provides a declarative way to configure websites with:
- Clear separation of concerns (config, navigation, content)
- Automatic ordering and organization
- Collection and category persistence for reduced repetition
- Flexible metadata and SEO configuration
- Support for both internal and external content
Always follow the execution order rules, use numeric file prefixes, and provide complete metadata for best results.

View File

@@ -1,12 +0,0 @@
module site
// Page represents a single documentation page.
// Presumably populated from `!!site.page` heroscript actions (src is the
// required field; the rest is optional metadata) — confirm against the
// playbook processing code.
pub struct Page {
pub mut:
	id          string // Unique identifier: "collection:page_name"
	title       string // Display title (optional, extracted from markdown if empty)
	description string // Brief description for metadata
	draft       bool   // Mark as draft (hidden from navigation)
	hide_title  bool   // Hide the title when rendering
	src         string // Source reference (same as id in this format)
}

View File

@@ -1,104 +0,0 @@
module site
import json
// ============================================================================
// Sidebar Navigation Models (Domain Types)
// ============================================================================
// SideBar is the domain model for a site's sidebar navigation tree.
pub struct SideBar {
pub mut:
	my_sidebar []NavItem
}

// NavItem is one sidebar entry: a doc page, a nested category, or a link.
pub type NavItem = NavDoc | NavCat | NavLink

// NavDoc references a documentation page by id ("collection:page_name").
pub struct NavDoc {
pub:
	id    string
	label string
}

// NavCat is a collapsible group of child NavItems.
pub struct NavCat {
pub mut:
	label       string
	collapsible bool = true
	collapsed   bool
	items       []NavItem
}

// NavLink is a link entry with an explicit href.
pub struct NavLink {
pub:
	label       string
	href        string
	description string
}
// ============================================================================
// JSON Serialization Struct (unified to avoid sum type _type field)
// ============================================================================
// SidebarItem is the unified JSON shape for one sidebar entry; using one
// flat struct for all entry kinds avoids V's sum-type `_type` field in
// the encoded output, and @[omitempty] drops the unused fields per kind.
struct SidebarItem {
	typ         string        @[json: 'type'] // 'doc' | 'link' | 'category'
	id          string        @[omitempty] // doc id (doc entries only)
	label       string
	href        string        @[omitempty] // target URL (link entries only)
	description string        @[omitempty]
	collapsible bool          @[json: 'collapsible'; omitempty]
	collapsed   bool          @[json: 'collapsed'; omitempty]
	items       []SidebarItem @[omitempty] // children (category entries only)
}
// ============================================================================
// JSON Serialization
// ============================================================================
// sidebar_to_json renders the sidebar as the pretty-printed JSON array
// Docusaurus expects in sidebar.json.
pub fn (sb SideBar) sidebar_to_json() !string {
	items := sb.my_sidebar.map(to_sidebar_item(it))
	return json.encode_pretty(items)
}
// to_sidebar_item converts a domain NavItem into its JSON-serialization
// shape (variants are unqualified here because NavItem is declared in
// this module).
fn to_sidebar_item(item NavItem) SidebarItem {
	return match item {
		NavDoc { from_doc(item) }
		NavLink { from_link(item) }
		NavCat { from_category(item) }
	}
}
// from_doc maps a NavDoc to a 'doc' entry; the id is reduced from
// "collection:page_name" to the bare page name via extract_page_id.
fn from_doc(doc NavDoc) SidebarItem {
	return SidebarItem{
		typ: 'doc'
		id: extract_page_id(doc.id)
		label: doc.label
	}
}

// from_link maps a NavLink to a 'link' entry.
fn from_link(link NavLink) SidebarItem {
	return SidebarItem{
		typ: 'link'
		label: link.label
		href: link.href
		description: link.description
	}
}

// from_category maps a NavCat — and, recursively, its children — to a
// 'category' entry.
fn from_category(cat NavCat) SidebarItem {
	return SidebarItem{
		typ: 'category'
		label: cat.label
		collapsible: cat.collapsible
		collapsed: cat.collapsed
		items: cat.items.map(to_sidebar_item(it))
	}
}
// extract_page_id returns the page-name part of a "collection:page_name"
// id. Ids with no ':' (or more than one) are returned unchanged.
pub fn extract_page_id(id string) string {
	if id.count(':') == 1 {
		return id.all_after(':')
	}
	return id
}