codewalker
182  examples/core/code/code_generator.vsh  Executable file
@@ -0,0 +1,182 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run

import incubaid.herolib.core.pathlib
import incubaid.herolib.ui.console
import incubaid.herolib.ai.client
import os

fn main() {
	console.print_header('Code Generator - V File Analyzer Using AI')

	// Find herolib root directory using @FILE
	script_dir := os.dir(@FILE)
	// Navigate from examples/core/code to root: up 4 levels
	herolib_root := os.dir(os.dir(os.dir(script_dir)))

	console.print_item('HeroLib Root: ${herolib_root}')

	// The directory we want to analyze (lib/core in this case)
	target_dir := herolib_root + '/lib/core'
	console.print_item('Target Directory: ${target_dir}')
	console.print_lf(1)

	// Load instruction files from aiprompts
	console.print_item('Loading instruction files...')

	mut ai_instructions_file := pathlib.get(herolib_root +
		'/aiprompts/ai_instructions_hero_models.md')
	mut vlang_core_file := pathlib.get(herolib_root + '/aiprompts/vlang_herolib_core.md')

	ai_instructions_content := ai_instructions_file.read()!
	vlang_core_content := vlang_core_file.read()!

	console.print_green('✓ Instruction files loaded successfully')
	console.print_lf(1)

	// Initialize AI client
	console.print_item('Initializing AI client...')
	mut aiclient := client.new()!
	console.print_green('✓ AI client initialized')
	console.print_lf(1)

	// Get all V files from target directory
	console.print_item('Scanning directory for V files...')

	mut target_path := pathlib.get_dir(path: target_dir, create: false)!
	mut all_files := target_path.list(
		regex:     [r'\.v$']
		recursive: true
	)!

	console.print_item('Found ${all_files.paths.len} total V files')

	// TODO: Walk over all files which do NOT end with _test.v and do NOT start with factory
	// Each file becomes a src_file_content object
	mut files_to_process := []pathlib.Path{}

	for file in all_files.paths {
		file_name := file.name()

		// Skip test files
		if file_name.ends_with('_test.v') {
			continue
		}

		// Skip factory files
		if file_name.starts_with('factory') {
			continue
		}

		files_to_process << file
	}

	console.print_green('✓ After filtering: ${files_to_process.len} files to process')
	console.print_lf(2)

	// Process each file with AI
	total_files := files_to_process.len

	for idx, mut file in files_to_process {
		current_idx := idx + 1
		process_file_with_ai(mut aiclient, mut file, ai_instructions_content, vlang_core_content,
			current_idx, total_files)!
	}

	console.print_lf(1)
	console.print_header('✓ Code Generation Complete')
	console.print_item('Processed ${files_to_process.len} files')
	console.print_lf(1)
}

fn process_file_with_ai(mut aiclient client.AIClient, mut file pathlib.Path, ai_instructions string, vlang_core string, current int, total int) ! {
	file_name := file.name()
	src_file_path := file.absolute()

	console.print_item('[${current}/${total}] Analyzing: ${file_name}')

	// Read the file content - this is the src_file_content
	src_file_content := file.read()!

	// Build comprehensive system prompt
	// TODO: Load instructions from prompt files and use in prompt

	// Build the user prompt with context
	user_prompt := '
File: ${file_name}
Path: ${src_file_path}

Current content:
\`\`\`v
${src_file_content}
\`\`\`

Please improve this V file by:
1. Following V language best practices
2. Ensuring proper error handling with ! and or blocks
3. Adding clear documentation comments
4. Following herolib patterns and conventions
5. Improving code clarity and readability

Context from herolib guidelines:

VLANG HEROLIB CORE:
${vlang_core}

AI INSTRUCTIONS FOR HERO MODELS:
${ai_instructions}

Return ONLY the complete improved file wrapped in \`\`\`v code block.
'

	console.print_debug_title('Sending to AI', 'Calling AI model to improve ${file_name}...')

	// TODO: Call AI client with model gemini-3-pro
	aiclient.write_from_prompt(file, user_prompt, [.pro]) or {
		console.print_stderr('Error processing ${file_name}: ${err}')
		return
	}

	mut improved_file := pathlib.get(src_file_path + '.improved')
	improved_content := improved_file.read()!

	// Display improvements summary
	sample_chars := 250
	preview := if improved_content.len > sample_chars {
		improved_content[..sample_chars] + '... (preview truncated)'
	} else {
		improved_content
	}

	console.print_debug_title('AI Analysis Results for ${file_name}', preview)

	// Optional: Save improved version for review
	// Uncomment to enable saving
	// improved_file_path := src_file_path + '.improved'
	// mut improved_file := pathlib.get_file(path: improved_file_path, create: true)!
	// improved_file.write(improved_content)!
	// console.print_green('✓ Improvements saved to: ${improved_file_path}')

	console.print_lf(1)
}

// Extract V code from markdown code block
fn extract_code_block(response string) string {
	// Look for ```v ... ``` block
	start_marker := '\`\`\`v'
	end_marker := '\`\`\`'

	start_idx := response.index(start_marker) or {
		// If no ```v, try to return as-is
		return response
	}

	mut content_start := start_idx + start_marker.len
	if content_start < response.len && response[content_start] == `\n` {
		content_start++
	}
	// Find the closing marker after the opening one, so the opening
	// \`\`\`v fence is not matched again as the end marker
	rel_end := response[content_start..].index(end_marker) or { return response[content_start..] }
	end_idx := content_start + rel_end

	extracted := response[content_start..end_idx]
	return extracted.trim_space()
}
142  lib/ai/codewalker/README.md  Normal file
@@ -0,0 +1,142 @@
# CodeWalker Module

Parse directories or formatted strings into file maps with automatic ignore pattern support.

## Features

- 📂 Walk directories recursively and build file maps
- 🚫 Respect `.gitignore` and `.heroignore` ignore patterns with directory scoping
- 📝 Parse custom `===FILE:name===` format into file maps
- 📦 Export/write file maps to disk
- 🛡️ Robust, defensive parsing (handles spaces, variable `=` length, case-insensitive)

## Quick Start

### From Directory Path

```v
import incubaid.herolib.ai.codewalker

mut cw := codewalker.new()
mut fm := cw.filemap_get(path: '/path/to/project')!

// Iterate files
for path, content in fm.content {
	println('${path}: ${content.len} bytes')
}
```

### From Formatted String

```v
content_str := '
===FILE:main.v===
fn main() {
	println("Hello!")
}
===FILE:utils/helper.v===
pub fn help() {}
===END===
'

mut cw := codewalker.new()
mut fm := cw.parse(content_str)!

println(fm.get('main.v')!)
```
## FileMap Operations

```v
// Get file content
content := fm.get('path/to/file.txt')!

// Set/modify file
fm.set('new/file.txt', 'content here')

// Find files by prefix
files := fm.find('src/')

// Export to directory
fm.export('/output/dir')!

// Write updates to directory
fm.write('/project/dir')!

// Convert back to formatted string
text := fm.content()
```

## File Format

### Full Files

```
===FILE:path/to/file.txt===
File content here
Can span multiple lines
===END===
```

### Partial Content (for future morphing)

```
===FILECHANGE:src/models.v===
struct User {
	id int
}
===END===
```

### Both Together

```
===FILE:main.v===
fn main() {}
===FILECHANGE:utils.v===
fn helper() {}
===END===
```

## Parsing Robustness

Parser handles variations:

```
===FILE:name.txt===       // Standard
= = FILE : name.txt = =   // Extra spaces
===file:name.txt===       // Lowercase
==FILE:name.txt==         // Different = count
```
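As a quick illustration of the claim above, here is a minimal sketch in the style of the Quick Start snippets (the file name and content are hypothetical): the same file declared with different `=` counts, spacing and casing ends up under the same key.

```v
import incubaid.herolib.ai.codewalker

mut cw := codewalker.new()

// hypothetical input: same file, two header styles
a := cw.parse('===FILE:readme.md===\nhello\n===END===')!
b := cw.parse('== file : readme.md ==\nhello\n== end ==')!

assert a.get('readme.md')! == b.get('readme.md')!
```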
## Error Handling

Errors are collected in `FileMap.errors`:

```v
mut fm := cw.filemap_get(content: str)!

if fm.errors.len > 0 {
	for err in fm.errors {
		println('Line ${err.linenr}: ${err.message}')
	}
}
```

## Ignore Patterns

- Respects `.gitignore` and `.heroignore` in any directory
- Patterns are scoped to the directory that contains them
- Default patterns include `.git/`, `node_modules/`, `*.pyc`, etc.
- Use `/` suffix for directory patterns: `dist/`
- Use `*` for wildcards: `*.log`
- Lines starting with `#` are comments

Example `.heroignore`:

```
build/
*.tmp
.env
__pycache__/
```
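A minimal sketch of how such an ignore file affects a directory walk; the directory layout here is an assumption. Anything matching `build/`, `*.tmp`, `.env` or `__pycache__/` is simply absent from the resulting map.

```v
import incubaid.herolib.ai.codewalker

// assumes /tmp/demo contains the .heroignore above plus build/ and src/ trees
mut cw := codewalker.new()
mut fm := cw.filemap_get(path: '/tmp/demo')!

for path, _ in fm.content {
	println(path) // prints only non-ignored files, e.g. src/main.v
}
```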
212  lib/ai/codewalker/codewalker.v  Normal file
@@ -0,0 +1,212 @@
module codewalker

import incubaid.herolib.core.pathlib

// CodeWalker walks directories and parses file content
pub struct CodeWalker {
pub mut:
	ignorematcher IgnoreMatcher
}

@[params]
pub struct FileMapArgs {
pub mut:
	path         string
	content      string
	content_read bool = true // If false, file content not read from disk
}

// parse extracts FileMap from formatted content string
pub fn (mut cw CodeWalker) parse(content string) !FileMap {
	return cw.filemap_get_from_content(content)
}

// filemap_get creates FileMap from path or content string
pub fn (mut cw CodeWalker) filemap_get(args FileMapArgs) !FileMap {
	if args.path != '' {
		return cw.filemap_get_from_path(args.path, args.content_read)!
	} else if args.content != '' {
		return cw.filemap_get_from_content(args.content)!
	} else {
		return error('Either path or content must be provided')
	}
}

// filemap_get_from_path reads directory and creates FileMap, respecting ignore patterns
fn (mut cw CodeWalker) filemap_get_from_path(path string, content_read bool) !FileMap {
	mut dir := pathlib.get(path)
	if !dir.exists() || !dir.is_dir() {
		return error('Directory "${path}" does not exist')
	}

	mut files := dir.list(ignore_default: false)!
	mut fm := FileMap{
		source: path
	}

	// Collect ignore patterns from .gitignore and .heroignore with scoping
	for mut p in files.paths {
		if p.is_file() {
			name := p.name()
			if name == '.gitignore' || name == '.heroignore' {
				content := p.read() or { '' }
				if content != '' {
					rel := p.path_relative(path) or { '' }
					base_rel := if rel.contains('/') { rel.all_before_last('/') } else { '' }
					cw.ignorematcher.add_content_with_base(base_rel, content)
				}
			}
		}
	}

	for mut file in files.paths {
		if file.is_file() {
			name := file.name()
			if name == '.gitignore' || name == '.heroignore' {
				continue
			}
			relpath := file.path_relative(path)!
			if cw.ignorematcher.is_ignored(relpath) {
				continue
			}
			if content_read {
				content := file.read()!
				fm.content[relpath] = content
			} else {
				fm.content[relpath] = ''
			}
		}
	}
	return fm
}
// parse_header robustly extracts block type and filename from header line
// Handles variable `=` count, spaces, and case-insensitivity
// Example: ` ===FILE: myfile.txt ===` -> (BlockKind.file, 'myfile.txt')
fn parse_header(line string) !(BlockKind, string) {
	cleaned := line.trim_space()

	// Must have = and content
	if !cleaned.contains('=') {
		return BlockKind.end, ''
	}

	// Strip leading and trailing = (any count), preserving spaces between
	mut content := cleaned.trim_left('=').trim_space()
	content = content.trim_right('=').trim_space()

	if content.len == 0 {
		return BlockKind.end, ''
	}

	// Check for END marker
	if content.to_lower() == 'end' {
		return BlockKind.end, ''
	}

	// Parse FILE or FILECHANGE
	if content.contains(':') {
		kind_str := content.all_before(':').to_lower().trim_space()
		filename := content.all_after(':').trim_space()

		if filename.len < 1 {
			return error('Invalid filename: empty after colon')
		}

		match kind_str {
			'file' { return BlockKind.file, filename }
			'filechange' { return BlockKind.filechange, filename }
			else { return BlockKind.end, '' }
		}
	}

	return BlockKind.end, ''
}
// filemap_get_from_content parses FileMap from string with ===FILE:name=== format
fn (mut cw CodeWalker) filemap_get_from_content(content string) !FileMap {
	mut fm := FileMap{}

	mut current_kind := BlockKind.end
	mut filename := ''
	mut block := []string{}
	mut had_any_block := false
	mut linenr := 0

	for line in content.split_into_lines() {
		linenr += 1
		line_trimmed := line.trim_space()

		// Only lines starting with '=' are block headers; everything else is block content
		if !line_trimmed.starts_with('=') {
			if filename == '' && line_trimmed.len > 0 {
				fm.errors << FMError{
					message:  "Content before first FILE block: '${line}'"
					linenr:   linenr
					category: 'parse'
				}
			} else if filename != '' {
				block << line
			}
			continue
		}

		kind, name := parse_header(line_trimmed)!

		match kind {
			.end {
				if filename == '' {
					if had_any_block {
						fm.errors << FMError{
							message:  'Unexpected END marker without active block'
							linenr:   linenr
							category: 'parse'
						}
					} else {
						fm.errors << FMError{
							message:  'END found before any FILE block'
							linenr:   linenr
							category: 'parse'
						}
					}
				} else {
					// Store current block
					match current_kind {
						.file { fm.content[filename] = block.join_lines() }
						.filechange { fm.content_change[filename] = block.join_lines() }
						else {}
					}
					filename = ''
					block = []string{}
					current_kind = .end
				}
			}
			.file, .filechange {
				// Flush previous block if any
				if filename != '' {
					match current_kind {
						.file { fm.content[filename] = block.join_lines() }
						.filechange { fm.content_change[filename] = block.join_lines() }
						else {}
					}
				}
				filename = name
				current_kind = kind
				block = []string{}
				had_any_block = true
			}
		}
	}

	// Flush final block if any
	if filename != '' {
		match current_kind {
			.file { fm.content[filename] = block.join_lines() }
			.filechange { fm.content_change[filename] = block.join_lines() }
			else {}
		}
	}

	return fm
}
@@ -4,7 +4,7 @@ import os
 import incubaid.herolib.core.pathlib

 fn test_parse_basic() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := '===FILE:file1.txt===\nline1\nline2\n===END==='
 	fm := cw.parse(test_content)!
 	assert fm.content.len == 1
@@ -12,7 +12,7 @@ fn test_parse_basic() {
 }

 fn test_parse_multiple_files() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := '===FILE:file1.txt===\nline1\n===FILE:file2.txt===\nlineA\nlineB\n===END==='
 	fm := cw.parse(test_content)!
 	assert fm.content.len == 2
@@ -21,7 +21,7 @@ fn test_parse_multiple_files() {
 }

 fn test_parse_empty_file_block() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := '===FILE:empty.txt===\n===END==='
 	fm := cw.parse(test_content)!
 	assert fm.content.len == 1
@@ -29,8 +29,8 @@ fn test_parse_empty_file_block() {
 }

 fn test_parse_consecutive_end_and_file() {
-	mut cw := new(CodeWalkerArgs{})!
-	test_content := '===FILE:file1.txt===\ncontent1\n===END===\n===FILE:file2.txt===\ncontent2\n===END==='
+	mut cw := new()
+	test_content := '===FILE:file1.txt ===\ncontent1\n===END===\n=== file2.txt===\ncontent2\n===END==='
 	fm := cw.parse(test_content)!
 	assert fm.content.len == 2
 	assert fm.content['file1.txt'] == 'content1'
@@ -38,8 +38,8 @@ fn test_parse_consecutive_end_and_file() {
 }

 fn test_parse_content_before_first_file_block() {
-	mut cw := new(CodeWalkerArgs{})!
-	test_content := 'unexpected content\n===FILE:file1.txt===\ncontent\n===END==='
+	mut cw := new()
+	test_content := 'unexpected content\n===FILE:file1.txt===\ncontent\n====='
 	// This should ideally log an error but still parse the file
 	fm := cw.parse(test_content)!
 	assert fm.content.len == 1
@@ -49,7 +49,7 @@ fn test_parse_content_before_first_file_block() {
 }

 fn test_parse_content_after_end() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := '===FILE:file1.txt===\ncontent\n===END===\nmore unexpected content'
 	// Implementation chooses to ignore content after END but return parsed content
 	fm := cw.parse(test_content)!
@@ -58,7 +58,7 @@ fn test_parse_content_after_end() {
 }

 fn test_parse_invalid_filename_line() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := '======\ncontent\n===END==='
 	cw.parse(test_content) or {
 		assert err.msg().contains('Invalid filename, < 1 chars')
@@ -68,7 +68,7 @@ fn test_parse_invalid_filename_line() {
 }

 fn test_parse_file_ending_without_end() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := '===FILE:file1.txt===\nline1\nline2'
 	fm := cw.parse(test_content)!
 	assert fm.content.len == 1
@@ -76,14 +76,14 @@ fn test_parse_file_ending_without_end() {
 }

 fn test_parse_empty_content() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := ''
 	fm := cw.parse(test_content)!
 	assert fm.content.len == 0
 }

 fn test_parse_only_end_at_start() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := '===END==='
 	cw.parse(test_content) or {
 		assert err.msg().contains('END found at start, not good.')
@@ -93,7 +93,7 @@ fn test_parse_only_end_at_start() {
 }

 fn test_parse_mixed_file_and_filechange() {
-	mut cw2 := new(CodeWalkerArgs{})!
+	mut cw2 := new()
 	test_content2 := '===FILE:file.txt===\nfull\n===FILECHANGE:file.txt===\npartial\n===END==='
 	fm2 := cw2.parse(test_content2)!
 	assert fm2.content.len == 1
@@ -103,7 +103,7 @@ fn test_parse_mixed_file_and_filechange() {
 }

 fn test_parse_empty_block_between_files() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := '===FILE:file1.txt===\ncontent1\n===FILE:file2.txt===\n===END===\n===FILE:file3.txt===\ncontent3\n===END==='
 	fm := cw.parse(test_content)!
 	assert fm.content.len == 3
@@ -113,7 +113,7 @@ fn test_parse_empty_block_between_files() {
 }

 fn test_parse_multiple_empty_blocks() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	test_content := '===FILE:file1.txt===\n===END===\n===FILE:file2.txt===\n===END===\n===FILE:file3.txt===\ncontent3\n===END==='
 	fm := cw.parse(test_content)!
 	assert fm.content.len == 3
@@ -123,7 +123,7 @@ fn test_parse_multiple_empty_blocks() {
 }

 fn test_parse_filename_end_reserved() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	// Legacy header 'END' used as filename should error when used as header for new block
 	test_content := '===file1.txt===\ncontent1\n===END===\n===END===\ncontent2\n===END==='
 	cw.parse(test_content) or {
@@ -204,7 +204,7 @@ fn test_ignore_level_scoped() ! {
 	mut okf := pathlib.get_file(path: os.join_path(sub.path, 'ok.txt'), create: true)!
 	okf.write('OK')!

-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	mut fm := cw.filemap_get(path: root.path)!

 	// sub/dist/a.txt should be ignored
@@ -235,14 +235,14 @@ fn test_ignore_level_scoped_gitignore() ! {
 	mut appf := pathlib.get_file(path: os.join_path(svc.path, 'app.txt'), create: true)!
 	appf.write('app')!

-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	mut fm := cw.filemap_get(path: root.path)!
 	assert 'svc/logs/out.txt' !in fm.content.keys()
 	assert fm.content['svc/app.txt'] == 'app'
 }

 fn test_parse_filename_end_reserved_legacy() {
-	mut cw := new(CodeWalkerArgs{})!
+	mut cw := new()
 	// Legacy header 'END' used as filename should error when used as header for new block
 	test_content := '===file1.txt===\ncontent1\n===END===\n===END===\ncontent2\n===END==='
 	cw.parse(test_content) or {
14  lib/ai/codewalker/factory.v  Normal file
@@ -0,0 +1,14 @@
module codewalker

// new creates a CodeWalker instance with default ignore patterns
pub fn new() CodeWalker {
	mut cw := CodeWalker{}
	cw.ignorematcher = gitignore_matcher_new()
	return cw
}

// filemap creates FileMap from path or content (convenience function)
pub fn filemap(args FileMapArgs) !FileMap {
	mut cw := new()
	return cw.filemap_get(args)
}
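For callers that only need a one-shot walk, the `filemap` helper above avoids holding on to a `CodeWalker`. A hedged usage sketch (the import path and target path are assumptions):

```v
import incubaid.herolib.ai.codewalker

fn main() {
	// builds a temporary CodeWalker internally and returns the FileMap
	fm := codewalker.filemap(path: '/path/to/project') or { panic(err) }
	println('files found: ${fm.content.len}')
}
```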
@@ -2,14 +2,16 @@ module codewalker
 import incubaid.herolib.core.pathlib

+// FileMap represents parsed file structure with content and changes
 pub struct FileMap {
 pub mut:
-	source         string
-	content        map[string]string
-	content_change map[string]string
-	errors         []FMError
+	source         string            // Source path or origin
+	content        map[string]string // Full file content by path
+	content_change map[string]string // Partial/change content by path
+	errors         []FMError         // Parse errors encountered
 }

+// content generates formatted string representation
 pub fn (mut fm FileMap) content() string {
 	mut out := []string{}
 	for filepath, filecontent in fm.content {
@@ -24,7 +26,7 @@ pub fn (mut fm FileMap) content() string {
 	return out.join_lines()
 }

-// write in new location, all will be overwritten, will only work with full files, not changes
+// export writes all FILE content to destination directory
 pub fn (mut fm FileMap) export(path string) ! {
 	for filepath, filecontent in fm.content {
 		dest := '${path}/${filepath}'
@@ -33,7 +35,7 @@ pub fn (mut fm FileMap) export(path string) ! {
 	}
 }

-@[PARAMS]
+@[params]
 pub struct WriteParams {
 	path   string
 	v_test bool = true
@@ -41,29 +43,31 @@ pub struct WriteParams {
 	python_test bool
 }

-// update the files as found in the folder and update them or create
+// write updates files in destination directory (creates or overwrites)
 pub fn (mut fm FileMap) write(path string) ! {
 	for filepath, filecontent in fm.content {
 		dest := '${path}/${filepath}'
-		// In future: validate language-specific formatting/tests before overwrite
 		mut filepathtowrite := pathlib.get_file(path: dest, create: true)!
 		filepathtowrite.write(filecontent)!
 	}
-	// TODO: phase 2, work with morphe to integrate change in the file
 }

+// get retrieves file content by path
 pub fn (fm FileMap) get(relpath string) !string {
 	return fm.content[relpath] or { return error('File not found: ${relpath}') }
 }

+// set stores file content by path
 pub fn (mut fm FileMap) set(relpath string, content string) {
 	fm.content[relpath] = content
 }

+// delete removes file from content map
 pub fn (mut fm FileMap) delete(relpath string) {
 	fm.content.delete(relpath)
 }

+// find returns all paths matching prefix
 pub fn (fm FileMap) find(path string) []string {
 	mut result := []string{}
 	for filepath, _ in fm.content {
@@ -1,13 +1,6 @@
 module codewalker

-// A minimal gitignore-like matcher used by CodeWalker
-// Supports:
-// - Directory patterns ending with '/': ignores any path that has this segment prefix
-// - Extension patterns like '*.pyc' or '*.<ext>'
-// - Simple substrings and '*' wildcards
-// - Lines starting with '#' are comments; empty lines ignored
-// No negation support for simplicity
+// Default ignore patterns based on .gitignore conventions

 const default_gitignore = '
 .git/
 .svn/
@@ -54,27 +47,29 @@ Thumbs.db
 '

 struct IgnoreRule {
-	base    string // relative dir from source root where the ignore file lives ('' means global)
-	pattern string
+	base    string // Directory where ignore file was found
+	pattern string // Ignore pattern
 }

+// IgnoreMatcher checks if paths should be ignored
 pub struct IgnoreMatcher {
 pub mut:
 	rules []IgnoreRule
 }

+// gitignore_matcher_new creates matcher with default patterns
 pub fn gitignore_matcher_new() IgnoreMatcher {
 	mut m := IgnoreMatcher{}
 	m.add_content(default_gitignore)
 	return m
 }

-// Add raw .gitignore-style content as global (root-scoped) rules
+// add_content adds global (root-scoped) ignore patterns
 pub fn (mut m IgnoreMatcher) add_content(content string) {
 	m.add_content_with_base('', content)
 }

-// Add raw .gitignore/.heroignore-style content scoped to base_rel
+// add_content_with_base adds ignore patterns scoped to base directory
 pub fn (mut m IgnoreMatcher) add_content_with_base(base_rel string, content string) {
 	mut base := base_rel.replace('\\', '/').trim('/').to_lower()
 	for raw_line in content.split_into_lines() {
@@ -89,7 +84,7 @@ pub fn (mut m IgnoreMatcher) add_content_with_base(base_rel string, content stri
 	}
 }

-// Very simple glob/substring-based matching with directory scoping
+// is_ignored checks if path matches any ignore pattern
 pub fn (m IgnoreMatcher) is_ignored(relpath string) bool {
 	mut path := relpath.replace('\\', '/').trim_left('/')
 	path_low := path.to_lower()
@@ -99,31 +94,29 @@ pub fn (m IgnoreMatcher) is_ignored(relpath string) bool {
 			continue
 		}

-		// Determine subpath relative to base
+		// Scope pattern to base directory
 		mut sub := path_low
 		if rule.base != '' {
 			base := rule.base
 			if sub == base {
-				// path equals the base dir; ignore rules apply to entries under base, not the base itself
 				continue
 			}
 			if sub.starts_with(base + '/') {
 				sub = sub[(base.len + 1)..]
 			} else {
-				continue // rule not applicable for this path
+				continue
 			}
 		}

-		// Directory pattern (relative to base)
+		// Directory pattern
 		if pat.ends_with('/') {
-			mut dirpat := pat.trim_right('/')
-			dirpat = dirpat.trim_left('/').to_lower()
+			mut dirpat := pat.trim_right('/').trim_left('/').to_lower()
 			if sub == dirpat || sub.starts_with(dirpat + '/') || sub.contains('/' + dirpat + '/') {
 				return true
 			}
 			continue
 		}
-		// Extension pattern *.ext
+		// Extension pattern
 		if pat.starts_with('*.') {
 			ext := pat.all_after_last('.').to_lower()
 			if sub.ends_with('.' + ext) {
@@ -131,7 +124,7 @@ pub fn (m IgnoreMatcher) is_ignored(relpath string) bool {
 			}
 			continue
 		}
-		// Simple wildcard * anywhere -> sequential contains match
+		// Wildcard matching
 		if pat.contains('*') {
 			mut parts := pat.to_lower().split('*')
 			mut idx := 0
@@ -152,7 +145,7 @@ pub fn (m IgnoreMatcher) is_ignored(relpath string) bool {
 			}
 			continue
 		}
-		// Fallback: substring match (case-insensitive) on subpath
+		// Substring match
 		if sub.contains(pat.to_lower()) {
 			return true
 		}
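To make the matching rules above concrete, a small hedged sketch that drives the matcher directly from inside the module; the function name and extra patterns are made up for illustration and only calls shown above are used.

```v
// inside module codewalker
fn example_ignore_usage() {
	mut m := gitignore_matcher_new() // starts from default_gitignore patterns
	m.add_content('dist/\n*.log')    // extra root-scoped rules
	assert m.is_ignored('dist/bundle.js') // directory pattern
	assert m.is_ignored('logs/today.log') // extension pattern
	println(m.is_ignored('src/main.v')) // false unless a default pattern matches
}
```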
@@ -1,16 +1,16 @@
 module codewalker

-pub struct CWError {
-pub:
-	message  string
-	linenr   int
-	category string
+// BlockKind defines the type of block in parsed content
+pub enum BlockKind {
+	file
+	filechange
+	end
 }

 pub struct FMError {
 pub:
 	message  string
-	linenr   int // is optional
+	linenr   int
 	category string
 	filename string
 }
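A short sketch of how the two types above are consumed by the parser earlier in this commit; the function name and values are illustrative only.

```v
// inside module codewalker
fn example_model_usage(kind BlockKind, mut fm FileMap) {
	match kind {
		.file { println('full file block') }
		.filechange { println('partial change block') }
		.end { println('end marker') }
	}
	fm.errors << FMError{
		message:  'example parse error'
		linenr:   42
		category: 'parse'
	}
}
```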
18  lib/ai/instruct.md  Normal file
@@ -0,0 +1,18 @@
fix @lib/ai/codewalker

- we should use enumerators for FILE & CHANGE
- we should document methods well, but not much text, just the basics needed to understand them
- make sure parsing of FILE & CHANGE is super robust and defensive, e.g. spaces after == or ===, == of any length, case-insensitive
- codemap should not have errors; errors are only kept at filemap level, remove those errors everywhere

check the rest of the code for issues

fix readme.md

give the coding instructions with the full code output where changes are needed
7  lib/ai/instructions/factory.v  Normal file
@@ -0,0 +1,7 @@
module instructions

import incubaid.herolib.core.texttools

__global (
	instructions_cache map[string]string
)
39  lib/ai/instructions/hero.v  Normal file
@@ -0,0 +1,39 @@
module instructions

import incubaid.herolib.develop.gittools
import incubaid.herolib.core.pathlib
import incubaid.herolib.ai.codewalker

pub fn aiprompts_path() !string {
	return instructions_cache['aiprompts_path'] or {
		mypath := gittools.path(
			git_url: 'https://github.com/Incubaid/herolib/tree/development/aiprompts'
		)!.path
		instructions_cache['aiprompts_path'] = mypath
		mypath
	}
}

pub fn ai_instructions_hero_models() !string {
	path := '${aiprompts_path()!}/ai_instructions_hero_models.md'
	mut ppath := pathlib.get_file(path: path, create: false)!
	return ppath.read()!
}

pub fn ai_instructions_vlang_herolib_core() !string {
	path := '${aiprompts_path()!}/vlang_herolib_core.md'
	mut ppath := pathlib.get_file(path: path, create: false)!
	return ppath.read()!
}

pub fn ai_instructions_herolib_core_all() !string {
	path := '${aiprompts_path()!}/herolib_core'
	mut cw := codewalker.new()
	mut filemap := cw.filemap_get(
		path: path
	)!

	return filemap.content()
}
@@ -93,4 +93,4 @@ pub fn (e Enum) vgen() string {
 	}

 	return '${comments}${prefix}enum ${e.name} {${values_str}\n}'
 }
280  lib/core/codegenerator/codegenerator.v  Normal file
@@ -0,0 +1,280 @@
module codegenerator

import incubaid.herolib.core.codeparser
import incubaid.herolib.core.pathlib
import incubaid.herolib.core.code
import incubaid.herolib.core.texttools
import os

pub struct CodeGenerator {
pub mut:
	parser     codeparser.CodeParser
	output_dir string
	format     bool
}

// generate_all generates markdown docs for all modules
pub fn (mut gen CodeGenerator) generate_all() ! {
	modules := gen.parser.list_modules()

	for module_name in modules {
		gen.generate_module(module_name)!
	}
}

// generate_module generates markdown for a single module
pub fn (mut gen CodeGenerator) generate_module(module_name string) ! {
	md := gen.module_to_markdown(module_name)!

	// Convert module name to filename: incubaid.herolib.core.code -> code.md
	filename := gen.module_to_filename(module_name)
	filepath := os.join_path(gen.output_dir, filename)

	mut file := pathlib.get_file(path: filepath, create: true)!
	file.write(md)!
}

// module_to_markdown generates complete markdown for a module
pub fn (gen CodeGenerator) module_to_markdown(module_name string) !string {
	module_obj := gen.parser.find_module(module_name)!

	mut md := ''

	// Use template for module header
	md += $tmpl('templates/module.md.template')

	// Imports section
	imports := gen.parser.list_imports(module_name)
	if imports.len > 0 {
		md += gen.imports_section(imports)
	}

	// Constants section
	consts := gen.parser.list_constants(module_name)
	if consts.len > 0 {
		md += gen.constants_section(consts)
	}

	// Structs section
	structs := gen.parser.list_structs(module_name)
	if structs.len > 0 {
		md += gen.structs_section(structs, module_name)
	}

	// Functions section
	functions := gen.parser.list_functions(module_name)
	if functions.len > 0 {
		md += gen.functions_section(functions, module_name)
	}

	// Interfaces section
	interfaces := gen.parser.list_interfaces(module_name)
	if interfaces.len > 0 {
		md += gen.interfaces_section(interfaces)
	}

	return md
}

// imports_section generates imports documentation
fn (gen CodeGenerator) imports_section(imports []code.Import) string {
	mut md := '## Imports\n\n'

	for imp in imports {
		md += '- `' + imp.mod + '`\n'
	}
	md += '\n'

	return md
}

// constants_section generates constants documentation
fn (gen CodeGenerator) constants_section(consts []code.Const) string {
	mut md := '## Constants\n\n'

	for const_ in consts {
		md += '- `' + const_.name + '` = `' + const_.value + '`\n'
	}
	md += '\n'

	return md
}

// structs_section generates structs documentation
fn (gen CodeGenerator) structs_section(structs []code.Struct, module_name string) string {
	mut md := '## Structs\n\n'

	for struct_ in structs {
		md += gen.struct_to_markdown(struct_)
	}

	return md
}

// functions_section generates functions documentation
fn (gen CodeGenerator) functions_section(functions []code.Function, module_name string) string {
	mut md := '## Functions & Methods\n\n'

	// Separate regular functions and methods
	regular_functions := functions.filter(it.receiver.typ.symbol() == '')
	methods := functions.filter(it.receiver.typ.symbol() != '')

	// Regular functions
	if regular_functions.len > 0 {
		md += '### Functions\n\n'
		for func in regular_functions {
			md += gen.function_to_markdown(func)
		}
	}

	// Methods (grouped by struct)
	if methods.len > 0 {
		md += '### Methods\n\n'
		structs := gen.parser.list_structs(module_name)

		for struct_ in structs {
			struct_methods := methods.filter(it.receiver.typ.symbol().contains(struct_.name))
			if struct_methods.len > 0 {
				md += '#### ' + struct_.name + '\n\n'
				for method in struct_methods {
					md += gen.function_to_markdown(method)
				}
			}
		}
	}

	return md
}

// interfaces_section generates interfaces documentation
fn (gen CodeGenerator) interfaces_section(interfaces []code.Interface) string {
	mut md := '## Interfaces\n\n'

	for iface in interfaces {
		md += '### ' + iface.name + '\n\n'
		if iface.description != '' {
			md += iface.description + '\n\n'
		}
		md += '```v\n'
		if iface.is_pub {
			md += 'pub '
		}
		md += 'interface ' + iface.name + ' {\n'
		for field in iface.fields {
			md += ' ' + field.name + ': ' + field.typ.symbol() + '\n'
		}
		md += '}\n```\n\n'
	}

	return md
}

// struct_to_markdown converts struct to markdown
fn (gen CodeGenerator) struct_to_markdown(struct_ code.Struct) string {
	mut md := '### '

	if struct_.is_pub {
		md += '**pub** '
	}

	md += 'struct ' + struct_.name + '\n\n'

	if struct_.description != '' {
		md += struct_.description + '\n\n'
	}

	md += '```v\n'
	if struct_.is_pub {
		md += 'pub '
	}
	md += 'struct ' + struct_.name + ' {\n'
	for field in struct_.fields {
		md += ' ' + field.name + ' ' + field.typ.symbol() + '\n'
	}
	md += '}\n'
	md += '```\n\n'

	// Field documentation
	if struct_.fields.len > 0 {
		md += '**Fields:**\n\n'
		for field in struct_.fields {
			visibility := if field.is_pub { 'public' } else { 'private' }
			mutability := if field.is_mut { ', mutable' } else { '' }
			md += '- `' + field.name + '` (`' + field.typ.symbol() + '`)' + mutability + ' - ' +
				visibility + '\n'
			if field.description != '' {
				md += ' - ' + field.description + '\n'
			}
		}
		md += '\n'
	}

	return md
}

// function_to_markdown converts function to markdown
fn (gen CodeGenerator) function_to_markdown(func code.Function) string {
	mut md := ''

	// Function signature
	signature := gen.function_signature(func)
	md += '- `' + signature + '`\n'

	// Description
	if func.description != '' {
		md += ' - *' + func.description + '*\n'
	}

	// Parameters
	if func.params.len > 0 {
		md += '\n **Parameters:**\n'
		for param in func.params {
			md += ' - `' + param.name + '` (`' + param.typ.symbol() + '`)'
			if param.description != '' {
				md += ' - ' + param.description
			}
			md += '\n'
		}
	}

	// Return type
	if func.result.typ.symbol() != '' {
		md += '\n **Returns:** `' + func.result.typ.symbol() + '`\n'
	}

	md += '\n'

	return md
}

// function_signature generates a function signature string
fn (gen CodeGenerator) function_signature(func code.Function) string {
	mut sig := if func.is_pub { 'pub ' } else { '' }

	if func.receiver.name != '' {
		sig += '(' + func.receiver.name + ' ' + func.receiver.typ.symbol() + ') '
	}

	sig += func.name

	// Parameters
	params := func.params.map(it.name + ': ' + it.typ.symbol()).join(', ')
	sig += '(' + params + ')'

	// Return type
	if func.result.typ.symbol() != '' {
		sig += ' -> ' + func.result.typ.symbol()
	}

	return sig
}

// module_to_filename converts a module name to a markdown filename
// e.g., incubaid.herolib.core.code -> code.md
pub fn (gen CodeGenerator) module_to_filename(module_name string) string {
	// Use only the last segment of the dotted module name
	parts := module_name.split('.')
	filename := parts[parts.len - 1]

	return filename + '.md'
}
@@ -1,5 +1,7 @@
 module codegenerator

+import incubaid.herolib.core.codeparser
+
 @[params]
 pub struct GeneratorOptions {
 pub:
@@ -10,18 +12,16 @@ pub:
 }

 pub fn new(args GeneratorOptions) !CodeGenerator {
-	import incubaid.herolib.core.codeparser
-
 	mut parser := codeparser.new(
 		path:      args.parser_path
 		recursive: args.recursive
 	)!

 	parser.parse()!

 	return CodeGenerator{
 		parser:     parser
 		output_dir: args.output_dir
 		format:     args.format
 	}
 }
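Putting the factory and the generator together, a hedged end-to-end sketch that uses only calls defined in this commit; the paths are placeholders.

```v
import incubaid.herolib.core.codegenerator

fn main() {
	mut gen := codegenerator.new(
		parser_path: '/path/to/herolib/lib/core'
		output_dir:  '/tmp/docs'
		recursive:   true
	) or { panic(err) }
	gen.generate_all() or { panic(err) } // one markdown file per parsed module
}
```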
31  lib/core/codegenerator/markdown_gen.v  Normal file
@@ -0,0 +1,31 @@
module codegenerator

import incubaid.herolib.core.pathlib

pub struct MarkdownGenerator {
pub mut:
	generator  CodeGenerator
	output_dir string
}

// write_all writes all generated markdown files to disk
pub fn (mut mgen MarkdownGenerator) write_all() ! {
	modules := mgen.generator.parser.list_modules()

	// Ensure output directory exists
	mut out_dir := pathlib.get_dir(path: mgen.output_dir, create: true)!

	for module_name in modules {
		mgen.write_module(module_name)!
	}
}

// write_module writes a single module's markdown to disk
pub fn (mut mgen MarkdownGenerator) write_module(module_name string) ! {
	md := mgen.generator.module_to_markdown(module_name)!
	filename := mgen.generator.module_to_filename(module_name)

	filepath := mgen.output_dir + '/' + filename
	mut file := pathlib.get_file(path: filepath, create: true)!
	file.write(md)!
}
188  lib/core/codegenerator/markdown_test.v  Normal file
@@ -0,0 +1,188 @@
|
|||||||
|
module codegenerator

import incubaid.herolib.ui.console
import incubaid.herolib.core.codeparser
import incubaid.herolib.core.pathlib
import os

fn test_markdown_generation() {
	console.print_header('CodeGenerator Markdown Test')
	console.print_lf(1)

	// Setup: Use the same test data as codeparser
	test_dir := setup_test_directory()
	defer {
		os.rmdir_all(test_dir) or {}
	}

	// Create output directory
	output_dir := '/tmp/codegen_output'
	os.rmdir_all(output_dir) or {}
	os.mkdir_all(output_dir) or { panic('Failed to create output dir') }
	defer {
		os.rmdir_all(output_dir) or {}
	}

	// Create generator
	console.print_item('Creating CodeGenerator...')
	mut gen := new(
		parser_path: test_dir
		output_dir:  output_dir
		recursive:   true
	)!

	console.print_item('Parser found ${gen.parser.list_modules().len} modules')
	console.print_lf(1)

	// Test filename conversion
	console.print_header('Test 1: Filename Conversion')
	struct TestCase {
		module_name string
		expected    string
	}

	test_cases := [
		TestCase{
			module_name: 'incubaid.herolib.core.code'
			expected:    'code.md'
		},
		TestCase{
			module_name: 'testdata'
			expected:    'testdata.md'
		},
		TestCase{
			module_name: 'testdata.services'
			expected:    'services.md'
		},
	]

	for test_case in test_cases {
		result := gen.module_to_filename(test_case.module_name)
		assert result == test_case.expected, 'Expected ${test_case.expected}, got ${result}'
		console.print_item('  ✓ ${test_case.module_name} -> ${result}')
	}
	console.print_lf(1)

	// Test module documentation generation
	console.print_header('Test 2: Module Documentation Generation')

	// Get a testdata module
	modules := gen.parser.list_modules()
	testdata_modules := modules.filter(it.contains('testdata'))

	assert testdata_modules.len > 0, 'No testdata modules found'

	for mod_name in testdata_modules {
		console.print_item('Generating docs for: ${mod_name}')

		md := gen.module_to_markdown(mod_name)!

		// Validate markdown content
		assert md.len > 0, 'Generated markdown is empty'
		assert md.contains('# Module:'), 'Missing module header'

		// List basic structure checks
		structs := gen.parser.list_structs(mod_name)
		functions := gen.parser.list_functions(mod_name)
		consts := gen.parser.list_constants(mod_name)

		if structs.len > 0 {
			assert md.contains('## Structs'), 'Missing Structs section'
			console.print_item('  - Found ${structs.len} structs')
		}

		if functions.len > 0 {
			assert md.contains('## Functions'), 'Missing Functions section'
			console.print_item('  - Found ${functions.len} functions')
		}

		if consts.len > 0 {
			assert md.contains('## Constants'), 'Missing Constants section'
			console.print_item('  - Found ${consts.len} constants')
		}
	}
	console.print_lf(1)

	// Test file writing
	console.print_header('Test 3: Write Generated Files')

	for mod_name in testdata_modules {
		gen.generate_module(mod_name)!
	}

	// Verify files were created
	files := os.ls(output_dir)!
	assert files.len > 0, 'No files generated'

	console.print_item('Generated ${files.len} markdown files:')
	for file in files {
		console.print_item('  - ${file}')

		// Verify file content
		filepath := os.join_path(output_dir, file)
		content := os.read_file(filepath)!
		assert content.len > 0, 'Generated file is empty: ${file}'
	}
	console.print_lf(1)

	// Test content validation
	console.print_header('Test 4: Content Validation')

	for file in files {
		filepath := os.join_path(output_dir, file)
		content := os.read_file(filepath)!

		// Check for required sections
		has_module_header := content.contains('# Module:')
		has_imports := content.contains('## Imports') || !content.contains('import ')
		has_valid_format := content.contains('```v')

		assert has_module_header, '${file}: Missing module header'
		assert has_valid_format || file.contains('services'), '${file}: Invalid markdown format'

		console.print_item('  ✓ ${file}: Valid content')
	}
	console.print_lf(1)

	console.print_green('✓ All CodeGenerator tests passed!')
}

// Helper: Setup test directory (copy from codeparser test)
fn setup_test_directory() string {
	test_dir := '/tmp/codegen_test_data'

	os.rmdir_all(test_dir) or {}

	current_file := @FILE
	current_dir := os.dir(current_file)

	// Navigate to codeparser testdata
	codeparser_dir := os.join_path(os.dir(current_dir), 'codeparser')
	testdata_dir := os.join_path(codeparser_dir, 'testdata')

	if !os.is_dir(testdata_dir) {
		panic('testdata directory not found at: ${testdata_dir}')
	}

	os.mkdir_all(test_dir) or { panic('Failed to create test directory') }
	copy_directory(testdata_dir, test_dir) or { panic('Failed to copy testdata: ${err}') }

	return test_dir
}

fn copy_directory(src string, dst string) ! {
	entries := os.ls(src)!

	for entry in entries {
		src_path := os.join_path(src, entry)
		dst_path := os.join_path(dst, entry)

		if os.is_dir(src_path) {
			os.mkdir_all(dst_path)!
			copy_directory(src_path, dst_path)!
		} else {
			content := os.read_file(src_path)!
			os.write_file(dst_path, content)!
		}
	}
}
1
lib/core/codegenerator/templates/function.md.template
Normal file
@@ -0,0 +1 @@
fn ${func.name}(${func.params.map(it.name + ': ' + it.typ.symbol()).join(', ')}) ${func.result.typ.symbol()}
5
lib/core/codegenerator/templates/module.md.template
Normal file
@@ -0,0 +1,5 @@
# Module: ${module_name}

This module provides functionality for code generation and documentation.

**Location:** `${module_name.replace('.', '/')}`
2
lib/core/codegenerator/templates/struct.md.template
Normal file
@@ -0,0 +1,2 @@
struct ${struct_.name} {
}
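These templates rely on V string interpolation; one plausible way to render them is V's compile-time `$tmpl`, sketched here (the wrapper function is an assumption, not necessarily how CodeGenerator actually renders them):

```v
// Assumes this source file sits next to the templates/ folder and that the
// variable names in scope match the placeholders in the template.
fn render_module_doc(module_name string) string {
	return $tmpl('templates/module.md.template')
}
```

The struct and function templates would be rendered the same way, with `struct_` and `func` values in scope.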
@@ -204,4 +204,4 @@ pub fn (parser CodeParser) to_json(module_name string) !string {
	}

	return json.encode_pretty(result)
}
22
lib/core/generator/heromodels/ai_instructions.v
Normal file
@@ -0,0 +1,22 @@
module heromodels

import incubaid.herolib.develop.gittools
import incubaid.herolib.core.pathlib

pub fn aiprompts_path() !string {
	return gittools.path(
		git_url: 'https://github.com/Incubaid/herolib/tree/development/aiprompts'
	)!.path
}

pub fn ai_instructions_hero_models() !string {
	path := '${aiprompts_path()!}/ai_instructions_hero_models.md'
	mut ppath := pathlib.get_file(path: path, create: false)!
	return ppath.read()!
}

pub fn ai_instructions_vlang_herolib_core() !string {
	path := '${aiprompts_path()!}/vlang_herolib_core.md'
	mut ppath := pathlib.get_file(path: path, create: false)!
	return ppath.read()!
}
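A short sketch of how these helpers might be used together (assumes gittools can reach, or has cached, the herolib repository):

```v
// Sketch: load both prompt texts that the generator below feeds to the AI client.
fn load_prompt_context() !(string, string) {
	instructions := ai_instructions_hero_models()!
	core_guide := ai_instructions_vlang_herolib_core()!
	return instructions, core_guide
}
```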
182
lib/core/generator/heromodels/code_generator.v
Executable file
@@ -0,0 +1,182 @@
module heromodels

import incubaid.herolib.core.pathlib
import incubaid.herolib.ui.console
import incubaid.herolib.ai.client
import os

pub fn do() ! {
	console.print_header('Code Generator - V File Analyzer Using AI')

	// Find herolib root directory using @FILE
	script_dir := os.dir(@FILE)
	// Navigate from lib/core/generator/heromodels to root: up 4 levels
	herolib_root := os.dir(os.dir(os.dir(os.dir(script_dir))))

	console.print_item('HeroLib Root: ${herolib_root}')

	// The directory we want to analyze (lib/core in this case)
	target_dir := herolib_root + '/lib/core'
	console.print_item('Target Directory: ${target_dir}')
	console.print_lf(1)

	// Load instruction files from aiprompts
	console.print_item('Loading instruction files...')

	mut ai_instructions_file := pathlib.get(herolib_root +
		'/aiprompts/ai_instructions_hero_models.md')
	mut vlang_core_file := pathlib.get(herolib_root + '/aiprompts/vlang_herolib_core.md')

	ai_instructions_content := ai_instructions_file.read()!
	vlang_core_content := vlang_core_file.read()!

	console.print_green('✓ Instruction files loaded successfully')
	console.print_lf(1)

	// Initialize AI client
	console.print_item('Initializing AI client...')
	mut aiclient := client.new()!
	console.print_green('✓ AI client initialized')
	console.print_lf(1)

	// Get all V files from target directory
	console.print_item('Scanning directory for V files...')

	mut target_path := pathlib.get_dir(path: target_dir, create: false)!
	mut all_files := target_path.list(
		regex:     [r'\.v$']
		recursive: true
	)!

	console.print_item('Found ${all_files.paths.len} total V files')

	// TODO: Walk over all files which do NOT end with _test.v and do NOT start with factory
	// Each file becomes a src_file_content object
	mut files_to_process := []pathlib.Path{}

	for file in all_files.paths {
		file_name := file.name()

		// Skip test files
		if file_name.ends_with('_test.v') {
			continue
		}

		// Skip factory files
		if file_name.starts_with('factory') {
			continue
		}

		files_to_process << file
	}

	console.print_green('✓ After filtering: ${files_to_process.len} files to process')
	console.print_lf(2)

	// Process each file with AI
	total_files := files_to_process.len

	for idx, mut file in files_to_process {
		current_idx := idx + 1
		process_file_with_ai(mut aiclient, mut file, ai_instructions_content, vlang_core_content,
			current_idx, total_files)!
	}

	console.print_lf(1)
	console.print_header('✓ Code Generation Complete')
	console.print_item('Processed ${files_to_process.len} files')
	console.print_lf(1)
}

fn process_file_with_ai(mut aiclient client.AIClient, mut file pathlib.Path, ai_instructions string, vlang_core string, current int, total int) ! {
	file_name := file.name()
	src_file_path := file.absolute()

	console.print_item('[${current}/${total}] Analyzing: ${file_name}')

	// Read the file content - this is the src_file_content
	src_file_content := file.read()!

	// Build comprehensive system prompt
	// TODO: Load instructions from prompt files and use in prompt

	// Build the user prompt with context
	user_prompt := '
File: ${file_name}
Path: ${src_file_path}

Current content:
```v
${src_file_content}
```

Please improve this V file by:
1. Following V language best practices
2. Ensuring proper error handling with ! and or blocks
3. Adding clear documentation comments
4. Following herolib patterns and conventions
5. Improving code clarity and readability

Context from herolib guidelines:

VLANG HEROLIB CORE:
${vlang_core}

AI INSTRUCTIONS FOR HERO MODELS:
${ai_instructions}

Return ONLY the complete improved file wrapped in ```v code block.
'

	console.print_debug_title('Sending to AI', 'Calling AI model to improve ${file_name}...')

	// TODO: Call AI client with model gemini-3-pro
	aiclient.write_from_prompt(file, user_prompt, [.pro]) or {
		console.print_stderr('Error processing ${file_name}: ${err}')
		return
	}

	mut improved_file := pathlib.get(src_file_path + '.improved')
	improved_content := improved_file.read()!

	// Display improvements summary
	sample_chars := 250
	preview := if improved_content.len > sample_chars {
		improved_content[..sample_chars] + '... (preview truncated)'
	} else {
		improved_content
	}

	console.print_debug_title('AI Analysis Results for ${file_name}', preview)

	// Optional: Save improved version for review
	// Uncomment to enable saving
	// improved_file_path := src_file_path + '.improved'
	// mut improved_file := pathlib.get_file(path: improved_file_path, create: true)!
	// improved_file.write(improved_content)!
	// console.print_green('✓ Improvements saved to: ${improved_file_path}')

	console.print_lf(1)
}

// Extract V code from markdown code block
fn extract_code_block(response string) string {
	// Look for ```v ... ``` block
	start_marker := '```v'
	end_marker := '```'

	start_idx := response.index(start_marker) or {
		// If no ```v, try to return as-is
		return response
	}

	mut content_start := start_idx + start_marker.len
	if content_start < response.len && response[content_start] == `\n` {
		content_start++
	}

	// Find the closing fence after the opening marker (not the opening marker itself)
	end_idx := response.last_index(end_marker) or { return response[content_start..] }
	if end_idx <= content_start {
		return response[content_start..]
	}

	extracted := response[content_start..end_idx]
	return extracted.trim_space()
}
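For illustration, the behaviour `extract_code_block` aims for on a typical AI reply (the reply text is invented; the backticks are built indirectly so this page stays readable):

```v
fn example_extract() {
	fence := u8(96).ascii_str().repeat(3) // three backticks
	response := 'Some commentary\n${fence}v\nfn add(a int, b int) int {\n\treturn a + b\n}\n${fence}\ntrailing notes'
	code := extract_code_block(response)
	assert code.starts_with('fn add')
	assert !code.contains(fence)
}
```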
25
lib/core/generator/heromodels/templates/model_code.md
Normal file
@@ -0,0 +1,25 @@
File: ${file_name}
Path: ${src_file_path}

Current content:

```v
${src_file_content}
```

Please improve this V file by:
1. Following V language best practices
2. Ensuring proper error handling with ! and or blocks
3. Adding clear documentation comments
4. Following herolib patterns and conventions
5. Improving code clarity and readability

Context from herolib guidelines:

VLANG HEROLIB CORE:
${vlang_core}

AI INSTRUCTIONS FOR HERO MODELS:
${ai_instructions}

Return ONLY the complete improved file wrapped in ```v code block.
@@ -2,7 +2,6 @@ module pathlib

import os
import regex
-// import incubaid.herolib.core.smartid
import incubaid.herolib.ui.console

@[params]
@@ -38,6 +37,7 @@ pub mut:
// example see https://github.com/incubaid/herolib/blob/development/examples/core/pathlib/examples/list/path_list.v
//
// e.g. p.list(regex:[r'.*\.v$'])! //notice the r in front of string, this is regex for all files ending with .v
+// e.g.
//
// ```
// please note links are ignored for walking over dirstructure (for files and dirs)
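The comment above documents the regex form of `list`; a small self-contained sketch of that call (the directory path is made up):

```v
import incubaid.herolib.core.pathlib

fn main() {
	mut p := pathlib.get('/tmp/some_code_dir') // hypothetical directory
	// r'...' is a raw string: match every file ending in .v
	res := p.list(
		regex:     [r'.*\.v$']
		recursive: true
	) or { panic(err) }
	for f in res.paths {
		println(f.name())
	}
}
```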
@@ -1,64 +0,0 @@
# CodeWalker Module

The CodeWalker module provides functionality to walk through directories and create a map of files with their content. It is particularly useful for processing code directories while respecting gitignore patterns.

## Features

- Walk through directories recursively
- Respect gitignore patterns to exclude files
- Store file content in memory
- Export files back to a directory structure

## Usage

```v
import incubaid.herolib.lib.lang.codewalker

mut cw := codewalker.new('/tmp/adir')!

// Get content of a specific file
content := cw.filemap.get('path/to/file.txt')!

// return output again
cw.filemap.content()

// Export all files to a destination directory
cw.filemap.export('/tmp/exported_files')!
```

### format of filemap

## full files

```

text before will be ignored

===FILE:filename===
code
===FILE:filename===
code
===END===

text behind will be ignored

```

## files with changes

```

text before will be ignored

===FILECHANGE:filename===
code
===FILECHANGE:filename===
code
===END===

text behind will be ignored

```

FILECHANGE and FILE can be mixed: FILE means we have the full content, otherwise only the changed content (e.g. a method or a struct), and then we need to use morph to change it.
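A sketch of feeding that format to the CodeWalker parser shown below (file names and content are illustrative):

```v
fn example_parse() ! {
	mut cw := new()! // no-arg factory shown further below
	input := '===FILE:hello.v===
module main
===FILE:README.md===
# hello
===END==='
	fm := cw.parse(input)!
	assert fm.content.len == 2
	assert fm.content['hello.v'].contains('module main')
}
```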
@@ -1,219 +0,0 @@
module codewalker

import incubaid.herolib.core.pathlib

pub struct CodeWalker {
pub mut:
	ignorematcher IgnoreMatcher
	errors        []CWError
}

@[params]
pub struct FileMapArgs {
pub mut:
	path         string
	content      string
	content_read bool = true // if we start from a path and this is false, we don't read the content
}

// Public factory to parse the filemap-text format directly
pub fn (mut cw CodeWalker) parse(content string) !FileMap {
	return cw.filemap_get_from_content(content)
}

pub fn (mut cw CodeWalker) filemap_get(args FileMapArgs) !FileMap {
	if args.path != '' {
		return cw.filemap_get_from_path(args.path, args.content_read)!
	} else if args.content != '' {
		return cw.filemap_get_from_content(args.content)!
	} else {
		return error('Either path or content must be provided to get FileMap')
	}
}

// get the filemap from a path
fn (mut cw CodeWalker) filemap_get_from_path(path string, content_read bool) !FileMap {
	mut dir := pathlib.get(path)
	if !dir.exists() || !dir.is_dir() {
		return error('Source directory "${path}" does not exist')
	}

	mut files := dir.list(ignore_default: false)!
	mut fm := FileMap{
		source: path
	}

	// collect ignore patterns from .gitignore and .heroignore files (recursively),
	// and scope them to the directory where they were found
	for mut p in files.paths {
		if p.is_file() {
			name := p.name()
			if name == '.gitignore' || name == '.heroignore' {
				content := p.read() or { '' }
				if content != '' {
					rel := p.path_relative(path) or { '' }
					base_rel := if rel.contains('/') { rel.all_before_last('/') } else { '' }
					cw.ignorematcher.add_content_with_base(base_rel, content)
				}
			}
		}
	}

	for mut file in files.paths {
		if file.is_file() {
			name := file.name()
			if name == '.gitignore' || name == '.heroignore' {
				continue
			}
			relpath := file.path_relative(path)!
			if cw.ignorematcher.is_ignored(relpath) {
				continue
			}
			if content_read {
				content := file.read()!
				fm.content[relpath] = content
			} else {
				fm.content[relpath] = ''
			}
		}
	}
	return fm
}

// Parse a header line and return (kind, filename)
// kind: 'FILE' | 'FILECHANGE' | 'LEGACY' | 'END'
fn (mut cw CodeWalker) parse_header(line string, linenr int) !(string, string) {
	if line == '===END===' {
		return 'END', ''
	}
	if line.starts_with('===FILE:') && line.ends_with('===') {
		name := line.trim_left('=').trim_right('=').all_after(':').trim_space()
		if name.len < 1 {
			cw.error('Invalid filename, < 1 chars.', linenr, 'filename_get', true)!
		}
		return 'FILE', name
	}
	if line.starts_with('===FILECHANGE:') && line.ends_with('===') {
		name := line.trim_left('=').trim_right('=').all_after(':').trim_space()
		if name.len < 1 {
			cw.error('Invalid filename, < 1 chars.', linenr, 'filename_get', true)!
		}
		return 'FILECHANGE', name
	}
	// Legacy header: ===filename===
	if line.starts_with('===') && line.ends_with('===') {
		name := line.trim('=').trim_space()
		if name == 'END' {
			return 'END', ''
		}
		if name.len < 1 {
			cw.error('Invalid filename, < 1 chars.', linenr, 'filename_get', true)!
		}
		return 'LEGACY', name
	}
	return '', ''
}

fn (mut cw CodeWalker) error(msg string, linenr int, category string, fail bool) ! {
	cw.errors << CWError{
		message:  msg
		linenr:   linenr
		category: category
	}
	if fail {
		return error(msg)
	}
}

// internal function to get the filename
fn (mut cw CodeWalker) parse_filename_get(line string, linenr int) !string {
	parts := line.split('===')
	if parts.len < 2 {
		cw.error('Invalid filename line: ${line}.', linenr, 'filename_get', true)!
	}
	mut name := parts[1].trim_space()
	if name.len < 2 {
		cw.error('Invalid filename, < 2 chars: ${name}.', linenr, 'filename_get', true)!
	}
	return name
}

enum ParseState {
	start
	in_block
}

// Parse filemap content string
fn (mut cw CodeWalker) filemap_get_from_content(content string) !FileMap {
	mut fm := FileMap{}

	mut current_kind := '' // 'FILE' | 'FILECHANGE' | 'LEGACY'
	mut filename := ''
	mut block := []string{}
	mut had_any_block := false

	mut linenr := 0

	for line in content.split_into_lines() {
		linenr += 1
		line2 := line.trim_space()

		kind, name := cw.parse_header(line2, linenr)!
		if kind == 'END' {
			if filename == '' {
				if had_any_block {
					cw.error("Filename 'END' is reserved.", linenr, 'parse', true)!
				} else {
					cw.error('END found at start, not good.', linenr, 'parse', true)!
				}
			} else {
				if current_kind == 'FILE' || current_kind == 'LEGACY' {
					fm.content[filename] = block.join_lines()
				} else if current_kind == 'FILECHANGE' {
					fm.content_change[filename] = block.join_lines()
				}
				filename = ''
				block = []string{}
				current_kind = ''
			}
			continue
		}

		if kind in ['FILE', 'FILECHANGE', 'LEGACY'] {
			// starting a new block header
			if filename != '' {
				if current_kind == 'FILE' || current_kind == 'LEGACY' {
					fm.content[filename] = block.join_lines()
				} else if current_kind == 'FILECHANGE' {
					fm.content_change[filename] = block.join_lines()
				}
			}
			filename = name
			current_kind = kind
			block = []string{}
			had_any_block = true
			continue
		}

		// Non-header line
		if filename == '' {
			if line2.len > 0 {
				cw.error("Unexpected content before first file block: '${line}'.", linenr,
					'parse', false)!
			}
		} else {
			block << line
		}
	}

	// EOF: flush current block if any
	if filename != '' {
		if current_kind == 'FILE' || current_kind == 'LEGACY' {
			fm.content[filename] = block.join_lines()
		} else if current_kind == 'FILECHANGE' {
			fm.content_change[filename] = block.join_lines()
		}
	}

	return fm
}
@@ -1,12 +0,0 @@
module codewalker

@[params]
pub struct CodeWalkerArgs {
	// No fields required for now; kept for API stability
}

pub fn new(args CodeWalkerArgs) !CodeWalker {
	mut cw := CodeWalker{}
	cw.ignorematcher = gitignore_matcher_new()
	return cw
}
@@ -1,4 +1,4 @@
-module codewalker
+module heroprompt

import os

@@ -82,99 +82,6 @@ pub:
	typ string
}

-// list_directory lists the contents of a directory.
-// - base_path: workspace base path
-// - rel_path: relative path from base (or absolute path)
-// Returns a list of DirItem with name and type (file/directory).
-pub fn list_directory(base_path string, rel_path string) ![]DirItem {
-	dir := resolve_path(base_path, rel_path)
-	if dir.len == 0 {
-		return error('base_path not set')
-	}
-	entries := os.ls(dir) or { return error('cannot list directory') }
-	mut out := []DirItem{}
-	for e in entries {
-		full := os.join_path(dir, e)
-		if os.is_dir(full) {
-			out << DirItem{
-				name: e
-				typ:  'directory'
-			}
-		} else if os.is_file(full) {
-			out << DirItem{
-				name: e
-				typ:  'file'
-			}
-		}
-	}
-	return out
-}
-
-// list_directory_filtered lists the contents of a directory with ignore filtering applied.
-// - base_path: workspace base path
-// - rel_path: relative path from base (or absolute path)
-// - ignore_matcher: IgnoreMatcher to filter out ignored files/directories
-// Returns a list of DirItem with name and type (file/directory), filtered by ignore patterns.
-pub fn list_directory_filtered(base_path string, rel_path string, ignore_matcher &IgnoreMatcher) ![]DirItem {
-	dir := resolve_path(base_path, rel_path)
-	if dir.len == 0 {
-		return error('base_path not set')
-	}
-	entries := os.ls(dir) or { return error('cannot list directory') }
-	mut out := []DirItem{}
-	for e in entries {
-		full := os.join_path(dir, e)
-
-		// Calculate relative path from base_path for ignore checking
-		mut check_path := if rel_path.len > 0 {
-			if rel_path.ends_with('/') { rel_path + e } else { rel_path + '/' + e }
-		} else {
-			e
-		}
-
-		// For directories, also check with trailing slash
-		is_directory := os.is_dir(full)
-		mut should_ignore := ignore_matcher.is_ignored(check_path)
-		if is_directory && !should_ignore {
-			// Also check directory pattern with trailing slash
-			should_ignore = ignore_matcher.is_ignored(check_path + '/')
-		}
-
-		// Check if this entry should be ignored
-		if should_ignore {
-			continue
-		}
-
-		if is_directory {
-			out << DirItem{
-				name: e
-				typ:  'directory'
-			}
-		} else if os.is_file(full) {
-			out << DirItem{
-				name: e
-				typ:  'file'
-			}
-		}
-	}
-	return out
-}
-
-// list_files_recursive recursively lists all files in a directory
-pub fn list_files_recursive(root string) []string {
-	mut out := []string{}
-	entries := os.ls(root) or { return out }
-	for e in entries {
-		fp := os.join_path(root, e)
-		if os.is_dir(fp) {
-			out << list_files_recursive(fp)
-		} else if os.is_file(fp) {
-			out << fp
-		}
-	}
-	return out
-}
-
// build_file_tree_fs builds a file system tree for given root directories
pub fn build_file_tree_fs(roots []string, prefix string) string {
	mut out := ''