diff --git a/examples/aiexamples/groq.vsh b/examples/aiexamples/groq.vsh
index 7ccd4db3..bdf753d1 100755
--- a/examples/aiexamples/groq.vsh
+++ b/examples/aiexamples/groq.vsh
@@ -5,25 +5,24 @@ module main
import freeflowuniverse.herolib.clients.openai
import os
-
-fn test1(mut client openai.OpenAI)!{
-
-
- instruction:='
+fn test1(mut client openai.OpenAI) ! {
+ instruction := '
You are a template language converter. You convert Pug templates to Jet templates.
The target template language, Jet, is defined as follows:
'
// Create a chat completion request
- res := client.chat_completion(msgs:openai.Messages{
- messages: [
- openai.Message{
- role: .user
- content: 'What are the key differences between Groq and other AI inference providers?'
- },
- ]
- })!
+ res := client.chat_completion(
+ msgs: openai.Messages{
+ messages: [
+ openai.Message{
+ role: .user
+ content: 'What are the key differences between Groq and other AI inference providers?'
+ },
+ ]
+ }
+ )!
// Print the response
println('\nGroq AI Response:')
@@ -33,23 +32,21 @@ fn test1(mut client openai.OpenAI)!{
println('Prompt tokens: ${res.usage.prompt_tokens}')
println('Completion tokens: ${res.usage.completion_tokens}')
println('Total tokens: ${res.usage.total_tokens}')
-
}
-
-fn test2(mut client openai.OpenAI)!{
-
+fn test2(mut client openai.OpenAI) ! {
// Create a chat completion request
res := client.chat_completion(
- model:"deepseek-r1-distill-llama-70b",
- msgs:openai.Messages{
- messages: [
- openai.Message{
- role: .user
- content: 'A story of 10 lines?'
- },
- ]
- })!
+ model: 'deepseek-r1-distill-llama-70b'
+ msgs: openai.Messages{
+ messages: [
+ openai.Message{
+ role: .user
+ content: 'A story of 10 lines?'
+ },
+ ]
+ }
+ )!
println('\nGroq AI Response:')
println('==================')
@@ -57,21 +54,18 @@ fn test2(mut client openai.OpenAI)!{
println('\nUsage Statistics:')
println('Prompt tokens: ${res.usage.prompt_tokens}')
println('Completion tokens: ${res.usage.completion_tokens}')
- println('Total tokens: ${res.usage.total_tokens}')
-
+ println('Total tokens: ${res.usage.total_tokens}')
}
-
-println('
+println("
TO USE:
-export AIKEY=\'gsk_...\'
-export AIURL=\'https://api.groq.com/openai/v1\'
-export AIMODEL=\'llama-3.3-70b-versatile\'
-')
+export AIKEY='gsk_...'
+export AIURL='https://api.groq.com/openai/v1'
+export AIMODEL='llama-3.3-70b-versatile'
+")
-mut client:=openai.get(name:"test")!
+mut client := openai.get(name: 'test')!
println(client)
-
// test1(mut client)!
test2(mut client)!
diff --git a/examples/aiexamples/jetconvertor.vsh b/examples/aiexamples/jetconvertor.vsh
index 8c7bb5cd..78823047 100755
--- a/examples/aiexamples/jetconvertor.vsh
+++ b/examples/aiexamples/jetconvertor.vsh
@@ -4,4 +4,4 @@ import freeflowuniverse.herolib.mcp.aitools
// aitools.convert_pug("/root/code/github/freeflowuniverse/herolauncher/pkg/herolauncher/web/templates/admin")!
-aitools.convert_pug("/root/code/github/freeflowuniverse/herolauncher/pkg/zaz/webui/templates")!
\ No newline at end of file
+aitools.convert_pug('/root/code/github/freeflowuniverse/herolauncher/pkg/zaz/webui/templates')!
diff --git a/examples/aiexamples/qdrant.vsh b/examples/aiexamples/qdrant.vsh
index b6d3e9a1..565025da 100755
--- a/examples/aiexamples/qdrant.vsh
+++ b/examples/aiexamples/qdrant.vsh
@@ -12,7 +12,7 @@ println('Starting Qdrant example script')
println('Current directory: ${os.getwd()}')
println('Home directory: ${os.home_dir()}')
-mut i:=qdrant_installer.get()!
+mut i := qdrant_installer.get()!
i.install()!
// 1. Get the qdrant client
diff --git a/examples/webtools/docusaurus_example.vsh b/examples/webtools/docusaurus_example.vsh
index 45c550ea..bb3e0085 100755
--- a/examples/webtools/docusaurus_example.vsh
+++ b/examples/webtools/docusaurus_example.vsh
@@ -6,5 +6,3 @@ import freeflowuniverse.herolib.web.docusaurus
mut docs := docusaurus.new(
build_path: '/tmp/docusaurus_build'
)!
-
-
diff --git a/examples/webtools/docusaurus_example_complete.vsh b/examples/webtools/docusaurus_example_complete.vsh
index eae345d3..122a2309 100755
--- a/examples/webtools/docusaurus_example_complete.vsh
+++ b/examples/webtools/docusaurus_example_complete.vsh
@@ -90,14 +90,13 @@ fn main() {
'
mut docs := docusaurus.new(
- build_path: os.join_path(os.home_dir(), 'hero/var/docusaurus_demo1')
- update: true // Update the templates
- heroscript: hero_script
+ build_path: os.join_path(os.home_dir(), 'hero/var/docusaurus_demo1')
+ update: true // Update the templates
+ heroscript: hero_script
) or {
- eprintln('Error creating docusaurus factory with inline script: ${err}')
- exit(1)
+ eprintln('Error creating docusaurus factory with inline script: ${err}')
+ exit(1)
}
-
// Create a site directory if it doesn't exist
site_path := os.join_path(os.home_dir(), 'hero/var/docusaurus_demo_src')
@@ -204,19 +203,19 @@ console.log(result);
eprintln('Error generating site: ${err}')
exit(1)
}
-
+
println('Site generated successfully!')
// Choose which operation to perform:
-
- // Option 1: Run in development mode
+
+ // Option 1: Run in development mode
// This will start a development server in a screen session
println('Starting development server...')
site.dev() or {
eprintln('Error starting development server: ${err}')
exit(1)
}
-
+
// Option 2: Build for production (uncomment to use)
/*
println('Building site for production...')
@@ -236,4 +235,4 @@ console.log(result);
}
println('Site published successfully!')
*/
-}
\ No newline at end of file
+}
diff --git a/lib/ai/escalayer/escalayer.v b/lib/ai/escalayer/escalayer.v
index 2c1fac97..8648a4b7 100644
--- a/lib/ai/escalayer/escalayer.v
+++ b/lib/ai/escalayer/escalayer.v
@@ -6,35 +6,35 @@ import freeflowuniverse.herolib.clients.openai
@[params]
pub struct TaskParams {
pub:
- name string
- description string
+ name string
+ description string
}
// Create a new task
pub fn new_task(params TaskParams) &Task {
- return &Task{
- name: params.name
- description: params.description
- unit_tasks: []
- current_result: ''
- }
+ return &Task{
+ name: params.name
+ description: params.description
+ unit_tasks: []
+ current_result: ''
+ }
}
// Default model configurations
pub fn default_base_model() ModelConfig {
- return ModelConfig{
- name: 'qwen2.5-7b-instruct'
- provider: 'openai'
- temperature: 0.7
- max_tokens: 2000
- }
+ return ModelConfig{
+ name: 'qwen2.5-7b-instruct'
+ provider: 'openai'
+ temperature: 0.7
+ max_tokens: 2000
+ }
}
pub fn default_retry_model() ModelConfig {
- return ModelConfig{
- name: 'gpt-4'
- provider: 'openai'
- temperature: 0.7
- max_tokens: 4000
- }
-}
\ No newline at end of file
+ return ModelConfig{
+ name: 'gpt-4'
+ provider: 'openai'
+ temperature: 0.7
+ max_tokens: 4000
+ }
+}
diff --git a/lib/ai/escalayer/models.v b/lib/ai/escalayer/models.v
index fb9f6672..179a241f 100644
--- a/lib/ai/escalayer/models.v
+++ b/lib/ai/escalayer/models.v
@@ -5,59 +5,58 @@ import freeflowuniverse.herolib.clients.openai
// ModelConfig defines the configuration for an AI model
pub struct ModelConfig {
pub mut:
- name string
- provider string
- temperature f32
- max_tokens int
+ name string
+ provider string
+ temperature f32
+ max_tokens int
}
// Create model configs
-const claude_3_sonnet = escalayer.ModelConfig{
- name: 'anthropic/claude-3.7-sonnet'
- provider: 'anthropic'
- temperature: 0.7
- max_tokens: 25000
+const claude_3_sonnet = ModelConfig{
+ name: 'anthropic/claude-3.7-sonnet'
+ provider: 'anthropic'
+ temperature: 0.7
+ max_tokens: 25000
}
-const gpt4 = escalayer.ModelConfig{
- name: 'gpt-4'
- provider: 'openai'
- temperature: 0.7
- max_tokens: 25000
+const gpt4 = ModelConfig{
+ name: 'gpt-4'
+ provider: 'openai'
+ temperature: 0.7
+ max_tokens: 25000
}
-
// Call an AI model using OpenRouter
-fn call_ai_model(prompt string, model ModelConfig)! string {
- // Get OpenAI client (configured for OpenRouter)
- mut client := get_openrouter_client()!
-
- // Create the message for the AI
- mut m := openai.Messages{
- messages: [
- openai.Message{
- role: .system
- content: 'You are a helpful assistant.'
- },
- openai.Message{
- role: .user
- content: prompt
- }
- ]
- }
-
- // Call the AI model
- res := client.chat_completion(
- msgs: m,
- model: model.name,
- temperature: model.temperature,
- max_completion_tokens: model.max_tokens
- )!
-
- // Extract the response content
- if res.choices.len > 0 {
- return res.choices[0].message.content
- }
-
- return error('No response from AI model')
-}
\ No newline at end of file
+fn call_ai_model(prompt string, model ModelConfig) !string {
+ // Get OpenAI client (configured for OpenRouter)
+ mut client := get_openrouter_client()!
+
+ // Create the message for the AI
+ mut m := openai.Messages{
+ messages: [
+ openai.Message{
+ role: .system
+ content: 'You are a helpful assistant.'
+ },
+ openai.Message{
+ role: .user
+ content: prompt
+ },
+ ]
+ }
+
+ // Call the AI model
+ res := client.chat_completion(
+ msgs: m
+ model: model.name
+ temperature: model.temperature
+ max_completion_tokens: model.max_tokens
+ )!
+
+ // Extract the response content
+ if res.choices.len > 0 {
+ return res.choices[0].message.content
+ }
+
+ return error('No response from AI model')
+}
diff --git a/lib/ai/escalayer/openrouter.v b/lib/ai/escalayer/openrouter.v
index 51a93525..5d3cece2 100644
--- a/lib/ai/escalayer/openrouter.v
+++ b/lib/ai/escalayer/openrouter.v
@@ -5,19 +5,18 @@ import freeflowuniverse.herolib.osal
import os
// Get an OpenAI client configured for OpenRouter
-fn get_openrouter_client()! &openai.OpenAI {
-
+fn get_openrouter_client() !&openai.OpenAI {
osal.env_set(key: 'OPENROUTER_API_KEY', value: '')
- // Get API key from environment variable
- api_key := os.getenv('OPENROUTER_API_KEY')
- if api_key == '' {
- return error('OPENROUTER_API_KEY environment variable not set')
- }
-
- // Create OpenAI client with OpenRouter base URL
- mut client := openai.get(
- name: 'openrouter'
- )!
-
- return client
-}
\ No newline at end of file
+ // Get API key from environment variable
+ api_key := os.getenv('OPENROUTER_API_KEY')
+ if api_key == '' {
+ return error('OPENROUTER_API_KEY environment variable not set')
+ }
+
+ // Create OpenAI client with OpenRouter base URL
+ mut client := openai.get(
+ name: 'openrouter'
+ )!
+
+ return client
+}
diff --git a/lib/ai/escalayer/task.v b/lib/ai/escalayer/task.v
index 190b65fa..0e8ffa7a 100644
--- a/lib/ai/escalayer/task.v
+++ b/lib/ai/escalayer/task.v
@@ -5,53 +5,61 @@ import log
// Task represents a complete AI task composed of multiple sequential unit tasks
pub struct Task {
pub mut:
- name string
- description string
- unit_tasks []UnitTask
- current_result string
+ name string
+ description string
+ unit_tasks []UnitTask
+ current_result string
}
// UnitTaskParams defines the parameters for creating a new unit task
@[params]
pub struct UnitTaskParams {
pub:
- name string
- prompt_function fn(string) string
- callback_function fn(string)! string
- base_model ?ModelConfig
- retry_model ?ModelConfig
- retry_count ?int
+ name string
+ prompt_function fn (string) string
+ callback_function fn (string) !string
+ base_model ?ModelConfig
+ retry_model ?ModelConfig
+ retry_count ?int
}
// Add a new unit task to the task
pub fn (mut t Task) new_unit_task(params UnitTaskParams) &UnitTask {
- mut unit_task := UnitTask{
- name: params.name
- prompt_function: params.prompt_function
- callback_function: params.callback_function
- base_model: if base_model := params.base_model { base_model } else { default_base_model() }
- retry_model: if retry_model := params.retry_model { retry_model } else { default_retry_model() }
- retry_count: if retry_count := params.retry_count { retry_count } else { 3 }
- }
-
- t.unit_tasks << unit_task
- return &t.unit_tasks[t.unit_tasks.len - 1]
+ mut unit_task := UnitTask{
+ name: params.name
+ prompt_function: params.prompt_function
+ callback_function: params.callback_function
+ base_model: if base_model := params.base_model {
+ base_model
+ } else {
+ default_base_model()
+ }
+ retry_model: if retry_model := params.retry_model {
+ retry_model
+ } else {
+ default_retry_model()
+ }
+ retry_count: if retry_count := params.retry_count { retry_count } else { 3 }
+ }
+
+ t.unit_tasks << unit_task
+ return &t.unit_tasks[t.unit_tasks.len - 1]
}
// Initiate the task execution
-pub fn (mut t Task) initiate(input string)! string {
- mut current_input := input
-
- for i, mut unit_task in t.unit_tasks {
- log.error('Executing unit task ${i+1}/${t.unit_tasks.len}: ${unit_task.name}')
-
- // Execute the unit task with the current input
- result := unit_task.execute(current_input)!
-
- // Update the current input for the next unit task
- current_input = result
- t.current_result = result
- }
-
- return t.current_result
-}
\ No newline at end of file
+pub fn (mut t Task) initiate(input string) !string {
+ mut current_input := input
+
+ for i, mut unit_task in t.unit_tasks {
+ log.error('Executing unit task ${i + 1}/${t.unit_tasks.len}: ${unit_task.name}')
+
+ // Execute the unit task with the current input
+ result := unit_task.execute(current_input)!
+
+ // Update the current input for the next unit task
+ current_input = result
+ t.current_result = result
+ }
+
+ return t.current_result
+}
diff --git a/lib/ai/escalayer/unit_task.v b/lib/ai/escalayer/unit_task.v
index 58268555..60a0e489 100644
--- a/lib/ai/escalayer/unit_task.v
+++ b/lib/ai/escalayer/unit_task.v
@@ -6,66 +6,66 @@ import freeflowuniverse.herolib.clients.openai
// UnitTask represents a single step in the task
pub struct UnitTask {
pub mut:
- name string
- prompt_function fn(string) string
- callback_function fn(string)! string
- base_model ModelConfig
- retry_model ModelConfig
- retry_count int
+ name string
+ prompt_function fn (string) string
+ callback_function fn (string) !string
+ base_model ModelConfig
+ retry_model ModelConfig
+ retry_count int
}
// Execute the unit task
-pub fn (mut ut UnitTask) execute(input string)! string {
- // Generate the prompt using the prompt function
- prompt := ut.prompt_function(input)
-
- // Try with the base model first
- mut current_model := ut.base_model
- mut attempts := 0
- mut max_attempts := ut.retry_count + 1 // +1 for the initial attempt
- mut absolute_max_attempts := 1 // Hard limit on total attempts
- mut last_error := ''
-
- for attempts < max_attempts && attempts < absolute_max_attempts {
- attempts++
-
- // If we've exhausted retries with the base model, switch to the retry model
- if attempts > ut.retry_count {
- log.error('Escalating to more powerful model: ${ut.retry_model.name}')
- current_model = ut.retry_model
- // Calculate remaining attempts but don't exceed absolute max
- max_attempts = attempts + ut.retry_count
- if max_attempts > absolute_max_attempts {
- max_attempts = absolute_max_attempts
- }
- }
-
- log.error('Attempt ${attempts} with model ${current_model.name}')
-
- // Prepare the prompt with error feedback if this is a retry
- mut current_prompt := prompt
- if last_error != '' {
- current_prompt = 'Previous attempt failed with error: ${last_error}\n\n${prompt}'
- }
-
- // Call the AI model
- response := call_ai_model(current_prompt, current_model) or {
- log.error('AI call failed: ${err}')
- last_error = err.str()
- continue // Try again
- }
-
- // Process the response with the callback function
- result := ut.callback_function(response) or {
- // If callback returns an error, retry with the error message
- log.error('Callback returned error: ${err}')
- last_error = err.str()
- continue // Try again
- }
-
- // If we get here, the callback was successful
- return result
- }
-
- return error('Failed to execute unit task after ${attempts} attempts. Last error: ${last_error}')
-}
\ No newline at end of file
+pub fn (mut ut UnitTask) execute(input string) !string {
+ // Generate the prompt using the prompt function
+ prompt := ut.prompt_function(input)
+
+ // Try with the base model first
+ mut current_model := ut.base_model
+ mut attempts := 0
+ mut max_attempts := ut.retry_count + 1 // +1 for the initial attempt
+ mut absolute_max_attempts := 1 // Hard limit on total attempts
+ mut last_error := ''
+
+ for attempts < max_attempts && attempts < absolute_max_attempts {
+ attempts++
+
+ // If we've exhausted retries with the base model, switch to the retry model
+ if attempts > ut.retry_count {
+ log.error('Escalating to more powerful model: ${ut.retry_model.name}')
+ current_model = ut.retry_model
+ // Calculate remaining attempts but don't exceed absolute max
+ max_attempts = attempts + ut.retry_count
+ if max_attempts > absolute_max_attempts {
+ max_attempts = absolute_max_attempts
+ }
+ }
+
+ log.error('Attempt ${attempts} with model ${current_model.name}')
+
+ // Prepare the prompt with error feedback if this is a retry
+ mut current_prompt := prompt
+ if last_error != '' {
+ current_prompt = 'Previous attempt failed with error: ${last_error}\n\n${prompt}'
+ }
+
+ // Call the AI model
+ response := call_ai_model(current_prompt, current_model) or {
+ log.error('AI call failed: ${err}')
+ last_error = err.str()
+ continue // Try again
+ }
+
+ // Process the response with the callback function
+ result := ut.callback_function(response) or {
+ // If callback returns an error, retry with the error message
+ log.error('Callback returned error: ${err}')
+ last_error = err.str()
+ continue // Try again
+ }
+
+ // If we get here, the callback was successful
+ return result
+ }
+
+ return error('Failed to execute unit task after ${attempts} attempts. Last error: ${last_error}')
+}
diff --git a/lib/ai/mcp/backend_interface.v b/lib/ai/mcp/backend_interface.v
index 5e3938b4..6a145ff5 100644
--- a/lib/ai/mcp/backend_interface.v
+++ b/lib/ai/mcp/backend_interface.v
@@ -23,7 +23,7 @@ interface Backend {
tool_get(name string) !Tool
tool_list() ![]Tool
tool_call(name string, arguments map[string]json2.Any) !ToolCallResult
-
+
// Sampling methods
sampling_create_message(params map[string]json2.Any) !SamplingCreateMessageResult
mut:
diff --git a/lib/ai/mcp/backend_memory.v b/lib/ai/mcp/backend_memory.v
index 9e922a96..6539a815 100644
--- a/lib/ai/mcp/backend_memory.v
+++ b/lib/ai/mcp/backend_memory.v
@@ -114,16 +114,14 @@ fn (b &MemoryBackend) prompt_messages_get(name string, arguments map[string]stri
return messages
}
-
fn (b &MemoryBackend) prompt_call(name string, arguments []string) ![]PromptMessage {
// Get the tool handler
handler := b.prompt_handlers[name] or { return error('tool handler not found') }
// Call the handler with the provided arguments
- return handler(arguments) or {panic(err)}
+ return handler(arguments) or { panic(err) }
}
-
// Tool related methods
fn (b &MemoryBackend) tool_exists(name string) !bool {
@@ -165,11 +163,11 @@ fn (b &MemoryBackend) sampling_create_message(params map[string]json2.Any) !Samp
// Return a default implementation that just echoes back a message
// indicating that no sampling handler is registered
return SamplingCreateMessageResult{
- model: 'default'
+ model: 'default'
stop_reason: 'endTurn'
- role: 'assistant'
- content: MessageContent{
- typ: 'text'
+ role: 'assistant'
+ content: MessageContent{
+ typ: 'text'
text: 'Sampling is not configured on this server. Please register a sampling handler.'
}
}
diff --git a/lib/ai/mcp/baobab/baobab_tools.v b/lib/ai/mcp/baobab/baobab_tools.v
index 3c80810b..0c71db4b 100644
--- a/lib/ai/mcp/baobab/baobab_tools.v
+++ b/lib/ai/mcp/baobab/baobab_tools.v
@@ -8,160 +8,165 @@ import freeflowuniverse.herolib.baobab.generator
import freeflowuniverse.herolib.baobab.specification
// generate_methods_file MCP Tool
-//
+//
const generate_methods_file_tool = mcp.Tool{
- name: 'generate_methods_file'
- description: 'Generates a methods file with methods for a backend corresponding to thos specified in an OpenAPI or OpenRPC specification'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- })}
- required: ['source']
- }
+ name: 'generate_methods_file'
+ description: 'Generates a methods file with methods for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'source': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ })
+ }
+ required: ['source']
+ }
}
pub fn (d &Baobab) generate_methods_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- source := json.decode[generator.Source](arguments["source"].str())!
- result := generator.generate_methods_file_str(source)
- or {
+ source := json.decode[generator.Source](arguments['source'].str())!
+ result := generator.generate_methods_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_module_from_openapi MCP Tool
const generate_module_from_openapi_tool = mcp.Tool{
- name: 'generate_module_from_openapi'
- description: ''
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })}
- required: ['openapi_path']
- }
+ name: 'generate_module_from_openapi'
+ description: ''
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ required: ['openapi_path']
+ }
}
pub fn (d &Baobab) generate_module_from_openapi_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- openapi_path := arguments["openapi_path"].str()
- result := generator.generate_module_from_openapi(openapi_path)
- or {
+ openapi_path := arguments['openapi_path'].str()
+ result := generator.generate_module_from_openapi(openapi_path) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_methods_interface_file MCP Tool
const generate_methods_interface_file_tool = mcp.Tool{
- name: 'generate_methods_interface_file'
- description: 'Generates a methods interface file with method interfaces for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- })}
- required: ['source']
- }
+ name: 'generate_methods_interface_file'
+ description: 'Generates a methods interface file with method interfaces for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'source': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ })
+ }
+ required: ['source']
+ }
}
pub fn (d &Baobab) generate_methods_interface_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- source := json.decode[generator.Source](arguments["source"].str())!
- result := generator.generate_methods_interface_file_str(source)
- or {
+ source := json.decode[generator.Source](arguments['source'].str())!
+ result := generator.generate_methods_interface_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_model_file MCP Tool
const generate_model_file_tool = mcp.Tool{
- name: 'generate_model_file'
- description: 'Generates a model file with data structures for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- })}
- required: ['source']
- }
+ name: 'generate_model_file'
+ description: 'Generates a model file with data structures for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'source': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ })
+ }
+ required: ['source']
+ }
}
pub fn (d &Baobab) generate_model_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- source := json.decode[generator.Source](arguments["source"].str())!
- result := generator.generate_model_file_str(source)
- or {
+ source := json.decode[generator.Source](arguments['source'].str())!
+ result := generator.generate_model_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_methods_example_file MCP Tool
const generate_methods_example_file_tool = mcp.Tool{
- name: 'generate_methods_example_file'
- description: 'Generates a methods example file with example implementations for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- })}
- required: ['source']
- }
+ name: 'generate_methods_example_file'
+ description: 'Generates a methods example file with example implementations for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'source': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ })
+ }
+ required: ['source']
+ }
}
pub fn (d &Baobab) generate_methods_example_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- source := json.decode[generator.Source](arguments["source"].str())!
- result := generator.generate_methods_example_file_str(source)
- or {
+ source := json.decode[generator.Source](arguments['source'].str())!
+ result := generator.generate_methods_example_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/baobab/baobab_tools_test.v b/lib/ai/mcp/baobab/baobab_tools_test.v
index 4ad534b8..cd406ca2 100644
--- a/lib/ai/mcp/baobab/baobab_tools_test.v
+++ b/lib/ai/mcp/baobab/baobab_tools_test.v
@@ -13,7 +13,7 @@ import os
fn test_generate_module_from_openapi_tool() {
// Verify the tool definition
assert generate_module_from_openapi_tool.name == 'generate_module_from_openapi', 'Tool name should be "generate_module_from_openapi"'
-
+
// Verify the input schema
assert generate_module_from_openapi_tool.input_schema.typ == 'object', 'Input schema type should be "object"'
assert 'openapi_path' in generate_module_from_openapi_tool.input_schema.properties, 'Input schema should have "openapi_path" property'
@@ -26,14 +26,14 @@ fn test_generate_module_from_openapi_tool_handler_error() {
// Create arguments with a non-existent file path
mut arguments := map[string]json2.Any{}
arguments['openapi_path'] = json2.Any('non_existent_file.yaml')
-
+
// Call the handler
result := generate_module_from_openapi_tool_handler(arguments) or {
// If the handler returns an error, that's expected
assert err.msg().contains(''), 'Error message should not be empty'
return
}
-
+
// If we get here, the handler should have returned an error result
assert result.is_error, 'Result should indicate an error'
assert result.content.len > 0, 'Error content should not be empty'
@@ -48,7 +48,7 @@ fn test_mcp_tool_call_integration() {
assert false, 'Failed to create MCP server: ${err}'
return
}
-
+
// Create a temporary OpenAPI file for testing
temp_dir := os.temp_dir()
temp_file := os.join_path(temp_dir, 'test_openapi.yaml')
@@ -56,30 +56,30 @@ fn test_mcp_tool_call_integration() {
assert false, 'Failed to create temporary file: ${err}'
return
}
-
+
// Sample tool call request
tool_call_request := '{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"generate_module_from_openapi","arguments":{"openapi_path":"${temp_file}"}}}'
-
+
// Process the request through the handler
response := server.handler.handle(tool_call_request) or {
// Clean up the temporary file
os.rm(temp_file) or {}
-
+
// If the handler returns an error, that's expected in this test environment
// since we might not have all dependencies set up
return
}
-
+
// Clean up the temporary file
os.rm(temp_file) or {}
-
+
// Decode the response to verify its structure
decoded_response := jsonrpc.decode_response(response) or {
// In a test environment, we might get an error due to missing dependencies
// This is acceptable for this test
return
}
-
+
// If we got a successful response, verify it
if !decoded_response.is_error() {
// Parse the result to verify its contents
@@ -87,15 +87,15 @@ fn test_mcp_tool_call_integration() {
assert false, 'Failed to get result: ${err}'
return
}
-
+
// Decode the result to check the content
result_map := json2.raw_decode(result_json) or {
assert false, 'Failed to decode result: ${err}'
return
}.as_map()
-
+
// Verify the result structure
assert 'isError' in result_map, 'Result should have isError field'
assert 'content' in result_map, 'Result should have content field'
}
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/baobab/command.v b/lib/ai/mcp/baobab/command.v
index 7fe612ba..d75a67c8 100644
--- a/lib/ai/mcp/baobab/command.v
+++ b/lib/ai/mcp/baobab/command.v
@@ -2,22 +2,21 @@ module baobab
import cli
-pub const command := cli.Command{
- sort_flags: true
- name: 'baobab'
+pub const command = cli.Command{
+ sort_flags: true
+ name: 'baobab'
// execute: cmd_mcpgen
description: 'baobab command'
- commands: [
+ commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the Baobab server'
- }
+ },
]
-
}
fn cmd_start(cmd cli.Command) ! {
mut server := new_mcp_server(&Baobab{})!
server.start()!
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/baobab/mcp_test.v b/lib/ai/mcp/baobab/mcp_test.v
index 4d31d386..f2d02dcc 100644
--- a/lib/ai/mcp/baobab/mcp_test.v
+++ b/lib/ai/mcp/baobab/mcp_test.v
@@ -67,7 +67,7 @@ fn test_mcp_server_initialize() {
// Verify the protocol version matches what was requested
assert result.protocol_version == '2024-11-05', 'Protocol version should match the request'
-
+
// Verify server info
assert result.server_info.name == 'developer', 'Server name should be "developer"'
}
@@ -113,7 +113,7 @@ fn test_tools_list() {
// Verify that the tools array exists and contains the expected tool
tools := result_map['tools'].arr()
assert tools.len > 0, 'Tools list should not be empty'
-
+
// Find the generate_module_from_openapi tool
mut found_tool := false
for tool in tools {
@@ -123,6 +123,6 @@ fn test_tools_list() {
break
}
}
-
+
assert found_tool, 'generate_module_from_openapi tool should be registered'
}
diff --git a/lib/ai/mcp/baobab/server.v b/lib/ai/mcp/baobab/server.v
index 2cd32436..c90c24c8 100644
--- a/lib/ai/mcp/baobab/server.v
+++ b/lib/ai/mcp/baobab/server.v
@@ -13,18 +13,18 @@ pub fn new_mcp_server(v &Baobab) !&mcp.Server {
// Initialize the server with the empty handlers map
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
- 'generate_module_from_openapi': generate_module_from_openapi_tool
- 'generate_methods_file': generate_methods_file_tool
+ 'generate_module_from_openapi': generate_module_from_openapi_tool
+ 'generate_methods_file': generate_methods_file_tool
'generate_methods_interface_file': generate_methods_interface_file_tool
- 'generate_model_file': generate_model_file_tool
- 'generate_methods_example_file': generate_methods_example_file_tool
+ 'generate_model_file': generate_model_file_tool
+ 'generate_methods_example_file': generate_methods_example_file_tool
}
tool_handlers: {
- 'generate_module_from_openapi': v.generate_module_from_openapi_tool_handler
- 'generate_methods_file': v.generate_methods_file_tool_handler
+ 'generate_module_from_openapi': v.generate_module_from_openapi_tool_handler
+ 'generate_methods_file': v.generate_methods_file_tool_handler
'generate_methods_interface_file': v.generate_methods_interface_file_tool_handler
- 'generate_model_file': v.generate_model_file_tool_handler
- 'generate_methods_example_file': v.generate_methods_example_file_tool_handler
+ 'generate_model_file': v.generate_model_file_tool_handler
+ 'generate_methods_example_file': v.generate_methods_example_file_tool_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
@@ -35,4 +35,4 @@ pub fn new_mcp_server(v &Baobab) !&mcp.Server {
}
})!
return server
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/cmd/compile.vsh b/lib/ai/mcp/cmd/compile.vsh
index 0f994400..687d701f 100755
--- a/lib/ai/mcp/cmd/compile.vsh
+++ b/lib/ai/mcp/cmd/compile.vsh
@@ -13,20 +13,20 @@ prod_mode := fp.bool('prod', `p`, false, 'Build production version (optimized)')
help_requested := fp.bool('help', `h`, false, 'Show help message')
if help_requested {
- println(fp.usage())
- exit(0)
+ println(fp.usage())
+ exit(0)
}
additional_args := fp.finalize() or {
- eprintln(err)
- println(fp.usage())
- exit(1)
+ eprintln(err)
+ println(fp.usage())
+ exit(1)
}
if additional_args.len > 0 {
- eprintln('Unexpected arguments: ${additional_args.join(' ')}')
- println(fp.usage())
- exit(1)
+ eprintln('Unexpected arguments: ${additional_args.join(' ')}')
+ println(fp.usage())
+ exit(1)
}
// Change to the mcp directory
@@ -36,20 +36,20 @@ os.chdir(mcp_dir) or { panic('Failed to change directory to ${mcp_dir}: ${err}')
// Set MCPPATH based on OS
mut mcppath := '/usr/local/bin/mcp'
if os.user_os() == 'macos' {
- mcppath = os.join_path(os.home_dir(), 'hero/bin/mcp')
+ mcppath = os.join_path(os.home_dir(), 'hero/bin/mcp')
}
// Set compilation command based on OS and mode
compile_cmd := if prod_mode {
- 'v -enable-globals -w -n -prod mcp.v'
+ 'v -enable-globals -w -n -prod mcp.v'
} else {
- 'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals mcp.v'
+ 'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals mcp.v'
}
println('Building MCP in ${if prod_mode { 'production' } else { 'debug' }} mode...')
if os.system(compile_cmd) != 0 {
- panic('Failed to compile mcp.v with command: ${compile_cmd}')
+ panic('Failed to compile mcp.v with command: ${compile_cmd}')
}
// Make executable
diff --git a/lib/ai/mcp/cmd/mcp.v b/lib/ai/mcp/cmd/mcp.v
index ee109d23..9c6417e5 100644
--- a/lib/ai/mcp/cmd/mcp.v
+++ b/lib/ai/mcp/cmd/mcp.v
@@ -45,11 +45,11 @@ mcp
description: 'show verbose output'
})
- mut cmd_inspector := cli.Command{
+ mut cmd_inspector := Command{
sort_flags: true
name: 'inspector'
execute: cmd_inspector_execute
- description: 'will list existing mdbooks'
+ description: 'will list existing mdbooks'
}
cmd_inspector.add_flag(Flag{
@@ -68,7 +68,6 @@ mcp
description: 'open inspector'
})
-
cmd_mcp.add_command(rhai_mcp.command)
cmd_mcp.add_command(rust.command)
// cmd_mcp.add_command(baobab.command)
@@ -79,7 +78,7 @@ mcp
cmd_mcp.parse(os.args)
}
-fn cmd_inspector_execute(cmd cli.Command) ! {
+fn cmd_inspector_execute(cmd Command) ! {
open := cmd.flags.get_bool('open') or { false }
if open {
osal.exec(cmd: 'open http://localhost:5173')!
@@ -91,4 +90,4 @@ fn cmd_inspector_execute(cmd cli.Command) ! {
} else {
osal.exec(cmd: 'npx @modelcontextprotocol/inspector')!
}
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/generics.v b/lib/ai/mcp/generics.v
index 99aff54b..285bc575 100644
--- a/lib/ai/mcp/generics.v
+++ b/lib/ai/mcp/generics.v
@@ -1,6 +1,5 @@
module mcp
-
pub fn result_to_mcp_tool_contents[T](result T) []ToolContent {
return [result_to_mcp_tool_content[T](result)]
}
@@ -50,4 +49,4 @@ pub fn array_to_mcp_tool_contents[U](array []U) []ToolContent {
contents << result_to_mcp_tool_content(item)
}
return contents
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/handler_prompts.v b/lib/ai/mcp/handler_prompts.v
index d35ba6aa..bfb26fe0 100644
--- a/lib/ai/mcp/handler_prompts.v
+++ b/lib/ai/mcp/handler_prompts.v
@@ -110,7 +110,8 @@ fn (mut s Server) prompts_get_handler(data string) !string {
// messages := s.backend.prompt_messages_get(request.params.name, request.params.arguments)!
// Create a success response with the result
- response := jsonrpc.new_response_generic[PromptGetResult](request_map['id'].int(), PromptGetResult{
+ response := jsonrpc.new_response_generic[PromptGetResult](request_map['id'].int(),
+ PromptGetResult{
description: prompt.description
messages: messages
})
diff --git a/lib/ai/mcp/handler_sampling.v b/lib/ai/mcp/handler_sampling.v
index ce0d8053..524d0046 100644
--- a/lib/ai/mcp/handler_sampling.v
+++ b/lib/ai/mcp/handler_sampling.v
@@ -30,9 +30,9 @@ pub:
pub struct ModelPreferences {
pub:
- hints []ModelHint
- cost_priority f32 @[json: 'costPriority']
- speed_priority f32 @[json: 'speedPriority']
+ hints []ModelHint
+ cost_priority f32 @[json: 'costPriority']
+ speed_priority f32 @[json: 'speedPriority']
intelligence_priority f32 @[json: 'intelligencePriority']
}
@@ -43,8 +43,8 @@ pub:
system_prompt string @[json: 'systemPrompt']
include_context string @[json: 'includeContext']
temperature f32
- max_tokens int @[json: 'maxTokens']
- stop_sequences []string @[json: 'stopSequences']
+ max_tokens int @[json: 'maxTokens']
+ stop_sequences []string @[json: 'stopSequences']
metadata map[string]json2.Any
}
@@ -63,21 +63,21 @@ fn (mut s Server) sampling_create_message_handler(data string) !string {
request_map := json2.raw_decode(data)!.as_map()
id := request_map['id'].int()
params_map := request_map['params'].as_map()
-
+
// Validate required parameters
if 'messages' !in params_map {
return jsonrpc.new_error_response(id, missing_required_argument('messages')).encode()
}
-
+
if 'maxTokens' !in params_map {
return jsonrpc.new_error_response(id, missing_required_argument('maxTokens')).encode()
}
-
+
// Call the backend to handle the sampling request
result := s.backend.sampling_create_message(params_map) or {
return jsonrpc.new_error_response(id, sampling_error(err.msg())).encode()
}
-
+
// Create a success response with the result
response := jsonrpc.new_response(id, json.encode(result))
return response.encode()
@@ -87,30 +87,30 @@ fn (mut s Server) sampling_create_message_handler(data string) !string {
fn parse_messages(messages_json json2.Any) ![]Message {
messages_arr := messages_json.arr()
mut result := []Message{cap: messages_arr.len}
-
+
for msg_json in messages_arr {
msg_map := msg_json.as_map()
-
+
if 'role' !in msg_map {
return error('Missing role in message')
}
-
+
if 'content' !in msg_map {
return error('Missing content in message')
}
-
+
role := msg_map['role'].str()
content_map := msg_map['content'].as_map()
-
+
if 'type' !in content_map {
return error('Missing type in message content')
}
-
+
typ := content_map['type'].str()
mut text := ''
mut data := ''
mut mimetype := ''
-
+
if typ == 'text' {
if 'text' !in content_map {
return error('Missing text in text content')
@@ -121,7 +121,7 @@ fn parse_messages(messages_json json2.Any) ![]Message {
return error('Missing data in image content')
}
data = content_map['data'].str()
-
+
if 'mimeType' !in content_map {
return error('Missing mimeType in image content')
}
@@ -129,17 +129,17 @@ fn parse_messages(messages_json json2.Any) ![]Message {
} else {
return error('Unsupported content type: ${typ}')
}
-
+
result << Message{
- role: role
+ role: role
content: MessageContent{
- typ: typ
- text: text
- data: data
+ typ: typ
+ text: text
+ data: data
mimetype: mimetype
}
}
}
-
+
return result
}
diff --git a/lib/ai/mcp/handler_tools.v b/lib/ai/mcp/handler_tools.v
index fdeefbc8..d4410a59 100644
--- a/lib/ai/mcp/handler_tools.v
+++ b/lib/ai/mcp/handler_tools.v
@@ -26,8 +26,8 @@ pub:
pub struct ToolItems {
pub:
- typ string @[json: 'type']
- enum []string
+ typ string @[json: 'type']
+ enum []string
properties map[string]ToolProperty
}
@@ -63,7 +63,7 @@ fn (mut s Server) tools_list_handler(data string) !string {
// TODO: Implement pagination logic using the cursor
// For now, return all tools
-encoded := json.encode(ToolListResult{
+ encoded := json.encode(ToolListResult{
tools: s.backend.tool_list()!
next_cursor: '' // Empty if no more pages
})
@@ -148,4 +148,4 @@ pub fn error_tool_call_result(err IError) ToolCallResult {
text: err.msg()
}]
}
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/mcpgen/command.v b/lib/ai/mcp/mcpgen/command.v
index 37735734..c46d43ec 100644
--- a/lib/ai/mcp/mcpgen/command.v
+++ b/lib/ai/mcp/mcpgen/command.v
@@ -2,22 +2,21 @@ module mcpgen
import cli
-pub const command := cli.Command{
- sort_flags: true
- name: 'mcpgen'
+pub const command = cli.Command{
+ sort_flags: true
+ name: 'mcpgen'
// execute: cmd_mcpgen
description: 'will list existing mdbooks'
- commands: [
+ commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the MCP server'
- }
+ },
]
-
}
fn cmd_start(cmd cli.Command) ! {
mut server := new_mcp_server(&MCPGen{})!
server.start()!
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/mcpgen/mcpgen.v b/lib/ai/mcp/mcpgen/mcpgen.v
index cfa173b8..3916aa21 100644
--- a/lib/ai/mcp/mcpgen/mcpgen.v
+++ b/lib/ai/mcp/mcpgen/mcpgen.v
@@ -7,7 +7,7 @@ import freeflowuniverse.herolib.schemas.jsonschema.codegen
import os
pub struct FunctionPointer {
- name string // name of function
+ name string // name of function
module_path string // path to module
}
@@ -15,14 +15,14 @@ pub struct FunctionPointer {
// returns an MCP Tool code in v for attaching the function to the mcp server
// function_pointers: A list of function pointers to generate tools for
pub fn (d &MCPGen) create_mcp_tools_code(function_pointers []FunctionPointer) !string {
- mut str := ""
+ mut str := ''
for function_pointer in function_pointers {
str += d.create_mcp_tool_code(function_pointer.name, function_pointer.module_path)!
}
-
+
return str
-}
+}
// create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
// returns an MCP Tool code in v for attaching the function to the mcp server
@@ -30,11 +30,10 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
if !os.exists(module_path) {
return error('Module path does not exist: ${module_path}')
}
-
+
function := code.get_function_from_module(module_path, function_name) or {
return error('Failed to get function ${function_name} from module ${module_path}\n${err}')
}
-
mut types := map[string]string{}
for param in function.params {
@@ -43,9 +42,9 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
types[param.typ.symbol()] = code.get_type_from_module(module_path, param.typ.symbol())!
}
}
-
+
// Get the result type if it's a struct
- mut result_ := ""
+ mut result_ := ''
if function.result.typ is code.Result {
result_type := (function.result.typ as code.Result).typ
if result_type is code.Object {
@@ -60,7 +59,7 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
handler := d.create_mcp_tool_handler(function, types, result_)!
str := $tmpl('./templates/tool_code.v.template')
return str
-}
+}
// create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments
@@ -68,7 +67,7 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
// result: The type of result of the create_mcp_tool function. Could be simply string, or struct {...}
pub fn (d &MCPGen) create_mcp_tool_handler(function code.Function, types map[string]string, result_ string) !string {
decode_stmts := function.params.map(argument_decode_stmt(it)).join_lines()
-
+
function_call := 'd.${function.name}(${function.params.map(it.name).join(',')})'
result := code.parse_type(result_)
str := $tmpl('./templates/tool_handler.v.template')
@@ -92,6 +91,7 @@ pub fn argument_decode_stmt(param code.Param) string {
panic('Unsupported type: ${param.typ}')
}
}
+
/*
in @generate_mcp.v , implement a create_mpc_tool_handler function that given a vlang function string and the types that map to their corresponding type definitions (for instance struct some_type: SomeType{...}), generates a vlang function such as the following:
@@ -103,7 +103,6 @@ pub fn (d &MCPGen) create_mcp_tool_tool_handler(arguments map[string]Any) !mcp.T
}
*/
-
// create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments
// types: A map of struct names to their definitions for complex parameter types
@@ -111,14 +110,14 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// Create input schema for parameters
mut properties := map[string]jsonschema.SchemaRef{}
mut required := []string{}
-
+
for param in function.params {
// Add to required parameters
required << param.name
-
+
// Create property for this parameter
mut property := jsonschema.SchemaRef{}
-
+
// Check if this is a complex type defined in the types map
if param.typ.symbol() in types {
// Parse the struct definition to create a nested schema
@@ -133,21 +132,21 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// Handle primitive types
property = codegen.typesymbol_to_schema(param.typ.symbol())
}
-
+
properties[param.name] = property
}
-
+
// Create the input schema
input_schema := jsonschema.Schema{
- typ: 'object',
- properties: properties,
- required: required
+ typ: 'object'
+ properties: properties
+ required: required
}
-
+
// Create and return the Tool
return mcp.Tool{
- name: function.name,
- description: function.description,
+ name: function.name
+ description: function.description
input_schema: input_schema
}
}
@@ -157,7 +156,7 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// // returns: A jsonschema.Schema for the given input type
// // errors: Returns an error if the input type is not supported
// pub fn (d MCPGen) create_mcp_tool_input_schema(input string) !jsonschema.Schema {
-
+
// // if input is a primitive type, return a mcp jsonschema.Schema with that type
// if input == 'string' {
// return jsonschema.Schema{
@@ -176,30 +175,30 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// typ: 'boolean'
// }
// }
-
+
// // if input is a struct, return a mcp jsonschema.Schema with typ 'object' and properties for each field in the struct
// if input.starts_with('pub struct ') {
// struct_name := input[11..].split(' ')[0]
// fields := parse_struct_fields(input)
// mut properties := map[string]jsonschema.Schema{}
-
+
// for field_name, field_type in fields {
// property := jsonschema.Schema{
// typ: d.create_mcp_tool_input_schema(field_type)!.typ
// }
// properties[field_name] = property
// }
-
+
// return jsonschema.Schema{
// typ: 'object',
// properties: properties
// }
// }
-
+
// // if input is an array, return a mcp jsonschema.Schema with typ 'array' and items of the item type
// if input.starts_with('[]') {
// item_type := input[2..]
-
+
// // For array types, we create a schema with type 'array'
// // The actual item type is determined by the primitive type
// mut item_type_str := 'string' // default
@@ -210,74 +209,73 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// } else if item_type == 'bool' {
// item_type_str = 'boolean'
// }
-
+
// // Create a property for the array items
// mut property := jsonschema.Schema{
// typ: 'array'
// }
-
+
// // Add the property to the schema
// mut properties := map[string]jsonschema.Schema{}
// properties['items'] = property
-
+
// return jsonschema.Schema{
// typ: 'array',
// properties: properties
// }
// }
-
+
// // Default to string type for unknown types
// return jsonschema.Schema{
// typ: 'string'
// }
// }
-
// parse_struct_fields parses a V language struct definition string and returns a map of field names to their types
fn parse_struct_fields(struct_def string) map[string]string {
mut fields := map[string]string{}
-
+
// Find the opening and closing braces of the struct definition
start_idx := struct_def.index('{') or { return fields }
end_idx := struct_def.last_index('}') or { return fields }
-
+
// Extract the content between the braces
struct_content := struct_def[start_idx + 1..end_idx].trim_space()
-
+
// Split the content by newlines to get individual field definitions
field_lines := struct_content.split('
')
-
+
for line in field_lines {
trimmed_line := line.trim_space()
-
+
// Skip empty lines and comments
if trimmed_line == '' || trimmed_line.starts_with('//') {
continue
}
-
+
// Handle pub: or mut: prefixes
mut field_def := trimmed_line
if field_def.starts_with('pub:') || field_def.starts_with('mut:') {
field_def = field_def.all_after(':').trim_space()
}
-
+
// Split by whitespace to separate field name and type
parts := field_def.split_any(' ')
if parts.len < 2 {
continue
}
-
+
field_name := parts[0]
field_type := parts[1..].join(' ')
-
+
// Handle attributes like @[json: 'name']
if field_name.contains('@[') {
continue
}
-
+
fields[field_name] = field_type
}
-
+
return fields
}
diff --git a/lib/ai/mcp/mcpgen/mcpgen_tools.v b/lib/ai/mcp/mcpgen/mcpgen_tools.v
index 6e5fdcaa..ade149f6 100644
--- a/lib/ai/mcp/mcpgen/mcpgen_tools.v
+++ b/lib/ai/mcp/mcpgen/mcpgen_tools.v
@@ -12,42 +12,41 @@ import x.json2 as json { Any }
// function_pointers: A list of function pointers to generate tools for
const create_mcp_tools_code_tool = mcp.Tool{
- name: 'create_mcp_tools_code'
- description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
+ name: 'create_mcp_tools_code'
+ description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
returns an MCP Tool code in v for attaching the function to the mcp server
function_pointers: A list of function pointers to generate tools for'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {
- 'function_pointers': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'array'
- items: jsonschema.Items(jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'module_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- required: ['name', 'module_path']
- }))
- })
- }
- required: ['function_pointers']
- }
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'function_pointers': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'array'
+ items: jsonschema.Items(jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'name': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'module_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ required: ['name', 'module_path']
+ }))
+ })
+ }
+ required: ['function_pointers']
+ }
}
pub fn (d &MCPGen) create_mcp_tools_code_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- function_pointers := json.decode[[]FunctionPointer](arguments["function_pointers"].str())!
- result := d.create_mcp_tools_code(function_pointers)
- or {
+ function_pointers := json.decode[[]FunctionPointer](arguments['function_pointers'].str())!
+ result := d.create_mcp_tools_code(function_pointers) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
@@ -59,10 +58,10 @@ returns an MCP Tool code in v for attaching the function to the mcp server'
typ: 'object'
properties: {
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
})
'module_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
})
}
required: ['function_name', 'module_path']
diff --git a/lib/ai/mcp/mcpgen/server.v b/lib/ai/mcp/mcpgen/server.v
index 678bd83b..ddc0a68b 100644
--- a/lib/ai/mcp/mcpgen/server.v
+++ b/lib/ai/mcp/mcpgen/server.v
@@ -12,16 +12,16 @@ pub fn new_mcp_server(v &MCPGen) !&mcp.Server {
// Initialize the server with the empty handlers map
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
- 'create_mcp_tool_code': create_mcp_tool_code_tool
- 'create_mcp_tool_const': create_mcp_tool_const_tool
+ 'create_mcp_tool_code': create_mcp_tool_code_tool
+ 'create_mcp_tool_const': create_mcp_tool_const_tool
'create_mcp_tool_handler': create_mcp_tool_handler_tool
- 'create_mcp_tools_code': create_mcp_tools_code_tool
+ 'create_mcp_tools_code': create_mcp_tools_code_tool
}
tool_handlers: {
- 'create_mcp_tool_code': v.create_mcp_tool_code_tool_handler
- 'create_mcp_tool_const': v.create_mcp_tool_const_tool_handler
+ 'create_mcp_tool_code': v.create_mcp_tool_code_tool_handler
+ 'create_mcp_tool_const': v.create_mcp_tool_const_tool_handler
'create_mcp_tool_handler': v.create_mcp_tool_handler_tool_handler
- 'create_mcp_tools_code': v.create_mcp_tools_code_tool_handler
+ 'create_mcp_tools_code': v.create_mcp_tools_code_tool_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
@@ -32,4 +32,4 @@ pub fn new_mcp_server(v &MCPGen) !&mcp.Server {
}
})!
return server
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/pugconvert/cmd/main.v b/lib/ai/mcp/pugconvert/cmd/main.v
index 2be61425..5bf6791b 100644
--- a/lib/ai/mcp/pugconvert/cmd/main.v
+++ b/lib/ai/mcp/pugconvert/cmd/main.v
@@ -8,7 +8,7 @@ fn main() {
eprintln('Failed to create MCP server: ${err}')
return
}
-
+
// Start the server
server.start() or {
eprintln('Failed to start MCP server: ${err}')
diff --git a/lib/ai/mcp/pugconvert/logic/convertpug.v b/lib/ai/mcp/pugconvert/logic/convertpug.v
index 33206bb7..dfb6cca9 100644
--- a/lib/ai/mcp/pugconvert/logic/convertpug.v
+++ b/lib/ai/mcp/pugconvert/logic/convertpug.v
@@ -5,8 +5,7 @@ import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import json
-pub fn convert_pug(mydir string)! {
-
+pub fn convert_pug(mydir string) ! {
mut d := pathlib.get_dir(path: mydir, create: false)!
list := d.list(regex: [r'.*\.pug$'], include_links: false, files_only: true)!
for item in list.paths {
@@ -17,12 +16,12 @@ pub fn convert_pug(mydir string)! {
// extract_template parses AI response content to extract just the template
fn extract_template(raw_content string) string {
mut content := raw_content
-
+
// First check for tag
if content.contains('') {
content = content.split('')[1].trim_space()
}
-
+
// Look for ```jet code block
if content.contains('```jet') {
parts := content.split('```jet')
@@ -39,7 +38,7 @@ fn extract_template(raw_content string) string {
// Take the content between the first set of ```
// This handles both ```content``` and cases where there's only an opening ```
content = parts[1].trim_space()
-
+
// If we only see an opening ``` but no closing, cleanup any remaining backticks
// to avoid incomplete formatting markers
if !content.contains('```') {
@@ -47,16 +46,16 @@ fn extract_template(raw_content string) string {
}
}
}
-
+
return content
}
-pub fn convert_pug_file(myfile string)! {
+pub fn convert_pug_file(myfile string) ! {
println(myfile)
// Create new file path by replacing .pug extension with .jet
jet_file := myfile.replace('.pug', '.jet')
-
+
// Check if jet file already exists, if so skip processing
mut jet_path_exist := pathlib.get_file(path: jet_file, create: false)!
if jet_path_exist.exists() {
@@ -69,7 +68,7 @@ pub fn convert_pug_file(myfile string)! {
mut l := loader()
mut client := openai.get()!
-
+
base_instruction := '
You are a template language converter. You convert Pug templates to Jet templates.
@@ -82,25 +81,24 @@ pub fn convert_pug_file(myfile string)! {
only output the resulting template, no explanation, no steps, just the jet template
'
-
// We'll retry up to 5 times if validation fails
max_attempts := 5
mut attempts := 0
mut is_valid := false
mut error_message := ''
mut template := ''
-
+
for attempts < max_attempts && !is_valid {
attempts++
-
- mut system_content := texttools.dedent(base_instruction) + "\n" + l.jet()
+
+ mut system_content := texttools.dedent(base_instruction) + '\n' + l.jet()
mut user_prompt := ''
-
+
// Create different prompts for first attempt vs retries
if attempts == 1 {
// First attempt - convert from PUG
- user_prompt = texttools.dedent(base_user_prompt) + "\n" + content
-
+ user_prompt = texttools.dedent(base_user_prompt) + '\n' + content
+
// Print what we're sending to the AI service
println('Sending to OpenAI for conversion:')
println('--------------------------------')
@@ -127,53 +125,57 @@ Please fix the template and try again. Learn from feedback and check which jet t
Return only the corrected Jet template.
Dont send back more information than the fixed template, make sure its in jet format.
- '
-
- // Print what we're sending for the retry
+ ' // Print what we're sending for the retry
+
println('Sending to OpenAI for correction:')
println('--------------------------------')
println(user_prompt)
println('--------------------------------')
}
-
+
mut m := openai.Messages{
messages: [
openai.Message{
role: .system
content: system_content
- },
+ },
openai.Message{
role: .user
content: user_prompt
},
- ]}
-
+ ]
+ }
+
// Create a chat completion request
- res := client.chat_completion(msgs: m, model: "deepseek-r1-distill-llama-70b", max_completion_tokens: 64000)!
-
- println("-----")
-
+ res := client.chat_completion(
+ msgs: m
+ model: 'deepseek-r1-distill-llama-70b'
+ max_completion_tokens: 64000
+ )!
+
+ println('-----')
+
// Print AI response before extraction
println('Response received from AI:')
println('--------------------------------')
println(res.choices[0].message.content)
println('--------------------------------')
-
+
// Extract the template from the AI response
template = extract_template(res.choices[0].message.content)
-
+
println('Extracted template for ${myfile}:')
println('--------------------------------')
println(template)
println('--------------------------------')
-
+
// Validate the template
validation_result := jetvaliditycheck(template) or {
// If validation service is unavailable, we'll just proceed with the template
println('Warning: Template validation service unavailable: ${err}')
break
}
-
+
// Check if template is valid
if validation_result.is_valid {
is_valid = true
@@ -183,19 +185,19 @@ Dont send back more information than the fixed template, make sure its in jet fo
println('Template validation failed: ${error_message}')
}
}
-
+
// Report the validation outcome
if is_valid {
println('Successfully converted template after ${attempts} attempt(s)')
// Create the file and write the processed content
- println("Converted to: ${jet_file}")
+ println('Converted to: ${jet_file}')
mut jet_path := pathlib.get_file(path: jet_file, create: true)!
- jet_path.write(template)!
+ jet_path.write(template)!
} else if attempts >= max_attempts {
println('Warning: Could not validate template after ${max_attempts} attempts')
println('Using best attempt despite validation errors: ${error_message}')
- jet_file2:=jet_file.replace(".jet","_error.jet")
+ jet_file2 := jet_file.replace('.jet', '_error.jet')
mut jet_path2 := pathlib.get_file(path: jet_file2, create: true)!
- jet_path2.write(template)!
+ jet_path2.write(template)!
}
}
diff --git a/lib/ai/mcp/pugconvert/logic/jetvalidation.v b/lib/ai/mcp/pugconvert/logic/jetvalidation.v
index 3daca315..bad12bf3 100644
--- a/lib/ai/mcp/pugconvert/logic/jetvalidation.v
+++ b/lib/ai/mcp/pugconvert/logic/jetvalidation.v
@@ -5,9 +5,9 @@ import json
// JetTemplateResponse is the expected response structure from the validation service
struct JetTemplateResponse {
- valid bool
- message string
- error string
+ valid bool
+ message string
+ error string
}
// ValidationResult represents the result of a template validation
@@ -30,7 +30,7 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
template_data := json.encode({
'template': jetcontent
})
-
+
// Print what we're sending to the AI service
// println('Sending to JET validation service:')
// println('--------------------------------')
@@ -39,8 +39,8 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
// Send the POST request to the validation endpoint
req := httpconnection.Request{
- prefix: 'checkjet',
- data: template_data,
+ prefix: 'checkjet'
+ data: template_data
dataformat: .json
}
@@ -49,7 +49,7 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
// Handle connection errors
return ValidationResult{
is_valid: false
- error: 'Connection error: ${err}'
+ error: 'Connection error: ${err}'
}
}
@@ -58,12 +58,12 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
// If we can't parse JSON using our struct, the server didn't return the expected format
return ValidationResult{
is_valid: false
- error: 'Server returned unexpected format: ${err.msg()}'
+ error: 'Server returned unexpected format: ${err.msg()}'
}
}
// Use the structured response data
- if response.valid == false{
+ if response.valid == false {
error_msg := if response.error != '' {
response.error
} else if response.message != '' {
@@ -74,12 +74,12 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
return ValidationResult{
is_valid: false
- error: error_msg
+ error: error_msg
}
}
return ValidationResult{
is_valid: true
- error: ''
+ error: ''
}
}
diff --git a/lib/ai/mcp/pugconvert/logic/loader.v b/lib/ai/mcp/pugconvert/logic/loader.v
index 9853cac9..2a35d454 100644
--- a/lib/ai/mcp/pugconvert/logic/loader.v
+++ b/lib/ai/mcp/pugconvert/logic/loader.v
@@ -10,12 +10,11 @@ pub mut:
}
fn (mut loader FileLoader) load() {
- loader.embedded_files["jet"]=$embed_file('templates/jet_instructions.md')
+ loader.embedded_files['jet'] = $embed_file('templates/jet_instructions.md')
}
-
fn (mut loader FileLoader) jet() string {
- c:=loader.embedded_files["jet"] or { panic("bug embed") }
+ c := loader.embedded_files['jet'] or { panic('bug embed') }
return c.to_string()
}
@@ -23,4 +22,4 @@ fn loader() FileLoader {
mut loader := FileLoader{}
loader.load()
return loader
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/pugconvert/mcp/handlers.v b/lib/ai/mcp/pugconvert/mcp/handlers.v
index bf69799c..4d944de8 100644
--- a/lib/ai/mcp/pugconvert/mcp/handlers.v
+++ b/lib/ai/mcp/pugconvert/mcp/handlers.v
@@ -8,47 +8,47 @@ import os
pub fn handler(arguments map[string]Any) !mcp.ToolCallResult {
path := arguments['path'].str()
-
+
// Check if path exists
if !os.exists(path) {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
+ content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
}
}
-
+
// Determine if path is a file or directory
is_directory := os.is_dir(path)
-
- mut message := ""
-
+
+ mut message := ''
+
if is_directory {
// Convert all pug files in the directory
pugconvert.convert_pug(path) or {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error converting pug files in directory: ${err}")
+ content: mcp.result_to_mcp_tool_contents[string]('Error converting pug files in directory: ${err}')
}
}
message = "Successfully converted all pug files in directory '${path}'"
- } else if path.ends_with(".pug") {
+ } else if path.ends_with('.pug') {
// Convert a single pug file
pugconvert.convert_pug_file(path) or {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error converting pug file: ${err}")
+ content: mcp.result_to_mcp_tool_contents[string]('Error converting pug file: ${err}')
}
}
message = "Successfully converted pug file '${path}'"
} else {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
+ content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
}
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](message)
+ content: mcp.result_to_mcp_tool_contents[string](message)
}
}
diff --git a/lib/ai/mcp/pugconvert/mcp/specifications.v b/lib/ai/mcp/pugconvert/mcp/specifications.v
index 64c78760..3a912407 100644
--- a/lib/ai/mcp/pugconvert/mcp/specifications.v
+++ b/lib/ai/mcp/pugconvert/mcp/specifications.v
@@ -1,18 +1,18 @@
module mcp
import freeflowuniverse.herolib.ai.mcp
-import x.json2 as json { Any }
+import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.ai.mcp.logger
const specs = mcp.Tool{
name: 'pugconvert'
description: 'Convert Pug template files to Jet template files'
- input_schema: jsonschema.Schema{
+ input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string',
+ typ: 'string'
description: 'Path to a .pug file or directory containing .pug files to convert'
})
}
diff --git a/lib/ai/mcp/rhai/cmd/main.v b/lib/ai/mcp/rhai/cmd/main.v
index 8fe36e48..6ed65cd7 100644
--- a/lib/ai/mcp/rhai/cmd/main.v
+++ b/lib/ai/mcp/rhai/cmd/main.v
@@ -9,7 +9,7 @@ fn main() {
log.error('Failed to create MCP server: ${err}')
return
}
-
+
// Start the server
server.start() or {
log.error('Failed to start MCP server: ${err}')
diff --git a/lib/ai/mcp/rhai/example/example copy.vsh b/lib/ai/mcp/rhai/example/example copy.vsh
index 076be927..04b1fc36 100644
--- a/lib/ai/mcp/rhai/example/example copy.vsh
+++ b/lib/ai/mcp/rhai/example/example copy.vsh
@@ -4,163 +4,175 @@ import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
import os
fn main() {
- // Get the current directory
- current_dir := os.dir(@FILE)
-
- // Check if a source code path was provided as an argument
- if os.args.len < 2 {
- println('Please provide the path to the source code directory as an argument')
- println('Example: ./example.vsh /path/to/source/code/directory')
- return
- }
-
- // Get the source code path from the command line arguments
- source_code_path := os.args[1]
-
- // Check if the path exists and is a directory
- if !os.exists(source_code_path) {
- println('Source code path does not exist: ${source_code_path}')
- return
- }
-
- if !os.is_dir(source_code_path) {
- println('Source code path is not a directory: ${source_code_path}')
- return
- }
-
- // Get all Rust files in the directory
- files := os.ls(source_code_path) or {
- println('Failed to list files in directory: ${err}')
- return
- }
-
- // Combine all Rust files into a single source code string
- mut source_code := ''
- for file in files {
- file_path := os.join_path(source_code_path, file)
-
- // Skip directories and non-Rust files
- if os.is_dir(file_path) || !file.ends_with('.rs') {
- continue
- }
-
- // Read the file content
- file_content := os.read_file(file_path) or {
- println('Failed to read file ${file_path}: ${err}')
- continue
- }
-
- // Add file content to the combined source code
- source_code += '// File: ${file}\n${file_content}\n\n'
- }
-
- if source_code == '' {
- println('No Rust files found in directory: ${source_code_path}')
- return
- }
-
- // Read the rhaiwrapping.md file
- rhai_wrapping_md := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping.md') or {
- println('Failed to read rhaiwrapping.md: ${err}')
- return
- }
-
- // Determine the crate path from the source code path
- // Extract the path relative to the src directory
- src_index := source_code_path.index('src/') or {
- println('Could not determine crate path: src/ not found in path')
- return
- }
-
- mut path_parts := source_code_path[src_index+4..].split('/')
- // Remove the last part (the file name)
- if path_parts.len > 0 {
- path_parts.delete_last()
- }
- rel_path := path_parts.join('::')
- crate_path := 'sal::${rel_path}'
-
- // Create a new task
- mut task := escalayer.new_task(
- name: 'rhai_wrapper_creator.escalayer'
- description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
- )
-
- // Create model configs
- sonnet_model := escalayer.ModelConfig{
- name: 'anthropic/claude-3.7-sonnet'
- provider: 'anthropic'
- temperature: 0.7
- max_tokens: 25000
- }
-
- gpt4_model := escalayer.ModelConfig{
- name: 'gpt-4'
- provider: 'openai'
- temperature: 0.7
- max_tokens: 25000
- }
-
- // Extract the module name from the directory path (last component)
- dir_parts := source_code_path.split('/')
- name := dir_parts[dir_parts.len - 1]
-
- // Create the prompt with source code, wrapper example, and rhai_wrapping_md
- prompt_content := create_rhai_wrappers(name, source_code, os.read_file('${current_dir}/prompts/example_script.md') or { '' }, os.read_file('${current_dir}/prompts/wrapper.md') or { '' }, os.read_file('${current_dir}/prompts/errors.md') or { '' }, crate_path)
-
- // Create a prompt function that returns the prepared content
- prompt_function := fn [prompt_content] (input string) string {
- return prompt_content
- }
+ // Get the current directory
+ current_dir := os.dir(@FILE)
- gen := RhaiGen{
- name: name
- dir: source_code_path
- }
-
- // Define a single unit task that handles everything
- task.new_unit_task(
- name: 'create_rhai_wrappers'
- prompt_function: prompt_function
- callback_function: gen.process_rhai_wrappers
- base_model: sonnet_model
- retry_model: gpt4_model
- retry_count: 1
- )
-
- // Initiate the task
- result := task.initiate('') or {
- println('Task failed: ${err}')
- return
- }
-
- println('Task completed successfully')
- println('The wrapper files have been generated and compiled in the target directory.')
- println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
+ // Check if a source code path was provided as an argument
+ if os.args.len < 2 {
+ println('Please provide the path to the source code directory as an argument')
+ println('Example: ./example.vsh /path/to/source/code/directory')
+ return
+ }
+
+ // Get the source code path from the command line arguments
+ source_code_path := os.args[1]
+
+ // Check if the path exists and is a directory
+ if !os.exists(source_code_path) {
+ println('Source code path does not exist: ${source_code_path}')
+ return
+ }
+
+ if !os.is_dir(source_code_path) {
+ println('Source code path is not a directory: ${source_code_path}')
+ return
+ }
+
+ // Get all Rust files in the directory
+ files := os.ls(source_code_path) or {
+ println('Failed to list files in directory: ${err}')
+ return
+ }
+
+ // Combine all Rust files into a single source code string
+ mut source_code := ''
+ for file in files {
+ file_path := os.join_path(source_code_path, file)
+
+ // Skip directories and non-Rust files
+ if os.is_dir(file_path) || !file.ends_with('.rs') {
+ continue
+ }
+
+ // Read the file content
+ file_content := os.read_file(file_path) or {
+ println('Failed to read file ${file_path}: ${err}')
+ continue
+ }
+
+ // Add file content to the combined source code
+ source_code += '// File: ${file}\n${file_content}\n\n'
+ }
+
+ if source_code == '' {
+ println('No Rust files found in directory: ${source_code_path}')
+ return
+ }
+
+ // Read the rhaiwrapping.md file
+ rhai_wrapping_md := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping.md') or {
+ println('Failed to read rhaiwrapping.md: ${err}')
+ return
+ }
+
+ // Determine the crate path from the source code path
+ // Extract the path relative to the src directory
+ src_index := source_code_path.index('src/') or {
+ println('Could not determine crate path: src/ not found in path')
+ return
+ }
+
+ mut path_parts := source_code_path[src_index + 4..].split('/')
+ // Remove the last part (the file name)
+ if path_parts.len > 0 {
+ path_parts.delete_last()
+ }
+ rel_path := path_parts.join('::')
+ crate_path := 'sal::${rel_path}'
+
+ // Create a new task
+ mut task := escalayer.new_task(
+ name: 'rhai_wrapper_creator.escalayer'
+ description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
+ )
+
+ // Create model configs
+ sonnet_model := escalayer.ModelConfig{
+ name: 'anthropic/claude-3.7-sonnet'
+ provider: 'anthropic'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ gpt4_model := escalayer.ModelConfig{
+ name: 'gpt-4'
+ provider: 'openai'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ // Extract the module name from the directory path (last component)
+ dir_parts := source_code_path.split('/')
+ name := dir_parts[dir_parts.len - 1]
+
+ // Create the prompt with source code, wrapper example, and rhai_wrapping_md
+ prompt_content := create_rhai_wrappers(name, source_code, os.read_file('${current_dir}/prompts/example_script.md') or {
+ ''
+ }, os.read_file('${current_dir}/prompts/wrapper.md') or { '' }, os.read_file('${current_dir}/prompts/errors.md') or {
+ ''
+ }, crate_path)
+
+ // Create a prompt function that returns the prepared content
+ prompt_function := fn [prompt_content] (input string) string {
+ return prompt_content
+ }
+
+ gen := RhaiGen{
+ name: name
+ dir: source_code_path
+ }
+
+ // Define a single unit task that handles everything
+ task.new_unit_task(
+ name: 'create_rhai_wrappers'
+ prompt_function: prompt_function
+ callback_function: gen.process_rhai_wrappers
+ base_model: sonnet_model
+ retry_model: gpt4_model
+ retry_count: 1
+ )
+
+ // Initiate the task
+ result := task.initiate('') or {
+ println('Task failed: ${err}')
+ return
+ }
+
+ println('Task completed successfully')
+ println('The wrapper files have been generated and compiled in the target directory.')
+ println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
}
// Define the prompt functions
fn separate_functions(input string) string {
- return 'Read the following Rust code and separate it into functions. Identify all the methods in the Container implementation and their purposes.\n\n${input}'
+ return 'Read the following Rust code and separate it into functions. Identify all the methods in the Container implementation and their purposes.\n\n${input}'
}
fn create_wrappers(input string) string {
- return 'Create Rhai wrappers for the Rust functions identified in the previous step. The wrappers should follow the builder pattern and provide a clean API for use in Rhai scripts. Include error handling and type conversion.\n\n${input}'
+ return 'Create Rhai wrappers for the Rust functions identified in the previous step. The wrappers should follow the builder pattern and provide a clean API for use in Rhai scripts. Include error handling and type conversion.\n\n${input}'
}
fn create_example(input string) string {
- return 'Create a Rhai example script that demonstrates how to use the wrapper functions. The example should be based on the provided example.rs file but adapted for Rhai syntax. Create a web server example that uses the container functions.\n\n${input}'
+ return 'Create a Rhai example script that demonstrates how to use the wrapper functions. The example should be based on the provided example.rs file but adapted for Rhai syntax. Create a web server example that uses the container functions.\n\n${input}'
}
// Define a Rhai wrapper generator function for Container functions
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
- guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md') or { panic('Failed to read guides') }
- engine := $tmpl('./prompts/engine.md')
- vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md') or { panic('Failed to read guides') }
- rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md') or { panic('Failed to read guides') }
- rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md') or { panic('Failed to read guides') }
- generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
- return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
+ guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md') or {
+ panic('Failed to read guides')
+ }
+ engine := $tmpl('./prompts/engine.md')
+ vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md') or {
+ panic('Failed to read guides')
+ }
+ rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md') or {
+ panic('Failed to read guides')
+ }
+ rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md') or {
+ panic('Failed to read guides')
+ }
+ generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
+ return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
${guides}
${vector_vs_array}
${example_rhai}
@@ -267,263 +279,254 @@ your engine create function is called `create_rhai_engine`
@[params]
pub struct WrapperModule {
pub:
- lib_rs string
- example_rs string
- engine_rs string
- cargo_toml string
- example_rhai string
- generic_wrapper_rs string
- wrapper_rs string
+ lib_rs string
+ example_rs string
+ engine_rs string
+ cargo_toml string
+ example_rhai string
+ generic_wrapper_rs string
+ wrapper_rs string
}
// functions is a list of function names that AI should extract and pass in
-fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string)! string {
- // Define project directory paths
- name := name_
- project_dir := '${base_dir}/rhai'
-
- // Create the project using cargo new --lib
- if os.exists(project_dir) {
- os.rmdir_all(project_dir) or {
- return error('Failed to clean existing project directory: ${err}')
- }
- }
-
- // Run cargo new --lib to create the project
- os.chdir(base_dir) or {
- return error('Failed to change directory to base directory: ${err}')
- }
-
- cargo_new_result := os.execute('cargo new --lib rhai')
- if cargo_new_result.exit_code != 0 {
- return error('Failed to create new library project: ${cargo_new_result.output}')
- }
-
- // Create examples directory
- examples_dir := '${project_dir}/examples'
- os.mkdir_all(examples_dir) or {
- return error('Failed to create examples directory: ${err}')
- }
-
- // Write the lib.rs file
- if wrapper.lib_rs != '' {
- os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
- return error('Failed to write lib.rs: ${err}')
- }
- }
+fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
+ // Define project directory paths
+ name := name_
+ project_dir := '${base_dir}/rhai'
- // Write the wrapper.rs file
- if wrapper.wrapper_rs != '' {
- os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
- return error('Failed to write wrapper.rs: ${err}')
- }
- }
-
- // Write the generic wrapper.rs file
- if wrapper.generic_wrapper_rs != '' {
- os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
- return error('Failed to write generic wrapper.rs: ${err}')
- }
- }
-
- // Write the example.rs file
- if wrapper.example_rs != '' {
- os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
- return error('Failed to write example.rs: ${err}')
- }
- }
-
- // Write the engine.rs file if provided
- if wrapper.engine_rs != '' {
- os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
- return error('Failed to write engine.rs: ${err}')
- }
- }
-
- // Write the Cargo.toml file
- if wrapper.cargo_toml != '' {
- os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
- return error('Failed to write Cargo.toml: ${err}')
- }
- }
-
- // Write the example.rhai file if provided
- if wrapper.example_rhai != '' {
- os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
- return error('Failed to write example.rhai: ${err}')
- }
- }
-
- return project_dir
+ // Create the project using cargo new --lib
+ if os.exists(project_dir) {
+ os.rmdir_all(project_dir) or {
+ return error('Failed to clean existing project directory: ${err}')
+ }
+ }
+
+ // Run cargo new --lib to create the project
+ os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }
+
+ cargo_new_result := os.execute('cargo new --lib rhai')
+ if cargo_new_result.exit_code != 0 {
+ return error('Failed to create new library project: ${cargo_new_result.output}')
+ }
+
+ // Create examples directory
+ examples_dir := '${project_dir}/examples'
+ os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
+
+ // Write the lib.rs file
+ if wrapper.lib_rs != '' {
+ os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
+ return error('Failed to write lib.rs: ${err}')
+ }
+ }
+
+ // Write the wrapper.rs file
+ if wrapper.wrapper_rs != '' {
+ os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
+ return error('Failed to write wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the generic wrapper.rs file
+ if wrapper.generic_wrapper_rs != '' {
+ os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
+ return error('Failed to write generic wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the example.rs file
+ if wrapper.example_rs != '' {
+ os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
+ return error('Failed to write example.rs: ${err}')
+ }
+ }
+
+ // Write the engine.rs file if provided
+ if wrapper.engine_rs != '' {
+ os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
+ return error('Failed to write engine.rs: ${err}')
+ }
+ }
+
+ // Write the Cargo.toml file
+ if wrapper.cargo_toml != '' {
+ os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
+ return error('Failed to write Cargo.toml: ${err}')
+ }
+ }
+
+ // Write the example.rhai file if provided
+ if wrapper.example_rhai != '' {
+ os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
+ return error('Failed to write example.rhai: ${err}')
+ }
+ }
+
+ return project_dir
}
// Helper function to extract code blocks from the response
fn extract_code_block(response string, identifier string, language string) string {
- // Find the start marker for the code block
- mut start_marker := '```${language}\n// ${identifier}'
- if language == '' {
- start_marker = '```\n// ${identifier}'
- }
-
- start_index := response.index(start_marker) or {
- // Try alternative format
- mut alt_marker := '```${language}\n${identifier}'
- if language == '' {
- alt_marker = '```\n${identifier}'
- }
-
- response.index(alt_marker) or {
- return ''
- }
- }
-
- // Find the end marker
- end_marker := '```'
- end_index := response.index_after(end_marker, start_index + start_marker.len) or {
- return ''
- }
-
- // Extract the content between the markers
- content_start := start_index + start_marker.len
- content := response[content_start..end_index].trim_space()
-
- return content
+ // Find the start marker for the code block
+ mut start_marker := '```${language}\n// ${identifier}'
+ if language == '' {
+ start_marker = '```\n// ${identifier}'
+ }
+
+ start_index := response.index(start_marker) or {
+ // Try alternative format
+ mut alt_marker := '```${language}\n${identifier}'
+ if language == '' {
+ alt_marker = '```\n${identifier}'
+ }
+
+ response.index(alt_marker) or { return '' }
+ }
+
+ // Find the end marker
+ end_marker := '```'
+ end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
+
+ // Extract the content between the markers
+ content_start := start_index + start_marker.len
+ content := response[content_start..end_index].trim_space()
+
+ return content
}
// Extract module name from wrapper code
fn extract_module_name(code string) string {
- lines := code.split('\n')
-
- for line in lines {
- // Look for pub mod or mod declarations
- if line.contains('pub mod ') || line.contains('mod ') {
- // Extract module name
- mut parts := []string{}
- if line.contains('pub mod ') {
- parts = line.split('pub mod ')
- } else {
- parts = line.split('mod ')
- }
-
- if parts.len > 1 {
- // Extract the module name and remove any trailing characters
- mut name := parts[1].trim_space()
- // Remove any trailing { or ; or whitespace
- name = name.trim_right('{').trim_right(';').trim_space()
- if name != '' {
- return name
- }
- }
- }
- }
-
- return ''
+ lines := code.split('\n')
+
+ for line in lines {
+ // Look for pub mod or mod declarations
+ if line.contains('pub mod ') || line.contains('mod ') {
+ // Extract module name
+ mut parts := []string{}
+ if line.contains('pub mod ') {
+ parts = line.split('pub mod ')
+ } else {
+ parts = line.split('mod ')
+ }
+
+ if parts.len > 1 {
+ // Extract the module name and remove any trailing characters
+ mut name := parts[1].trim_space()
+ // Remove any trailing { or ; or whitespace
+ name = name.trim_right('{').trim_right(';').trim_space()
+ if name != '' {
+ return name
+ }
+ }
+ }
+ }
+
+ return ''
}
struct RhaiGen {
- name string
- dir string
+ name string
+ dir string
}
// Define the callback function that processes the response and compiles the code
-fn (gen RhaiGen)process_rhai_wrappers(response string)! string {
- // Extract wrapper.rs content
- wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
- if wrapper_rs_content == '' {
- return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
- }
-
- // Extract engine.rs content
- mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
- if engine_rs_content == '' {
- // Try to extract from the response without explicit language marker
- engine_rs_content = extract_code_block(response, 'engine.rs', '')
- // if engine_rs_content == '' {
- // // Use the template engine.rs
- // engine_rs_content = $tmpl('./templates/engine.rs')
- // }
- }
-
- // Extract example.rhai content
- mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- // Try to extract from the response without explicit language marker
- example_rhai_content = extract_code_block(response, 'example.rhai', '')
- if example_rhai_content == '' {
- // Use the example from the template
- example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
- return error('Failed to read example.rhai template: ${err}')
- }
-
- // Extract the code block from the markdown file
- example_rhai_content = extract_code_block(example_script_md, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- return error('Failed to extract example.rhai from template file')
- }
- }
- }
-
- // Extract function names from the wrapper.rs content
- functions := extract_functions_from_code(wrapper_rs_content)
-
- println('Using module name: ${gen.name}_rhai')
- println('Extracted functions: ${functions.join(", ")}')
-
- name := gen.name
- // Create a WrapperModule struct with the extracted content
- wrapper := WrapperModule{
- lib_rs: $tmpl('./templates/lib.rs')
- wrapper_rs: wrapper_rs_content
- example_rs: $tmpl('./templates/example.rs')
- engine_rs: engine_rs_content
- generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
- cargo_toml: $tmpl('./templates/cargo.toml')
- example_rhai: example_rhai_content
- }
-
- // Create the wrapper module
- base_target_dir := gen.dir
- project_dir := create_wrapper_module(wrapper, functions, gen.name, base_target_dir) or {
- return error('Failed to create wrapper module: ${err}')
- }
-
- // Run the example
- os.chdir(project_dir) or {
- return error('Failed to change directory to project: ${err}')
- }
-
- // Run cargo build first
- build_result := os.execute('cargo build')
- if build_result.exit_code != 0 {
- return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
- }
-
- // Run the example
- run_result := os.execute('cargo run --example example')
-
- return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_result.output}\n\nRun output:\n${run_result.output}'
+fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
+ // Extract wrapper.rs content
+ wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
+ if wrapper_rs_content == '' {
+ return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
+ }
+
+ // Extract engine.rs content
+ mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
+ if engine_rs_content == '' {
+ // Try to extract from the response without explicit language marker
+ engine_rs_content = extract_code_block(response, 'engine.rs', '')
+ // if engine_rs_content == '' {
+ // // Use the template engine.rs
+ // engine_rs_content = $tmpl('./templates/engine.rs')
+ // }
+ }
+
+ // Extract example.rhai content
+ mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
+ if example_rhai_content == '' {
+ // Try to extract from the response without explicit language marker
+ example_rhai_content = extract_code_block(response, 'example.rhai', '')
+ if example_rhai_content == '' {
+ // Use the example from the template
+ example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
+ return error('Failed to read example.rhai template: ${err}')
+ }
+
+ // Extract the code block from the markdown file
+ example_rhai_content = extract_code_block(example_script_md, 'example.rhai',
+ 'rhai')
+ if example_rhai_content == '' {
+ return error('Failed to extract example.rhai from template file')
+ }
+ }
+ }
+
+ // Extract function names from the wrapper.rs content
+ functions := extract_functions_from_code(wrapper_rs_content)
+
+ println('Using module name: ${gen.name}_rhai')
+ println('Extracted functions: ${functions.join(', ')}')
+
+ name := gen.name
+ // Create a WrapperModule struct with the extracted content
+ wrapper := WrapperModule{
+ lib_rs: $tmpl('./templates/lib.rs')
+ wrapper_rs: wrapper_rs_content
+ example_rs: $tmpl('./templates/example.rs')
+ engine_rs: engine_rs_content
+ generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
+ cargo_toml: $tmpl('./templates/cargo.toml')
+ example_rhai: example_rhai_content
+ }
+
+ // Create the wrapper module
+ base_target_dir := gen.dir
+ project_dir := create_wrapper_module(wrapper, functions, gen.name, base_target_dir) or {
+ return error('Failed to create wrapper module: ${err}')
+ }
+
+ // Run the example
+ os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }
+
+ // Run cargo build first
+ build_result := os.execute('cargo build')
+ if build_result.exit_code != 0 {
+ return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
+ }
+
+ // Run the example
+ run_result := os.execute('cargo run --example example')
+
+ return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_result.output}\n\nRun output:\n${run_result.output}'
}
// Extract function names from wrapper code
fn extract_functions_from_code(code string) []string {
- mut functions := []string{}
- lines := code.split('\n')
-
- for line in lines {
- if line.contains('pub fn ') && !line.contains('//') {
- // Extract function name
- parts := line.split('pub fn ')
- if parts.len > 1 {
- name_parts := parts[1].split('(')
- if name_parts.len > 0 {
- fn_name := name_parts[0].trim_space()
- if fn_name != '' {
- functions << fn_name
- }
- }
- }
- }
- }
-
- return functions
-}
\ No newline at end of file
+ mut functions := []string{}
+ lines := code.split('\n')
+
+ for line in lines {
+ if line.contains('pub fn ') && !line.contains('//') {
+ // Extract function name
+ parts := line.split('pub fn ')
+ if parts.len > 1 {
+ name_parts := parts[1].split('(')
+ if name_parts.len > 0 {
+ fn_name := name_parts[0].trim_space()
+ if fn_name != '' {
+ functions << fn_name
+ }
+ }
+ }
+ }
+ }
+
+ return functions
+}
diff --git a/lib/ai/mcp/rhai/example/example.vsh b/lib/ai/mcp/rhai/example/example.vsh
index 827710cc..c69391a5 100755
--- a/lib/ai/mcp/rhai/example/example.vsh
+++ b/lib/ai/mcp/rhai/example/example.vsh
@@ -4,209 +4,204 @@ import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
import os
fn main() {
- // Get the current directory where this script is located
- current_dir := os.dir(@FILE)
-
- // Validate command line arguments
- source_code_path := validate_command_args() or {
- println(err)
- return
- }
-
- // Read and combine all Rust files in the source directory
- source_code := read_source_code(source_code_path) or {
- println(err)
- return
- }
-
- // Determine the crate path from the source code path
- crate_path := determine_crate_path(source_code_path) or {
- println(err)
- return
- }
-
- // Extract the module name from the directory path (last component)
- name := extract_module_name_from_path(source_code_path)
-
- // Create the prompt content for the AI
- prompt_content := create_rhai_wrappers(
- name,
- source_code,
- read_file_safely('${current_dir}/prompts/example_script.md'),
- read_file_safely('${current_dir}/prompts/wrapper.md'),
- read_file_safely('${current_dir}/prompts/errors.md'),
- crate_path
- )
-
- // Create the generator instance
- gen := RhaiGen{
- name: name
- dir: source_code_path
- }
-
- // Run the task to generate Rhai wrappers
- run_wrapper_generation_task(prompt_content, gen) or {
- println('Task failed: ${err}')
- return
- }
-
- println('Task completed successfully')
- println('The wrapper files have been generated and compiled in the target directory.')
- println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
+ // Get the current directory where this script is located
+ current_dir := os.dir(@FILE)
+
+ // Validate command line arguments
+ source_code_path := validate_command_args() or {
+ println(err)
+ return
+ }
+
+ // Read and combine all Rust files in the source directory
+ source_code := read_source_code(source_code_path) or {
+ println(err)
+ return
+ }
+
+ // Determine the crate path from the source code path
+ crate_path := determine_crate_path(source_code_path) or {
+ println(err)
+ return
+ }
+
+ // Extract the module name from the directory path (last component)
+ name := extract_module_name_from_path(source_code_path)
+
+ // Create the prompt content for the AI
+ prompt_content := create_rhai_wrappers(name, source_code, read_file_safely('${current_dir}/prompts/example_script.md'),
+ read_file_safely('${current_dir}/prompts/wrapper.md'), read_file_safely('${current_dir}/prompts/errors.md'),
+ crate_path)
+
+ // Create the generator instance
+ gen := RhaiGen{
+ name: name
+ dir: source_code_path
+ }
+
+ // Run the task to generate Rhai wrappers
+ run_wrapper_generation_task(prompt_content, gen) or {
+ println('Task failed: ${err}')
+ return
+ }
+
+ println('Task completed successfully')
+ println('The wrapper files have been generated and compiled in the target directory.')
+ println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
}
// Validates command line arguments and returns the source code path
fn validate_command_args() !string {
- if os.args.len < 2 {
- return error('Please provide the path to the source code directory as an argument\nExample: ./example.vsh /path/to/source/code/directory')
- }
-
- source_code_path := os.args[1]
-
- if !os.exists(source_code_path) {
- return error('Source code path does not exist: ${source_code_path}')
- }
-
- if !os.is_dir(source_code_path) {
- return error('Source code path is not a directory: ${source_code_path}')
- }
-
- return source_code_path
+ if os.args.len < 2 {
+ return error('Please provide the path to the source code directory as an argument\nExample: ./example.vsh /path/to/source/code/directory')
+ }
+
+ source_code_path := os.args[1]
+
+ if !os.exists(source_code_path) {
+ return error('Source code path does not exist: ${source_code_path}')
+ }
+
+ if !os.is_dir(source_code_path) {
+ return error('Source code path is not a directory: ${source_code_path}')
+ }
+
+ return source_code_path
}
// Reads and combines all Rust files in the given directory
fn read_source_code(source_code_path string) !string {
- // Get all files in the directory
- files := os.ls(source_code_path) or {
- return error('Failed to list files in directory: ${err}')
- }
-
- // Combine all Rust files into a single source code string
- mut source_code := ''
- for file in files {
- file_path := os.join_path(source_code_path, file)
-
- // Skip directories and non-Rust files
- if os.is_dir(file_path) || !file.ends_with('.rs') {
- continue
- }
-
- // Read the file content
- file_content := os.read_file(file_path) or {
- println('Failed to read file ${file_path}: ${err}')
- continue
- }
-
- // Add file content to the combined source code
- source_code += '// File: ${file}\n${file_content}\n\n'
- }
-
- if source_code == '' {
- return error('No Rust files found in directory: ${source_code_path}')
- }
-
- return source_code
+ // Get all files in the directory
+ files := os.ls(source_code_path) or {
+ return error('Failed to list files in directory: ${err}')
+ }
+
+ // Combine all Rust files into a single source code string
+ mut source_code := ''
+ for file in files {
+ file_path := os.join_path(source_code_path, file)
+
+ // Skip directories and non-Rust files
+ if os.is_dir(file_path) || !file.ends_with('.rs') {
+ continue
+ }
+
+ // Read the file content
+ file_content := os.read_file(file_path) or {
+ println('Failed to read file ${file_path}: ${err}')
+ continue
+ }
+
+ // Add file content to the combined source code
+ source_code += '// File: ${file}\n${file_content}\n\n'
+ }
+
+ if source_code == '' {
+ return error('No Rust files found in directory: ${source_code_path}')
+ }
+
+ return source_code
}
// Determines the crate path from the source code path
fn determine_crate_path(source_code_path string) !string {
- // Extract the path relative to the src directory
- src_index := source_code_path.index('src/') or {
- return error('Could not determine crate path: src/ not found in path')
- }
-
- mut path_parts := source_code_path[src_index+4..].split('/')
- // Remove the last part (the file name)
- if path_parts.len > 0 {
- path_parts.delete_last()
- }
- rel_path := path_parts.join('::')
- return 'sal::${rel_path}'
+ // Extract the path relative to the src directory
+ src_index := source_code_path.index('src/') or {
+ return error('Could not determine crate path: src/ not found in path')
+ }
+
+ mut path_parts := source_code_path[src_index + 4..].split('/')
+ // Remove the last part (the file name)
+ if path_parts.len > 0 {
+ path_parts.delete_last()
+ }
+ rel_path := path_parts.join('::')
+ return 'sal::${rel_path}'
}
// Extracts the module name from a directory path
fn extract_module_name_from_path(path string) string {
- dir_parts := path.split('/')
- return dir_parts[dir_parts.len - 1]
+ dir_parts := path.split('/')
+ return dir_parts[dir_parts.len - 1]
}
// Helper function to read a file or return empty string if file doesn't exist
fn read_file_safely(file_path string) string {
- return os.read_file(file_path) or { '' }
+ return os.read_file(file_path) or { '' }
}
// Runs the task to generate Rhai wrappers
fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
- // Create a new task
- mut task := escalayer.new_task(
- name: 'rhai_wrapper_creator.escalayer'
- description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
- )
-
- // Create model configs
- sonnet_model := escalayer.ModelConfig{
- name: 'anthropic/claude-3.7-sonnet'
- provider: 'anthropic'
- temperature: 0.7
- max_tokens: 25000
- }
-
- gpt4_model := escalayer.ModelConfig{
- name: 'gpt-4'
- provider: 'openai'
- temperature: 0.7
- max_tokens: 25000
- }
-
- // Create a prompt function that returns the prepared content
- prompt_function := fn [prompt_content] (input string) string {
- return prompt_content
- }
-
- // Define a single unit task that handles everything
- task.new_unit_task(
- name: 'create_rhai_wrappers'
- prompt_function: prompt_function
- callback_function: gen.process_rhai_wrappers
- base_model: sonnet_model
- retry_model: gpt4_model
- retry_count: 1
- )
-
- // Initiate the task
- return task.initiate('')
+ // Create a new task
+ mut task := escalayer.new_task(
+ name: 'rhai_wrapper_creator.escalayer'
+ description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
+ )
+
+ // Create model configs
+ sonnet_model := escalayer.ModelConfig{
+ name: 'anthropic/claude-3.7-sonnet'
+ provider: 'anthropic'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ gpt4_model := escalayer.ModelConfig{
+ name: 'gpt-4'
+ provider: 'openai'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ // Create a prompt function that returns the prepared content
+ prompt_function := fn [prompt_content] (input string) string {
+ return prompt_content
+ }
+
+ // Define a single unit task that handles everything
+ task.new_unit_task(
+ name: 'create_rhai_wrappers'
+ prompt_function: prompt_function
+ callback_function: gen.process_rhai_wrappers
+ base_model: sonnet_model
+ retry_model: gpt4_model
+ retry_count: 1
+ )
+
+ // Initiate the task
+ return task.initiate('')
}
// Define a Rhai wrapper generator function for Container functions
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
- // Load all required template and guide files
- guides := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')
- engine := $tmpl('./prompts/engine.md')
- vector_vs_array := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')
- rhai_integration_fixes := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')
- rhai_syntax_guide := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')
- generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
-
- // Build the prompt content
- return build_prompt_content(name, source_code, example_rhai, wrapper_md, errors_md,
- guides, vector_vs_array, rhai_integration_fixes, rhai_syntax_guide,
- generic_wrapper_rs, engine)
+ // Load all required template and guide files
+ guides := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')
+ engine := $tmpl('./prompts/engine.md')
+ vector_vs_array := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')
+ rhai_integration_fixes := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')
+ rhai_syntax_guide := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')
+ generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
+
+ // Build the prompt content
+ return build_prompt_content(name, source_code, example_rhai, wrapper_md, errors_md,
+ guides, vector_vs_array, rhai_integration_fixes, rhai_syntax_guide, generic_wrapper_rs,
+ engine)
}
// Helper function to load guide files with error handling
fn load_guide_file(path string) string {
- return os.read_file(path) or {
- eprintln('Warning: Failed to read guide file: ${path}')
- return ''
- }
+ return os.read_file(path) or {
+ eprintln('Warning: Failed to read guide file: ${path}')
+ return ''
+ }
}
// Builds the prompt content for the AI
-fn build_prompt_content(name string, source_code string, example_rhai string, wrapper_md string,
- errors_md string, guides string, vector_vs_array string,
- rhai_integration_fixes string, rhai_syntax_guide string,
- generic_wrapper_rs string, engine string) string {
- return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
+fn build_prompt_content(name string, source_code string, example_rhai string, wrapper_md string,
+ errors_md string, guides string, vector_vs_array string,
+ rhai_integration_fixes string, rhai_syntax_guide string,
+ generic_wrapper_rs string, engine string) string {
+ return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
${guides}
${vector_vs_array}
${example_rhai}
@@ -313,305 +308,289 @@ your engine create function is called `create_rhai_engine`
@[params]
pub struct WrapperModule {
pub:
- lib_rs string
- example_rs string
- engine_rs string
- cargo_toml string
- example_rhai string
- generic_wrapper_rs string
- wrapper_rs string
+ lib_rs string
+ example_rs string
+ engine_rs string
+ cargo_toml string
+ example_rhai string
+ generic_wrapper_rs string
+ wrapper_rs string
}
// functions is a list of function names that AI should extract and pass in
-fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string)! string {
- // Define project directory paths
- name := name_
- project_dir := '${base_dir}/rhai'
-
- // Create the project using cargo new --lib
- if os.exists(project_dir) {
- os.rmdir_all(project_dir) or {
- return error('Failed to clean existing project directory: ${err}')
- }
- }
-
- // Run cargo new --lib to create the project
- os.chdir(base_dir) or {
- return error('Failed to change directory to base directory: ${err}')
- }
-
- cargo_new_result := os.execute('cargo new --lib rhai')
- if cargo_new_result.exit_code != 0 {
- return error('Failed to create new library project: ${cargo_new_result.output}')
- }
-
- // Create examples directory
- examples_dir := '${project_dir}/examples'
- os.mkdir_all(examples_dir) or {
- return error('Failed to create examples directory: ${err}')
- }
-
- // Write the lib.rs file
- if wrapper.lib_rs != '' {
- os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
- return error('Failed to write lib.rs: ${err}')
- }
- }
+fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
+ // Define project directory paths
+ name := name_
+ project_dir := '${base_dir}/rhai'
- // Write the wrapper.rs file
- if wrapper.wrapper_rs != '' {
- os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
- return error('Failed to write wrapper.rs: ${err}')
- }
- }
-
- // Write the generic wrapper.rs file
- if wrapper.generic_wrapper_rs != '' {
- os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
- return error('Failed to write generic wrapper.rs: ${err}')
- }
- }
-
- // Write the example.rs file
- if wrapper.example_rs != '' {
- os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
- return error('Failed to write example.rs: ${err}')
- }
- }
-
- // Write the engine.rs file if provided
- if wrapper.engine_rs != '' {
- os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
- return error('Failed to write engine.rs: ${err}')
- }
- }
-
- // Write the Cargo.toml file
- if wrapper.cargo_toml != '' {
- os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
- return error('Failed to write Cargo.toml: ${err}')
- }
- }
-
- // Write the example.rhai file if provided
- if wrapper.example_rhai != '' {
- os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
- return error('Failed to write example.rhai: ${err}')
- }
- }
-
- return project_dir
+ // Create the project using cargo new --lib
+ if os.exists(project_dir) {
+ os.rmdir_all(project_dir) or {
+ return error('Failed to clean existing project directory: ${err}')
+ }
+ }
+
+ // Run cargo new --lib to create the project
+ os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }
+
+ cargo_new_result := os.execute('cargo new --lib rhai')
+ if cargo_new_result.exit_code != 0 {
+ return error('Failed to create new library project: ${cargo_new_result.output}')
+ }
+
+ // Create examples directory
+ examples_dir := '${project_dir}/examples'
+ os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
+
+ // Write the lib.rs file
+ if wrapper.lib_rs != '' {
+ os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
+ return error('Failed to write lib.rs: ${err}')
+ }
+ }
+
+ // Write the wrapper.rs file
+ if wrapper.wrapper_rs != '' {
+ os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
+ return error('Failed to write wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the generic wrapper.rs file
+ if wrapper.generic_wrapper_rs != '' {
+ os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
+ return error('Failed to write generic wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the example.rs file
+ if wrapper.example_rs != '' {
+ os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
+ return error('Failed to write example.rs: ${err}')
+ }
+ }
+
+ // Write the engine.rs file if provided
+ if wrapper.engine_rs != '' {
+ os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
+ return error('Failed to write engine.rs: ${err}')
+ }
+ }
+
+ // Write the Cargo.toml file
+ if wrapper.cargo_toml != '' {
+ os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
+ return error('Failed to write Cargo.toml: ${err}')
+ }
+ }
+
+ // Write the example.rhai file if provided
+ if wrapper.example_rhai != '' {
+ os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
+ return error('Failed to write example.rhai: ${err}')
+ }
+ }
+
+ return project_dir
}
// Helper function to extract code blocks from the response
fn extract_code_block(response string, identifier string, language string) string {
- // Find the start marker for the code block
- mut start_marker := '```${language}\n// ${identifier}'
- if language == '' {
- start_marker = '```\n// ${identifier}'
- }
-
- start_index := response.index(start_marker) or {
- // Try alternative format
- mut alt_marker := '```${language}\n${identifier}'
- if language == '' {
- alt_marker = '```\n${identifier}'
- }
-
- response.index(alt_marker) or {
- return ''
- }
- }
-
- // Find the end marker
- end_marker := '```'
- end_index := response.index_after(end_marker, start_index + start_marker.len) or {
- return ''
- }
-
- // Extract the content between the markers
- content_start := start_index + start_marker.len
- content := response[content_start..end_index].trim_space()
-
- return content
+ // Find the start marker for the code block
+ mut start_marker := '```${language}\n// ${identifier}'
+ if language == '' {
+ start_marker = '```\n// ${identifier}'
+ }
+
+ start_index := response.index(start_marker) or {
+ // Try alternative format
+ mut alt_marker := '```${language}\n${identifier}'
+ if language == '' {
+ alt_marker = '```\n${identifier}'
+ }
+
+ response.index(alt_marker) or { return '' }
+ }
+
+ // Find the end marker
+ end_marker := '```'
+ end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
+
+ // Extract the content between the markers
+ content_start := start_index + start_marker.len
+ content := response[content_start..end_index].trim_space()
+
+ return content
}
// Extract module name from wrapper code
fn extract_module_name(code string) string {
- lines := code.split('\n')
-
- for line in lines {
- // Look for pub mod or mod declarations
- if line.contains('pub mod ') || line.contains('mod ') {
- // Extract module name
- mut parts := []string{}
- if line.contains('pub mod ') {
- parts = line.split('pub mod ')
- } else {
- parts = line.split('mod ')
- }
-
- if parts.len > 1 {
- // Extract the module name and remove any trailing characters
- mut name := parts[1].trim_space()
- // Remove any trailing { or ; or whitespace
- name = name.trim_right('{').trim_right(';').trim_space()
- if name != '' {
- return name
- }
- }
- }
- }
-
- return ''
+ lines := code.split('\n')
+
+ for line in lines {
+ // Look for pub mod or mod declarations
+ if line.contains('pub mod ') || line.contains('mod ') {
+ // Extract module name
+ mut parts := []string{}
+ if line.contains('pub mod ') {
+ parts = line.split('pub mod ')
+ } else {
+ parts = line.split('mod ')
+ }
+
+ if parts.len > 1 {
+ // Extract the module name and remove any trailing characters
+ mut name := parts[1].trim_space()
+ // Remove any trailing { or ; or whitespace
+ name = name.trim_right('{').trim_right(';').trim_space()
+ if name != '' {
+ return name
+ }
+ }
+ }
+ }
+
+ return ''
}
// RhaiGen struct for generating Rhai wrappers
struct RhaiGen {
- name string
- dir string
+ name string
+ dir string
}
// Process the AI response and compile the generated code
-fn (gen RhaiGen)process_rhai_wrappers(response string)! string {
- // Extract code blocks from the response
- code_blocks := extract_code_blocks(response) or {
- return err
- }
-
- // Extract function names from the wrapper.rs content
- functions := extract_functions_from_code(code_blocks.wrapper_rs)
-
- println('Using module name: ${gen.name}_rhai')
- println('Extracted functions: ${functions.join(", ")}')
-
- name := gen.name
-
- // Create a WrapperModule struct with the extracted content
- wrapper := WrapperModule{
- lib_rs: $tmpl('./templates/lib.rs')
- wrapper_rs: code_blocks.wrapper_rs
- example_rs: $tmpl('./templates/example.rs')
- engine_rs: code_blocks.engine_rs
- generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
- cargo_toml: $tmpl('./templates/cargo.toml')
- example_rhai: code_blocks.example_rhai
- }
-
- // Create the wrapper module
- project_dir := create_wrapper_module(wrapper, functions, gen.name, gen.dir) or {
- return error('Failed to create wrapper module: ${err}')
- }
-
- // Build and run the project
- build_output, run_output := build_and_run_project(project_dir) or {
- return err
- }
-
- return format_success_message(project_dir, build_output, run_output)
+fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
+ // Extract code blocks from the response
+ code_blocks := extract_code_blocks(response) or { return err }
+
+ // Extract function names from the wrapper.rs content
+ functions := extract_functions_from_code(code_blocks.wrapper_rs)
+
+ println('Using module name: ${gen.name}_rhai')
+ println('Extracted functions: ${functions.join(', ')}')
+
+ name := gen.name
+
+ // Create a WrapperModule struct with the extracted content
+ wrapper := WrapperModule{
+ lib_rs: $tmpl('./templates/lib.rs')
+ wrapper_rs: code_blocks.wrapper_rs
+ example_rs: $tmpl('./templates/example.rs')
+ engine_rs: code_blocks.engine_rs
+ generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
+ cargo_toml: $tmpl('./templates/cargo.toml')
+ example_rhai: code_blocks.example_rhai
+ }
+
+ // Create the wrapper module
+ project_dir := create_wrapper_module(wrapper, functions, gen.name, gen.dir) or {
+ return error('Failed to create wrapper module: ${err}')
+ }
+
+ // Build and run the project
+ build_output, run_output := build_and_run_project(project_dir) or { return err }
+
+ return format_success_message(project_dir, build_output, run_output)
}
// CodeBlocks struct to hold extracted code blocks
struct CodeBlocks {
- wrapper_rs string
- engine_rs string
- example_rhai string
+ wrapper_rs string
+ engine_rs string
+ example_rhai string
}
// Extract code blocks from the AI response
-fn extract_code_blocks(response string)! CodeBlocks {
- // Extract wrapper.rs content
- wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
- if wrapper_rs_content == '' {
- return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
- }
-
- // Extract engine.rs content
- mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
- if engine_rs_content == '' {
- // Try to extract from the response without explicit language marker
- engine_rs_content = extract_code_block(response, 'engine.rs', '')
- }
-
- // Extract example.rhai content
- mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- // Try to extract from the response without explicit language marker
- example_rhai_content = extract_code_block(response, 'example.rhai', '')
- if example_rhai_content == '' {
- // Use the example from the template
- example_rhai_content = load_example_from_template() or {
- return err
- }
- }
- }
-
- return CodeBlocks{
- wrapper_rs: wrapper_rs_content
- engine_rs: engine_rs_content
- example_rhai: example_rhai_content
- }
+fn extract_code_blocks(response string) !CodeBlocks {
+ // Extract wrapper.rs content
+ wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
+ if wrapper_rs_content == '' {
+ return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
+ }
+
+ // Extract engine.rs content
+ mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
+ if engine_rs_content == '' {
+ // Try to extract from the response without explicit language marker
+ engine_rs_content = extract_code_block(response, 'engine.rs', '')
+ }
+
+ // Extract example.rhai content
+ mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
+ if example_rhai_content == '' {
+ // Try to extract from the response without explicit language marker
+ example_rhai_content = extract_code_block(response, 'example.rhai', '')
+ if example_rhai_content == '' {
+ // Use the example from the template
+ example_rhai_content = load_example_from_template() or { return err }
+ }
+ }
+
+ return CodeBlocks{
+ wrapper_rs: wrapper_rs_content
+ engine_rs: engine_rs_content
+ example_rhai: example_rhai_content
+ }
}
// Load example.rhai from template file
-fn load_example_from_template()! string {
- example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
- return error('Failed to read example.rhai template: ${err}')
- }
-
- // Extract the code block from the markdown file
- example_rhai_content := extract_code_block(example_script_md, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- return error('Failed to extract example.rhai from template file')
- }
-
- return example_rhai_content
+fn load_example_from_template() !string {
+ example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
+ return error('Failed to read example.rhai template: ${err}')
+ }
+
+ // Extract the code block from the markdown file
+ example_rhai_content := extract_code_block(example_script_md, 'example.rhai', 'rhai')
+ if example_rhai_content == '' {
+ return error('Failed to extract example.rhai from template file')
+ }
+
+ return example_rhai_content
}
// Build and run the project
-fn build_and_run_project(project_dir string)! (string, string) {
- // Change to the project directory
- os.chdir(project_dir) or {
- return error('Failed to change directory to project: ${err}')
- }
-
- // Run cargo build first
- build_result := os.execute('cargo build')
- if build_result.exit_code != 0 {
- return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
- }
-
- // Run the example
- run_result := os.execute('cargo run --example example')
-
- return build_result.output, run_result.output
+fn build_and_run_project(project_dir string) !(string, string) {
+ // Change to the project directory
+ os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }
+
+ // Run cargo build first
+ build_result := os.execute('cargo build')
+ if build_result.exit_code != 0 {
+ return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
+ }
+
+ // Run the example
+ run_result := os.execute('cargo run --example example')
+
+ return build_result.output, run_result.output
}
// Format success message
fn format_success_message(project_dir string, build_output string, run_output string) string {
- return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
+ return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
}
// Extract function names from wrapper code
fn extract_functions_from_code(code string) []string {
- mut functions := []string{}
- lines := code.split('\n')
-
- for line in lines {
- if line.contains('pub fn ') && !line.contains('//') {
- // Extract function name
- parts := line.split('pub fn ')
- if parts.len > 1 {
- name_parts := parts[1].split('(')
- if name_parts.len > 0 {
- fn_name := name_parts[0].trim_space()
- if fn_name != '' {
- functions << fn_name
- }
- }
- }
- }
- }
-
- return functions
-}
\ No newline at end of file
+ mut functions := []string{}
+ lines := code.split('\n')
+
+ for line in lines {
+ if line.contains('pub fn ') && !line.contains('//') {
+ // Extract function name
+ parts := line.split('pub fn ')
+ if parts.len > 1 {
+ name_parts := parts[1].split('(')
+ if name_parts.len > 0 {
+ fn_name := name_parts[0].trim_space()
+ if fn_name != '' {
+ functions << fn_name
+ }
+ }
+ }
+ }
+ }
+
+ return functions
+}
diff --git a/lib/ai/mcp/rhai/logic/logic.v b/lib/ai/mcp/rhai/logic/logic.v
index 175cde40..9c6dfd96 100644
--- a/lib/ai/mcp/rhai/logic/logic.v
+++ b/lib/ai/mcp/rhai/logic/logic.v
@@ -6,285 +6,278 @@ import freeflowuniverse.herolib.ai.utils
import os
pub fn generate_rhai_wrapper(name string, source_path string) !string {
- // Detect source package and module information
- source_pkg_info := rust.detect_source_package(source_path)!
- source_code := rust.read_source_code(source_path)!
- prompt := rhai_wrapper_generation_prompt(name, source_code, source_pkg_info)!
- return run_wrapper_generation_task(prompt, RhaiGen{
- name: name
- dir: source_path
- source_pkg_info: source_pkg_info
- })!
+ // Detect source package and module information
+ source_pkg_info := rust.detect_source_package(source_path)!
+ source_code := rust.read_source_code(source_path)!
+ prompt := rhai_wrapper_generation_prompt(name, source_code, source_pkg_info)!
+ return run_wrapper_generation_task(prompt, RhaiGen{
+ name: name
+ dir: source_path
+ source_pkg_info: source_pkg_info
+ })!
}
// Runs the task to generate Rhai wrappers
pub fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
- // Create a new task
- mut task := escalayer.new_task(
- name: 'rhai_wrapper_creator.escalayer'
- description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
- )
-
- // Create model configs
- sonnet_model := escalayer.ModelConfig{
- name: 'anthropic/claude-3.7-sonnet'
- provider: 'anthropic'
- temperature: 0.7
- max_tokens: 25000
- }
-
- gpt4_model := escalayer.ModelConfig{
- name: 'gpt-4'
- provider: 'openai'
- temperature: 0.7
- max_tokens: 25000
- }
-
- // Create a prompt function that returns the prepared content
- prompt_function := fn [prompt_content] (input string) string {
- return prompt_content
- }
-
- // Define a single unit task that handles everything
- task.new_unit_task(
- name: 'create_rhai_wrappers'
- prompt_function: prompt_function
- callback_function: gen.process_rhai_wrappers
- base_model: sonnet_model
- retry_model: gpt4_model
- retry_count: 1
- )
-
- // Initiate the task
- return task.initiate('')
+ // Create a new task
+ mut task := escalayer.new_task(
+ name: 'rhai_wrapper_creator.escalayer'
+ description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
+ )
+
+ // Create model configs
+ sonnet_model := escalayer.ModelConfig{
+ name: 'anthropic/claude-3.7-sonnet'
+ provider: 'anthropic'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ gpt4_model := escalayer.ModelConfig{
+ name: 'gpt-4'
+ provider: 'openai'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ // Create a prompt function that returns the prepared content
+ prompt_function := fn [prompt_content] (input string) string {
+ return prompt_content
+ }
+
+ // Define a single unit task that handles everything
+ task.new_unit_task(
+ name: 'create_rhai_wrappers'
+ prompt_function: prompt_function
+ callback_function: gen.process_rhai_wrappers
+ base_model: sonnet_model
+ retry_model: gpt4_model
+ retry_count: 1
+ )
+
+ // Initiate the task
+ return task.initiate('')
}
// Define a Rhai wrapper generator function for Container functions
pub fn rhai_wrapper_generation_prompt(name string, source_code string, source_pkg_info rust.SourcePackageInfo) !string {
- current_dir := os.dir(@FILE)
- example_rhai := os.read_file('${current_dir}/prompts/example_script.md')!
- wrapper_md := os.read_file('${current_dir}/prompts/wrapper.md')!
- errors_md := os.read_file('${current_dir}/prompts/errors.md')!
-
+ current_dir := os.dir(@FILE)
+ example_rhai := os.read_file('${current_dir}/prompts/example_script.md')!
+ wrapper_md := os.read_file('${current_dir}/prompts/wrapper.md')!
+ errors_md := os.read_file('${current_dir}/prompts/errors.md')!
+
// Load all required template and guide files
- guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')!
- engine := $tmpl('./prompts/engine.md')
- vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')!
- rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')!
- rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')!
- generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
-
- prompt := $tmpl('./prompts/main.md')
+ guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')!
+ engine := $tmpl('./prompts/engine.md')
+ vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')!
+ rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')!
+ rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')!
+ generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
+
+ prompt := $tmpl('./prompts/main.md')
return prompt
}
@[params]
pub struct WrapperModule {
pub:
- lib_rs string
- example_rs string
- engine_rs string
- cargo_toml string
- example_rhai string
- generic_wrapper_rs string
- wrapper_rs string
+ lib_rs string
+ example_rs string
+ engine_rs string
+ cargo_toml string
+ example_rhai string
+ generic_wrapper_rs string
+ wrapper_rs string
}
// functions is a list of function names that AI should extract and pass in
-pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string)! string {
-
+pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string) !string {
// Define project directory paths
- project_dir := '${path}/rhai'
-
- // Create the project using cargo new --lib
- if os.exists(project_dir) {
- os.rmdir_all(project_dir) or {
- return error('Failed to clean existing project directory: ${err}')
- }
- }
-
- // Run cargo new --lib to create the project
- os.chdir(path) or {
- return error('Failed to change directory to base directory: ${err}')
- }
-
- cargo_new_result := os.execute('cargo new --lib rhai')
- if cargo_new_result.exit_code != 0 {
- return error('Failed to create new library project: ${cargo_new_result.output}')
- }
-
- // Create examples directory
- examples_dir := '${project_dir}/examples'
- os.mkdir_all(examples_dir) or {
- return error('Failed to create examples directory: ${err}')
- }
-
- // Write the lib.rs file
- if wrapper.lib_rs != '' {
- os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
- return error('Failed to write lib.rs: ${err}')
- }
- } else {
- // Use default lib.rs template if none provided
- lib_rs_content := $tmpl('./templates/lib.rs')
- os.write_file('${project_dir}/src/lib.rs', lib_rs_content) or {
- return error('Failed to write lib.rs: ${err}')
- }
- }
+ project_dir := '${path}/rhai'
- // Write the wrapper.rs file
- if wrapper.wrapper_rs != '' {
- os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
- return error('Failed to write wrapper.rs: ${err}')
- }
- }
-
- // Write the generic wrapper.rs file
- if wrapper.generic_wrapper_rs != '' {
- os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
- return error('Failed to write generic wrapper.rs: ${err}')
- }
- }
-
- // Write the example.rs file
- if wrapper.example_rs != '' {
- os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
- return error('Failed to write example.rs: ${err}')
- }
- } else {
- // Use default example.rs template if none provided
- example_rs_content := $tmpl('./templates/example.rs')
- os.write_file('${examples_dir}/example.rs', example_rs_content) or {
- return error('Failed to write example.rs: ${err}')
- }
- }
-
- // Write the engine.rs file if provided
- if wrapper.engine_rs != '' {
- os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
- return error('Failed to write engine.rs: ${err}')
- }
- }
-
- // Write the Cargo.toml file
- os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
- return error('Failed to write Cargo.toml: ${err}')
- }
-
- // Write the example.rhai file
- os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
- return error('Failed to write example.rhai: ${err}')
- }
-
- return project_dir
+ // Create the project using cargo new --lib
+ if os.exists(project_dir) {
+ os.rmdir_all(project_dir) or {
+ return error('Failed to clean existing project directory: ${err}')
+ }
+ }
+
+ // Run cargo new --lib to create the project
+ os.chdir(path) or { return error('Failed to change directory to base directory: ${err}') }
+
+ cargo_new_result := os.execute('cargo new --lib rhai')
+ if cargo_new_result.exit_code != 0 {
+ return error('Failed to create new library project: ${cargo_new_result.output}')
+ }
+
+ // Create examples directory
+ examples_dir := '${project_dir}/examples'
+ os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
+
+ // Write the lib.rs file
+ if wrapper.lib_rs != '' {
+ os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
+ return error('Failed to write lib.rs: ${err}')
+ }
+ } else {
+ // Use default lib.rs template if none provided
+ lib_rs_content := $tmpl('./templates/lib.rs')
+ os.write_file('${project_dir}/src/lib.rs', lib_rs_content) or {
+ return error('Failed to write lib.rs: ${err}')
+ }
+ }
+
+ // Write the wrapper.rs file
+ if wrapper.wrapper_rs != '' {
+ os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
+ return error('Failed to write wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the generic wrapper.rs file
+ if wrapper.generic_wrapper_rs != '' {
+ os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
+ return error('Failed to write generic wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the example.rs file
+ if wrapper.example_rs != '' {
+ os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
+ return error('Failed to write example.rs: ${err}')
+ }
+ } else {
+ // Use default example.rs template if none provided
+ example_rs_content := $tmpl('./templates/example.rs')
+ os.write_file('${examples_dir}/example.rs', example_rs_content) or {
+ return error('Failed to write example.rs: ${err}')
+ }
+ }
+
+ // Write the engine.rs file if provided
+ if wrapper.engine_rs != '' {
+ os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
+ return error('Failed to write engine.rs: ${err}')
+ }
+ }
+
+ // Write the Cargo.toml file
+ os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
+ return error('Failed to write Cargo.toml: ${err}')
+ }
+
+ // Write the example.rhai file
+ os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
+ return error('Failed to write example.rhai: ${err}')
+ }
+
+ return project_dir
}
-
-
// Extract module name from wrapper code
fn extract_module_name(code string) string {
- lines := code.split('\n')
-
- for line in lines {
- // Look for pub mod or mod declarations
- if line.contains('pub mod ') || line.contains('mod ') {
- // Extract module name
- mut parts := []string{}
- if line.contains('pub mod ') {
- parts = line.split('pub mod ')
- } else {
- parts = line.split('mod ')
- }
-
- if parts.len > 1 {
- // Extract the module name and remove any trailing characters
- mut name := parts[1].trim_space()
- // Remove any trailing { or ; or whitespace
- name = name.trim_right('{').trim_right(';').trim_space()
- if name != '' {
- return name
- }
- }
- }
- }
-
- return ''
+ lines := code.split('\n')
+
+ for line in lines {
+ // Look for pub mod or mod declarations
+ if line.contains('pub mod ') || line.contains('mod ') {
+ // Extract module name
+ mut parts := []string{}
+ if line.contains('pub mod ') {
+ parts = line.split('pub mod ')
+ } else {
+ parts = line.split('mod ')
+ }
+
+ if parts.len > 1 {
+ // Extract the module name and remove any trailing characters
+ mut name := parts[1].trim_space()
+ // Remove any trailing { or ; or whitespace
+ name = name.trim_right('{').trim_right(';').trim_space()
+ if name != '' {
+ return name
+ }
+ }
+ }
+ }
+
+ return ''
}
// RhaiGen struct for generating Rhai wrappers
struct RhaiGen {
- name string
- dir string
- source_pkg_info rust.SourcePackageInfo
+ name string
+ dir string
+ source_pkg_info rust.SourcePackageInfo
}
// Process the AI response and compile the generated code
pub fn (gen RhaiGen) process_rhai_wrappers(input string) !string {
- blocks := extract_code_blocks(input)!
- source_pkg_info := gen.source_pkg_info
- // Create the module structure
- mod := WrapperModule{
- lib_rs: blocks.lib_rs
- engine_rs: blocks.engine_rs
- example_rhai: blocks.example_rhai
- generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
- wrapper_rs: blocks.wrapper_rs
- }
-
- // Write the module files
- project_dir := write_rhai_wrapper_module(mod, gen.name, gen.dir)!
-
- return project_dir
+ blocks := extract_code_blocks(input)!
+ source_pkg_info := gen.source_pkg_info
+ // Create the module structure
+ mod := WrapperModule{
+ lib_rs: blocks.lib_rs
+ engine_rs: blocks.engine_rs
+ example_rhai: blocks.example_rhai
+ generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
+ wrapper_rs: blocks.wrapper_rs
+ }
+
+ // Write the module files
+ project_dir := write_rhai_wrapper_module(mod, gen.name, gen.dir)!
+
+ return project_dir
}
// CodeBlocks struct to hold extracted code blocks
struct CodeBlocks {
- wrapper_rs string
- engine_rs string
- example_rhai string
- lib_rs string
+ wrapper_rs string
+ engine_rs string
+ example_rhai string
+ lib_rs string
}
// Extract code blocks from the AI response
-fn extract_code_blocks(response string)! CodeBlocks {
- // Extract wrapper.rs content
- wrapper_rs_content := utils.extract_code_block(response, 'wrapper.rs', 'rust')
- if wrapper_rs_content == '' {
- return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
- }
-
- // Extract engine.rs content
- mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
- if engine_rs_content == '' {
- // Try to extract from the response without explicit language marker
- engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
- }
-
- // Extract example.rhai content
- mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- // Try to extract from the response without explicit language marker
- example_rhai_content = utils.extract_code_block(response, 'example.rhai', '')
- if example_rhai_content == '' {
- return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
- }
- }
-
- // Extract lib.rs content
- lib_rs_content := utils.extract_code_block(response, 'lib.rs', 'rust')
- if lib_rs_content == '' {
- return error('Failed to extract lib.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// lib.rs and ends with ```')
- }
-
- return CodeBlocks{
- wrapper_rs: wrapper_rs_content
- engine_rs: engine_rs_content
- example_rhai: example_rhai_content
- lib_rs: lib_rs_content
- }
+fn extract_code_blocks(response string) !CodeBlocks {
+ // Extract wrapper.rs content
+ wrapper_rs_content := utils.extract_code_block(response, 'wrapper.rs', 'rust')
+ if wrapper_rs_content == '' {
+ return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
+ }
+
+ // Extract engine.rs content
+ mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
+ if engine_rs_content == '' {
+ // Try to extract from the response without explicit language marker
+ engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
+ }
+
+ // Extract example.rhai content
+ mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
+ if example_rhai_content == '' {
+ // Try to extract from the response without explicit language marker
+ example_rhai_content = utils.extract_code_block(response, 'example.rhai', '')
+ if example_rhai_content == '' {
+ return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
+ }
+ }
+
+ // Extract lib.rs content
+ lib_rs_content := utils.extract_code_block(response, 'lib.rs', 'rust')
+ if lib_rs_content == '' {
+ return error('Failed to extract lib.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// lib.rs and ends with ```')
+ }
+
+ return CodeBlocks{
+ wrapper_rs: wrapper_rs_content
+ engine_rs: engine_rs_content
+ example_rhai: example_rhai_content
+ lib_rs: lib_rs_content
+ }
}
// Format success message
fn format_success_message(project_dir string, build_output string, run_output string) string {
- return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
+ return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
}
diff --git a/lib/ai/mcp/rhai/logic/logic_sampling.v b/lib/ai/mcp/rhai/logic/logic_sampling.v
index 356b77f0..f5d4aefc 100644
--- a/lib/ai/mcp/rhai/logic/logic_sampling.v
+++ b/lib/ai/mcp/rhai/logic/logic_sampling.v
@@ -20,7 +20,7 @@ import os
// name: 'rhai_wrapper_creator.escalayer'
// description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
// )
-
+
// // Create model configs
// sonnet_model := escalayer.ModelConfig{
// name: 'anthropic/claude-3.7-sonnet'
@@ -28,19 +28,19 @@ import os
// temperature: 0.7
// max_tokens: 25000
// }
-
+
// gpt4_model := escalayer.ModelConfig{
// name: 'gpt-4'
// provider: 'openai'
// temperature: 0.7
// max_tokens: 25000
// }
-
+
// // Create a prompt function that returns the prepared content
// prompt_function := fn [prompt_content] (input string) string {
// return prompt_content
// }
-
+
// // Define a single unit task that handles everything
// task.new_unit_task(
// name: 'create_rhai_wrappers'
@@ -50,7 +50,7 @@ import os
// retry_model: gpt4_model
// retry_count: 1
// )
-
+
// // Initiate the task
// return task.initiate('')
// }
@@ -69,33 +69,33 @@ import os
// // functions is a list of function names that AI should extract and pass in
// pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string)! string {
-
+
// // Define project directory paths
// project_dir := '${path}/rhai'
-
+
// // Create the project using cargo new --lib
// if os.exists(project_dir) {
// os.rmdir_all(project_dir) or {
// return error('Failed to clean existing project directory: ${err}')
// }
// }
-
+
// // Run cargo new --lib to create the project
// os.chdir(path) or {
// return error('Failed to change directory to base directory: ${err}')
// }
-
+
// cargo_new_result := os.execute('cargo new --lib rhai')
// if cargo_new_result.exit_code != 0 {
// return error('Failed to create new library project: ${cargo_new_result.output}')
// }
-
+
// // Create examples directory
// examples_dir := '${project_dir}/examples'
// os.mkdir_all(examples_dir) or {
// return error('Failed to create examples directory: ${err}')
// }
-
+
// // Write the lib.rs file
// if wrapper.lib_rs != '' {
// os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
@@ -109,47 +109,45 @@ import os
// return error('Failed to write wrapper.rs: ${err}')
// }
// }
-
+
// // Write the generic wrapper.rs file
// if wrapper.generic_wrapper_rs != '' {
// os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
// return error('Failed to write generic wrapper.rs: ${err}')
// }
// }
-
+
// // Write the example.rs file
// if wrapper.example_rs != '' {
// os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
// return error('Failed to write example.rs: ${err}')
// }
// }
-
+
// // Write the engine.rs file if provided
// if wrapper.engine_rs != '' {
// os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
// return error('Failed to write engine.rs: ${err}')
// }
// }
-
+
// // Write the Cargo.toml file
// os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
// return error('Failed to write Cargo.toml: ${err}')
// }
-
+
// // Write the example.rhai file
// os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
// return error('Failed to write example.rhai: ${err}')
// }
-
+
// return project_dir
// }
-
-
// // Extract module name from wrapper code
// fn extract_module_name(code string) string {
// lines := code.split('\n')
-
+
// for line in lines {
// // Look for pub mod or mod declarations
// if line.contains('pub mod ') || line.contains('mod ') {
@@ -160,7 +158,7 @@ import os
// } else {
// parts = line.split('mod ')
// }
-
+
// if parts.len > 1 {
// // Extract the module name and remove any trailing characters
// mut name := parts[1].trim_space()
@@ -172,7 +170,7 @@ import os
// }
// }
// }
-
+
// return ''
// }
@@ -188,9 +186,9 @@ import os
// code_blocks := extract_code_blocks(response) or {
// return err
// }
-
+
// name := gen.name
-
+
// // Create a WrapperModule struct with the extracted content
// wrapper := WrapperModule{
// lib_rs: $tmpl('./templates/lib.rs')
@@ -201,17 +199,17 @@ import os
// cargo_toml: $tmpl('./templates/cargo.toml')
// example_rhai: code_blocks.example_rhai
// }
-
+
// // Create the wrapper module
// project_dir := write_rhai_wrapper_module(wrapper, gen.name, gen.dir) or {
// return error('Failed to create wrapper module: ${err}')
// }
-
+
// // Build and run the project
// build_output, run_output := rust.run_example(project_dir, 'example') or {
// return err
// }
-
+
// return format_success_message(project_dir, build_output, run_output)
// }
@@ -229,14 +227,14 @@ import os
// if wrapper_rs_content == '' {
// return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
// }
-
+
// // Extract engine.rs content
// mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
// if engine_rs_content == '' {
// // Try to extract from the response without explicit language marker
// engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
// }
-
+
// // Extract example.rhai content
// mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
// if example_rhai_content == '' {
@@ -246,7 +244,7 @@ import os
// return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
// }
// }
-
+
// return CodeBlocks{
// wrapper_rs: wrapper_rs_content
// engine_rs: engine_rs_content
diff --git a/lib/ai/mcp/rhai/mcp/command.v b/lib/ai/mcp/rhai/mcp/command.v
index fc8c82d7..956ca1dc 100644
--- a/lib/ai/mcp/rhai/mcp/command.v
+++ b/lib/ai/mcp/rhai/mcp/command.v
@@ -2,17 +2,17 @@ module mcp
import cli
-pub const command := cli.Command{
- sort_flags: true
- name: 'rhai'
+pub const command = cli.Command{
+ sort_flags: true
+ name: 'rhai'
// execute: cmd_mcpgen
description: 'rhai command'
- commands: [
+ commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the Rhai server'
- }
+ },
]
}
@@ -20,4 +20,3 @@ fn cmd_start(cmd cli.Command) ! {
mut server := new_mcp_server()!
server.start()!
}
-
diff --git a/lib/ai/mcp/rhai/mcp/mcp.v b/lib/ai/mcp/rhai/mcp/mcp.v
index dfdb5c68..730c0a4d 100644
--- a/lib/ai/mcp/rhai/mcp/mcp.v
+++ b/lib/ai/mcp/rhai/mcp/mcp.v
@@ -9,10 +9,10 @@ pub fn new_mcp_server() !&mcp.Server {
// Initialize the server with the empty handlers map
mut server := mcp.new_server(mcp.MemoryBackend{
- tools: {
+ tools: {
'generate_rhai_wrapper': generate_rhai_wrapper_spec
}
- tool_handlers: {
+ tool_handlers: {
'generate_rhai_wrapper': generate_rhai_wrapper_handler
}
prompts: {
@@ -30,4 +30,4 @@ pub fn new_mcp_server() !&mcp.Server {
}
})!
return server
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/rhai/mcp/prompts.v b/lib/ai/mcp/rhai/mcp/prompts.v
index 2805b240..55f12781 100644
--- a/lib/ai/mcp/rhai/mcp/prompts.v
+++ b/lib/ai/mcp/rhai/mcp/prompts.v
@@ -5,39 +5,41 @@ import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.ai.mcp.rhai.logic
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.lang.rust
-import x.json2 as json { Any }
+import x.json2 as json
// Tool definition for the create_rhai_wrapper function
const rhai_wrapper_prompt_spec = mcp.Prompt{
- name: 'rhai_wrapper'
- description: 'provides a prompt for creating Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
- arguments: [
- mcp.PromptArgument{
- name: 'source_path'
- description: 'Path to the source directory'
- required: true
- }
- ]
+ name: 'rhai_wrapper'
+ description: 'provides a prompt for creating Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
+ arguments: [
+ mcp.PromptArgument{
+ name: 'source_path'
+ description: 'Path to the source directory'
+ required: true
+ },
+ ]
}
// Tool handler for the create_rhai_wrapper function
pub fn rhai_wrapper_prompt_handler(arguments []string) ![]mcp.PromptMessage {
source_path := arguments[0]
- // Read and combine all Rust files in the source directory
- source_code := rust.read_source_code(source_path)!
-
- // Extract the module name from the directory path (last component)
- name := rust.extract_module_name_from_path(source_path)
-
-source_pkg_info := rust.detect_source_package(source_path)!
+ // Read and combine all Rust files in the source directory
+ source_code := rust.read_source_code(source_path)!
-result := logic.rhai_wrapper_generation_prompt(name, source_code, source_pkg_info)!
-return [mcp.PromptMessage{
- role: 'assistant'
- content: mcp.PromptContent{
- typ: 'text'
- text: result
- }
-}]
-}
\ No newline at end of file
+ // Extract the module name from the directory path (last component)
+ name := rust.extract_module_name_from_path(source_path)
+
+ source_pkg_info := rust.detect_source_package(source_path)!
+
+ result := logic.rhai_wrapper_generation_prompt(name, source_code, source_pkg_info)!
+ return [
+ mcp.PromptMessage{
+ role: 'assistant'
+ content: mcp.PromptContent{
+ typ: 'text'
+ text: result
+ }
+ },
+ ]
+}
diff --git a/lib/ai/mcp/rhai/mcp/specifications.v b/lib/ai/mcp/rhai/mcp/specifications.v
index b4c1ce22..a28447fb 100644
--- a/lib/ai/mcp/rhai/mcp/specifications.v
+++ b/lib/ai/mcp/rhai/mcp/specifications.v
@@ -1,19 +1,19 @@
module mcp
import freeflowuniverse.herolib.ai.mcp
-import x.json2 as json { Any }
+import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import log
const specs = mcp.Tool{
name: 'rhai_interface'
description: 'Add Rhai Interface to Rust Code Files'
- input_schema: jsonschema.Schema{
+ input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string',
- description: 'Path to a .rs file or directory containing .rs files to make rhai interface for',
+ typ: 'string'
+ description: 'Path to a .rs file or directory containing .rs files to make rhai interface for'
})
}
required: ['path']
diff --git a/lib/ai/mcp/rhai/mcp/tools.v b/lib/ai/mcp/rhai/mcp/tools.v
index 974b831c..92f65328 100644
--- a/lib/ai/mcp/rhai/mcp/tools.v
+++ b/lib/ai/mcp/rhai/mcp/tools.v
@@ -8,32 +8,31 @@ import x.json2 as json { Any }
// Tool definition for the generate_rhai_wrapper function
const generate_rhai_wrapper_spec = mcp.Tool{
- name: 'generate_rhai_wrapper'
- description: 'generate_rhai_wrapper receives the name of a V language function string, and the path to the module in which it exists.'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {
- 'name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- }),
- 'source_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- required: ['name', 'source_path']
- }
+ name: 'generate_rhai_wrapper'
+ description: 'generate_rhai_wrapper receives the name of a V language function string, and the path to the module in which it exists.'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'name': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'source_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ required: ['name', 'source_path']
+ }
}
// Tool handler for the generate_rhai_wrapper function
pub fn generate_rhai_wrapper_handler(arguments map[string]Any) !mcp.ToolCallResult {
name := arguments['name'].str()
source_path := arguments['source_path'].str()
- result := logic.generate_rhai_wrapper(name, source_path)
- or {
+ result := logic.generate_rhai_wrapper(name, source_path) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
diff --git a/lib/ai/mcp/rhai/rhai.v b/lib/ai/mcp/rhai/rhai.v
index 28d59606..adc61b86 100644
--- a/lib/ai/mcp/rhai/rhai.v
+++ b/lib/ai/mcp/rhai/rhai.v
@@ -1 +1 @@
-module rhai
\ No newline at end of file
+module rhai
diff --git a/lib/ai/mcp/rust/command.v b/lib/ai/mcp/rust/command.v
index 000c615e..00b5fefa 100644
--- a/lib/ai/mcp/rust/command.v
+++ b/lib/ai/mcp/rust/command.v
@@ -2,16 +2,16 @@ module rust
import cli
-pub const command := cli.Command{
+pub const command = cli.Command{
sort_flags: true
name: 'rust'
description: 'Rust language tools command'
- commands: [
+ commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the Rust MCP server'
- }
+ },
]
}
diff --git a/lib/ai/mcp/rust/generics.v b/lib/ai/mcp/rust/generics.v
index 32a769fc..c9455cb4 100644
--- a/lib/ai/mcp/rust/generics.v
+++ b/lib/ai/mcp/rust/generics.v
@@ -1,6 +1,6 @@
module rust
-import freeflowuniverse.herolib.ai.mcp {ToolContent}
+import freeflowuniverse.herolib.ai.mcp { ToolContent }
pub fn result_to_mcp_tool_contents[T](result T) []ToolContent {
return [result_to_mcp_tool_content[T](result)]
@@ -51,4 +51,4 @@ pub fn array_to_mcp_tool_contents[U](array []U) []ToolContent {
contents << result_to_mcp_tool_content(item)
}
return contents
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/rust/mcp.v b/lib/ai/mcp/rust/mcp.v
index e822d1f6..c8f52cb8 100644
--- a/lib/ai/mcp/rust/mcp.v
+++ b/lib/ai/mcp/rust/mcp.v
@@ -9,40 +9,40 @@ pub fn new_mcp_server() !&mcp.Server {
// Initialize the server with tools and prompts
mut server := mcp.new_server(mcp.MemoryBackend{
- tools: {
+ tools: {
'list_functions_in_file': list_functions_in_file_spec
- 'list_structs_in_file': list_structs_in_file_spec
- 'list_modules_in_dir': list_modules_in_dir_spec
- 'get_import_statement': get_import_statement_spec
+ 'list_structs_in_file': list_structs_in_file_spec
+ 'list_modules_in_dir': list_modules_in_dir_spec
+ 'get_import_statement': get_import_statement_spec
// 'get_module_dependency': get_module_dependency_spec
}
- tool_handlers: {
+ tool_handlers: {
'list_functions_in_file': list_functions_in_file_handler
- 'list_structs_in_file': list_structs_in_file_handler
- 'list_modules_in_dir': list_modules_in_dir_handler
- 'get_import_statement': get_import_statement_handler
+ 'list_structs_in_file': list_structs_in_file_handler
+ 'list_modules_in_dir': list_modules_in_dir_handler
+ 'get_import_statement': get_import_statement_handler
// 'get_module_dependency': get_module_dependency_handler
}
- prompts: {
- 'rust_functions': rust_functions_prompt_spec
- 'rust_structs': rust_structs_prompt_spec
- 'rust_modules': rust_modules_prompt_spec
- 'rust_imports': rust_imports_prompt_spec
+ prompts: {
+ 'rust_functions': rust_functions_prompt_spec
+ 'rust_structs': rust_structs_prompt_spec
+ 'rust_modules': rust_modules_prompt_spec
+ 'rust_imports': rust_imports_prompt_spec
'rust_dependencies': rust_dependencies_prompt_spec
- 'rust_tools_guide': rust_tools_guide_prompt_spec
+ 'rust_tools_guide': rust_tools_guide_prompt_spec
}
prompt_handlers: {
- 'rust_functions': rust_functions_prompt_handler
- 'rust_structs': rust_structs_prompt_handler
- 'rust_modules': rust_modules_prompt_handler
- 'rust_imports': rust_imports_prompt_handler
+ 'rust_functions': rust_functions_prompt_handler
+ 'rust_structs': rust_structs_prompt_handler
+ 'rust_modules': rust_modules_prompt_handler
+ 'rust_imports': rust_imports_prompt_handler
'rust_dependencies': rust_dependencies_prompt_handler
- 'rust_tools_guide': rust_tools_guide_prompt_handler
+ 'rust_tools_guide': rust_tools_guide_prompt_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
server_info: mcp.ServerInfo{
- name: 'rust'
+ name: 'rust'
version: '1.0.0'
}
}
diff --git a/lib/ai/mcp/rust/prompts.v b/lib/ai/mcp/rust/prompts.v
index c0f7f6bf..d6d33b10 100644
--- a/lib/ai/mcp/rust/prompts.v
+++ b/lib/ai/mcp/rust/prompts.v
@@ -2,113 +2,123 @@ module rust
import freeflowuniverse.herolib.ai.mcp
import os
-import x.json2 as json { Any }
+import x.json2 as json
// Prompt specification for Rust functions
const rust_functions_prompt_spec = mcp.Prompt{
- name: 'rust_functions'
+ name: 'rust_functions'
description: 'Provides guidance on working with Rust functions and using the list_functions_in_file tool'
- arguments: []
+ arguments: []
}
// Handler for rust_functions prompt
pub fn rust_functions_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/functions.md')!
-
- return [mcp.PromptMessage{
- role: 'assistant'
- content: mcp.PromptContent{
- typ: 'text'
- text: content
- }
- }]
+
+ return [
+ mcp.PromptMessage{
+ role: 'assistant'
+ content: mcp.PromptContent{
+ typ: 'text'
+ text: content
+ }
+ },
+ ]
}
// Prompt specification for Rust structs
const rust_structs_prompt_spec = mcp.Prompt{
- name: 'rust_structs'
+ name: 'rust_structs'
description: 'Provides guidance on working with Rust structs and using the list_structs_in_file tool'
- arguments: []
+ arguments: []
}
// Handler for rust_structs prompt
pub fn rust_structs_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/structs.md')!
-
- return [mcp.PromptMessage{
- role: 'assistant'
- content: mcp.PromptContent{
- typ: 'text'
- text: content
- }
- }]
+
+ return [
+ mcp.PromptMessage{
+ role: 'assistant'
+ content: mcp.PromptContent{
+ typ: 'text'
+ text: content
+ }
+ },
+ ]
}
// Prompt specification for Rust modules
const rust_modules_prompt_spec = mcp.Prompt{
- name: 'rust_modules'
+ name: 'rust_modules'
description: 'Provides guidance on working with Rust modules and using the list_modules_in_dir tool'
- arguments: []
+ arguments: []
}
// Handler for rust_modules prompt
pub fn rust_modules_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/modules.md')!
-
- return [mcp.PromptMessage{
- role: 'assistant'
- content: mcp.PromptContent{
- typ: 'text'
- text: content
- }
- }]
+
+ return [
+ mcp.PromptMessage{
+ role: 'assistant'
+ content: mcp.PromptContent{
+ typ: 'text'
+ text: content
+ }
+ },
+ ]
}
// Prompt specification for Rust imports
const rust_imports_prompt_spec = mcp.Prompt{
- name: 'rust_imports'
+ name: 'rust_imports'
description: 'Provides guidance on working with Rust imports and using the get_import_statement tool'
- arguments: []
+ arguments: []
}
// Handler for rust_imports prompt
pub fn rust_imports_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/imports.md')!
-
- return [mcp.PromptMessage{
- role: 'assistant'
- content: mcp.PromptContent{
- typ: 'text'
- text: content
- }
- }]
+
+ return [
+ mcp.PromptMessage{
+ role: 'assistant'
+ content: mcp.PromptContent{
+ typ: 'text'
+ text: content
+ }
+ },
+ ]
}
// Prompt specification for Rust dependencies
const rust_dependencies_prompt_spec = mcp.Prompt{
- name: 'rust_dependencies'
+ name: 'rust_dependencies'
description: 'Provides guidance on working with Rust dependencies and using the get_module_dependency tool'
- arguments: []
+ arguments: []
}
// Handler for rust_dependencies prompt
pub fn rust_dependencies_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/dependencies.md')!
-
- return [mcp.PromptMessage{
- role: 'assistant'
- content: mcp.PromptContent{
- typ: 'text'
- text: content
- }
- }]
+
+ return [
+ mcp.PromptMessage{
+ role: 'assistant'
+ content: mcp.PromptContent{
+ typ: 'text'
+ text: content
+ }
+ },
+ ]
}
// Prompt specification for general Rust tools guide
const rust_tools_guide_prompt_spec = mcp.Prompt{
- name: 'rust_tools_guide'
+ name: 'rust_tools_guide'
description: 'Provides a comprehensive guide on all available Rust tools and how to use them'
- arguments: []
+ arguments: []
}
// Handler for rust_tools_guide prompt
@@ -119,26 +129,23 @@ pub fn rust_tools_guide_prompt_handler(arguments []string) ![]mcp.PromptMessage
modules_content := os.read_file('${os.dir(@FILE)}/prompts/modules.md')!
imports_content := os.read_file('${os.dir(@FILE)}/prompts/imports.md')!
dependencies_content := os.read_file('${os.dir(@FILE)}/prompts/dependencies.md')!
-
+
combined_content := '# Rust Language Tools Guide\n\n' +
'This guide provides comprehensive information on working with Rust code using the available tools.\n\n' +
- '## Table of Contents\n\n' +
- '1. [Functions](#functions)\n' +
- '2. [Structs](#structs)\n' +
- '3. [Modules](#modules)\n' +
- '4. [Imports](#imports)\n' +
- '5. [Dependencies](#dependencies)\n\n' +
- '\n' + functions_content + '\n\n' +
- '\n' + structs_content + '\n\n' +
- '\n' + modules_content + '\n\n' +
- '\n' + imports_content + '\n\n' +
- '\n' + dependencies_content
-
- return [mcp.PromptMessage{
- role: 'assistant'
- content: mcp.PromptContent{
- typ: 'text'
- text: combined_content
- }
- }]
+ '## Table of Contents\n\n' + '1. [Functions](#functions)\n' + '2. [Structs](#structs)\n' +
+ '3. [Modules](#modules)\n' + '4. [Imports](#imports)\n' +
+ '5. [Dependencies](#dependencies)\n\n' + '\n' + functions_content +
+ '\n\n' + '\n' + structs_content + '\n\n' +
+ '\n' + modules_content + '\n\n' + '\n' +
+ imports_content + '\n\n' + '\n' + dependencies_content
+
+ return [
+ mcp.PromptMessage{
+ role: 'assistant'
+ content: mcp.PromptContent{
+ typ: 'text'
+ text: combined_content
+ }
+ },
+ ]
}
diff --git a/lib/ai/mcp/rust/tools.v b/lib/ai/mcp/rust/tools.v
index 680ae84b..2e98aac6 100644
--- a/lib/ai/mcp/rust/tools.v
+++ b/lib/ai/mcp/rust/tools.v
@@ -1,111 +1,105 @@
module rust
-import freeflowuniverse.herolib.ai.mcp {ToolContent}
+import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.lang.rust
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2 as json { Any }
// Tool specification for listing functions in a Rust file
const list_functions_in_file_spec = mcp.Tool{
- name: 'list_functions_in_file'
- description: 'Lists all function definitions in a Rust file'
+ name: 'list_functions_in_file'
+ description: 'Lists all function definitions in a Rust file'
input_schema: jsonschema.Schema{
- typ: 'object'
+ typ: 'object'
properties: {
'file_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
description: 'Path to the Rust file'
})
}
- required: ['file_path']
+ required: ['file_path']
}
}
// Handler for list_functions_in_file
pub fn list_functions_in_file_handler(arguments map[string]Any) !mcp.ToolCallResult {
file_path := arguments['file_path'].str()
- result := rust.list_functions_in_file(file_path) or {
- return mcp.error_tool_call_result(err)
- }
+ result := rust.list_functions_in_file(file_path) or { return mcp.error_tool_call_result(err) }
return mcp.ToolCallResult{
is_error: false
- content: mcp.array_to_mcp_tool_contents[string](result)
+ content: mcp.array_to_mcp_tool_contents[string](result)
}
}
// Tool specification for listing structs in a Rust file
const list_structs_in_file_spec = mcp.Tool{
- name: 'list_structs_in_file'
- description: 'Lists all struct definitions in a Rust file'
+ name: 'list_structs_in_file'
+ description: 'Lists all struct definitions in a Rust file'
input_schema: jsonschema.Schema{
- typ: 'object'
+ typ: 'object'
properties: {
'file_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
description: 'Path to the Rust file'
})
}
- required: ['file_path']
+ required: ['file_path']
}
}
// Handler for list_structs_in_file
pub fn list_structs_in_file_handler(arguments map[string]Any) !mcp.ToolCallResult {
file_path := arguments['file_path'].str()
- result := rust.list_structs_in_file(file_path) or {
- return mcp.error_tool_call_result(err)
- }
+ result := rust.list_structs_in_file(file_path) or { return mcp.error_tool_call_result(err) }
return mcp.ToolCallResult{
is_error: false
- content: mcp.array_to_mcp_tool_contents[string](result)
+ content: mcp.array_to_mcp_tool_contents[string](result)
}
}
// Tool specification for listing modules in a directory
const list_modules_in_dir_spec = mcp.Tool{
- name: 'list_modules_in_dir'
- description: 'Lists all Rust modules in a directory'
+ name: 'list_modules_in_dir'
+ description: 'Lists all Rust modules in a directory'
input_schema: jsonschema.Schema{
- typ: 'object'
+ typ: 'object'
properties: {
'dir_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
description: 'Path to the directory'
})
}
- required: ['dir_path']
+ required: ['dir_path']
}
}
// Handler for list_modules_in_dir
pub fn list_modules_in_dir_handler(arguments map[string]Any) !mcp.ToolCallResult {
dir_path := arguments['dir_path'].str()
- result := rust.list_modules_in_directory(dir_path) or {
- return mcp.error_tool_call_result(err)
- }
+ result := rust.list_modules_in_directory(dir_path) or { return mcp.error_tool_call_result(err) }
return mcp.ToolCallResult{
is_error: false
- content: mcp.array_to_mcp_tool_contents[string](result)
+ content: mcp.array_to_mcp_tool_contents[string](result)
}
}
// Tool specification for getting an import statement
const get_import_statement_spec = mcp.Tool{
- name: 'get_import_statement'
- description: 'Generates appropriate Rust import statement for a module based on file paths'
+ name: 'get_import_statement'
+ description: 'Generates appropriate Rust import statement for a module based on file paths'
input_schema: jsonschema.Schema{
- typ: 'object'
+ typ: 'object'
properties: {
- 'current_file': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'current_file': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
description: 'Path to the file where the import will be added'
- }),
+ })
'target_module': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
description: 'Path to the target module to be imported'
})
}
- required: ['current_file', 'target_module']
+ required: ['current_file', 'target_module']
}
}
@@ -118,33 +112,33 @@ pub fn get_import_statement_handler(arguments map[string]Any) !mcp.ToolCallResul
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// Tool specification for getting module dependency information
const get_module_dependency_spec = mcp.Tool{
- name: 'get_module_dependency'
- description: 'Gets dependency information for adding a Rust module to a project'
+ name: 'get_module_dependency'
+ description: 'Gets dependency information for adding a Rust module to a project'
input_schema: jsonschema.Schema{
- typ: 'object'
+ typ: 'object'
properties: {
'importer_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
description: 'Path to the file that will import the module'
- }),
- 'module_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ })
+ 'module_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
description: 'Path to the module that will be imported'
})
}
- required: ['importer_path', 'module_path']
+ required: ['importer_path', 'module_path']
}
}
struct Tester {
import_statement string
- module_path string
+ module_path string
}
// Handler for get_module_dependency
@@ -157,9 +151,9 @@ pub fn get_module_dependency_handler(arguments map[string]Any) !mcp.ToolCallResu
return mcp.ToolCallResult{
is_error: false
- content: result_to_mcp_tool_contents[Tester](Tester{
+ content: result_to_mcp_tool_contents[Tester](Tester{
import_statement: dependency.import_statement
- module_path: dependency.module_path
+ module_path: dependency.module_path
}) // Return JSON string
}
}
@@ -168,21 +162,21 @@ pub fn get_module_dependency_handler(arguments map[string]Any) !mcp.ToolCallResu
// Specification for get_function_from_file tool
const get_function_from_file_spec = mcp.Tool{
- name: 'get_function_from_file'
- description: 'Get the declaration of a Rust function from a specified file path.'
+ name: 'get_function_from_file'
+ description: 'Get the declaration of a Rust function from a specified file path.'
input_schema: jsonschema.Schema{
- typ: 'object'
+ typ: 'object'
properties: {
- 'file_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'file_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
description: 'Path to the Rust file.'
- }),
+ })
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- description: 'Name of the function to retrieve (e.g., \'my_function\' or \'MyStruct::my_method\').'
+ typ: 'string'
+ description: "Name of the function to retrieve (e.g., 'my_function' or 'MyStruct::my_method')."
})
}
- required: ['file_path', 'function_name']
+ required: ['file_path', 'function_name']
}
}
@@ -195,7 +189,7 @@ pub fn get_function_from_file_handler(arguments map[string]Any) !mcp.ToolCallRes
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
@@ -203,21 +197,21 @@ pub fn get_function_from_file_handler(arguments map[string]Any) !mcp.ToolCallRes
// Specification for get_function_from_module tool
const get_function_from_module_spec = mcp.Tool{
- name: 'get_function_from_module'
- description: 'Get the declaration of a Rust function from a specified module path (directory or file).'
+ name: 'get_function_from_module'
+ description: 'Get the declaration of a Rust function from a specified module path (directory or file).'
input_schema: jsonschema.Schema{
- typ: 'object'
+ typ: 'object'
properties: {
- 'module_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'module_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
description: 'Path to the Rust module directory or file.'
- }),
+ })
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- description: 'Name of the function to retrieve (e.g., \'my_function\' or \'MyStruct::my_method\').'
+ typ: 'string'
+ description: "Name of the function to retrieve (e.g., 'my_function' or 'MyStruct::my_method')."
})
}
- required: ['module_path', 'function_name']
+ required: ['module_path', 'function_name']
}
}
@@ -230,7 +224,7 @@ pub fn get_function_from_module_handler(arguments map[string]Any) !mcp.ToolCallR
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
@@ -238,21 +232,21 @@ pub fn get_function_from_module_handler(arguments map[string]Any) !mcp.ToolCallR
// Specification for get_struct_from_file tool
const get_struct_from_file_spec = mcp.Tool{
- name: 'get_struct_from_file'
- description: 'Get the declaration of a Rust struct from a specified file path.'
+ name: 'get_struct_from_file'
+ description: 'Get the declaration of a Rust struct from a specified file path.'
input_schema: jsonschema.Schema{
- typ: 'object'
+ typ: 'object'
properties: {
- 'file_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'file_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
description: 'Path to the Rust file.'
- }),
+ })
'struct_name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- description: 'Name of the struct to retrieve (e.g., \'MyStruct\').'
+ typ: 'string'
+ description: "Name of the struct to retrieve (e.g., 'MyStruct')."
})
}
- required: ['file_path', 'struct_name']
+ required: ['file_path', 'struct_name']
}
}
@@ -265,7 +259,7 @@ pub fn get_struct_from_file_handler(arguments map[string]Any) !mcp.ToolCallResul
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
@@ -273,21 +267,21 @@ pub fn get_struct_from_file_handler(arguments map[string]Any) !mcp.ToolCallResul
// Specification for get_struct_from_module tool
const get_struct_from_module_spec = mcp.Tool{
- name: 'get_struct_from_module'
- description: 'Get the declaration of a Rust struct from a specified module path (directory or file).'
+ name: 'get_struct_from_module'
+ description: 'Get the declaration of a Rust struct from a specified module path (directory or file).'
input_schema: jsonschema.Schema{
- typ: 'object'
+ typ: 'object'
properties: {
'module_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
description: 'Path to the Rust module directory or file.'
- }),
+ })
'struct_name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- description: 'Name of the struct to retrieve (e.g., \'MyStruct\').'
+ typ: 'string'
+ description: "Name of the struct to retrieve (e.g., 'MyStruct')."
})
}
- required: ['module_path', 'struct_name']
+ required: ['module_path', 'struct_name']
}
}
@@ -300,6 +294,6 @@ pub fn get_struct_from_module_handler(arguments map[string]Any) !mcp.ToolCallRes
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/vcode/cmd/main.v b/lib/ai/mcp/vcode/cmd/main.v
index 30078f81..4a358513 100644
--- a/lib/ai/mcp/vcode/cmd/main.v
+++ b/lib/ai/mcp/vcode/cmd/main.v
@@ -8,7 +8,7 @@ fn main() {
eprintln('Failed to create MCP server: ${err}')
return
}
-
+
// Start the server
server.start() or {
eprintln('Failed to start MCP server: ${err}')
diff --git a/lib/ai/mcp/vcode/logic/server.v b/lib/ai/mcp/vcode/logic/server.v
index d6332932..0d100a9f 100644
--- a/lib/ai/mcp/vcode/logic/server.v
+++ b/lib/ai/mcp/vcode/logic/server.v
@@ -15,11 +15,11 @@ pub fn new_mcp_server(v &VCode) !&mcp.Server {
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
'get_function_from_file': get_function_from_file_tool
- 'write_vfile': write_vfile_tool
+ 'write_vfile': write_vfile_tool
}
tool_handlers: {
'get_function_from_file': v.get_function_from_file_tool_handler
- 'write_vfile': v.write_vfile_tool_handler
+ 'write_vfile': v.write_vfile_tool_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
@@ -30,4 +30,4 @@ pub fn new_mcp_server(v &VCode) !&mcp.Server {
}
})!
return server
-}
\ No newline at end of file
+}
diff --git a/lib/ai/mcp/vcode/logic/vlang_tools.v b/lib/ai/mcp/vcode/logic/vlang_tools.v
index b24d3d34..a90e9fb9 100644
--- a/lib/ai/mcp/vcode/logic/vlang_tools.v
+++ b/lib/ai/mcp/vcode/logic/vlang_tools.v
@@ -3,7 +3,7 @@ module vcode
import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.schemas.jsonschema
-import x.json2 {Any}
+import x.json2 { Any }
const get_function_from_file_tool = mcp.Tool{
name: 'get_function_from_file'
@@ -16,10 +16,10 @@ RETURNS: string - the function block including comments, or empty string if not
typ: 'object'
properties: {
'file_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
})
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
})
}
required: ['file_path', 'function_name']
diff --git a/lib/ai/mcp/vcode/logic/write_vfile_tool.v b/lib/ai/mcp/vcode/logic/write_vfile_tool.v
index 543c79d1..172d4e20 100644
--- a/lib/ai/mcp/vcode/logic/write_vfile_tool.v
+++ b/lib/ai/mcp/vcode/logic/write_vfile_tool.v
@@ -3,7 +3,7 @@ module vcode
import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.schemas.jsonschema
-import x.json2 {Any}
+import x.json2 { Any }
const write_vfile_tool = mcp.Tool{
name: 'write_vfile'
@@ -18,20 +18,20 @@ RETURNS: string - success message with the path of the written file'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
- 'path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
})
- 'code': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'code': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
})
- 'format': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'boolean'
+ 'format': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'boolean'
})
'overwrite': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'boolean'
+ typ: 'boolean'
})
- 'prefix': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'prefix': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
})
}
required: ['path', 'code']
@@ -41,31 +41,27 @@ RETURNS: string - success message with the path of the written file'
pub fn (d &VCode) write_vfile_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
path := arguments['path'].str()
code_str := arguments['code'].str()
-
+
// Parse optional parameters with defaults
format := if 'format' in arguments { arguments['format'].bool() } else { false }
overwrite := if 'overwrite' in arguments { arguments['overwrite'].bool() } else { false }
prefix := if 'prefix' in arguments { arguments['prefix'].str() } else { '' }
-
+
// Create write options
options := code.WriteOptions{
- format: format
+ format: format
overwrite: overwrite
- prefix: prefix
+ prefix: prefix
}
-
+
// Parse the V code string into a VFile
- vfile := code.parse_vfile(code_str) or {
- return mcp.error_tool_call_result(err)
- }
-
+ vfile := code.parse_vfile(code_str) or { return mcp.error_tool_call_result(err) }
+
// Write the VFile to the specified path
- vfile.write(path, options) or {
- return mcp.error_tool_call_result(err)
- }
-
+ vfile.write(path, options) or { return mcp.error_tool_call_result(err) }
+
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string]('Successfully wrote V file to ${path}')
+ content: mcp.result_to_mcp_tool_contents[string]('Successfully wrote V file to ${path}')
}
}
diff --git a/lib/ai/mcp/vcode/mcp/handlers.v b/lib/ai/mcp/vcode/mcp/handlers.v
index 6675e521..6aeb6e81 100644
--- a/lib/ai/mcp/vcode/mcp/handlers.v
+++ b/lib/ai/mcp/vcode/mcp/handlers.v
@@ -8,47 +8,47 @@ import os
pub fn handler(arguments map[string]Any) !mcp.ToolCallResult {
path := arguments['path'].str()
-
+
// Check if path exists
if !os.exists(path) {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
+ content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
}
}
-
+
// Determine if path is a file or directory
is_directory := os.is_dir(path)
-
- mut message := ""
-
+
+ mut message := ''
+
if is_directory {
// Convert all pug files in the directory
pugconvert.convert_pug(path) or {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error converting pug files in directory: ${err}")
+ content: mcp.result_to_mcp_tool_contents[string]('Error converting pug files in directory: ${err}')
}
}
message = "Successfully converted all pug files in directory '${path}'"
- } else if path.ends_with(".v") {
+ } else if path.ends_with('.v') {
// Convert a single pug file
pugconvert.convert_pug_file(path) or {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error converting pug file: ${err}")
+ content: mcp.result_to_mcp_tool_contents[string]('Error converting pug file: ${err}')
}
}
message = "Successfully converted pug file '${path}'"
} else {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
+ content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
}
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](message)
+ content: mcp.result_to_mcp_tool_contents[string](message)
}
}
diff --git a/lib/ai/mcp/vcode/mcp/specifications.v b/lib/ai/mcp/vcode/mcp/specifications.v
index 1e297ecd..efce2a74 100644
--- a/lib/ai/mcp/vcode/mcp/specifications.v
+++ b/lib/ai/mcp/vcode/mcp/specifications.v
@@ -1,18 +1,18 @@
module pugconvert
import freeflowuniverse.herolib.ai.mcp
-import x.json2 as json { Any }
+import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.ai.mcp.logger
const specs = mcp.Tool{
name: 'pugconvert'
description: 'Convert Pug template files to Jet template files'
- input_schema: jsonschema.Schema{
+ input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string',
+ typ: 'string'
description: 'Path to a .pug file or directory containing .pug files to convert'
})
}
diff --git a/lib/ai/utils/utils.v b/lib/ai/utils/utils.v
index 12a7f893..ba0da511 100644
--- a/lib/ai/utils/utils.v
+++ b/lib/ai/utils/utils.v
@@ -2,33 +2,29 @@ module utils
// Helper function to extract code blocks from the response
pub fn extract_code_block(response string, identifier string, language string) string {
- // Find the start marker for the code block
- mut start_marker := '```${language}\n// ${identifier}'
- if language == '' {
- start_marker = '```\n// ${identifier}'
- }
-
- start_index := response.index(start_marker) or {
- // Try alternative format
- mut alt_marker := '```${language}\n${identifier}'
- if language == '' {
- alt_marker = '```\n${identifier}'
- }
-
- response.index(alt_marker) or {
- return ''
- }
- }
-
- // Find the end marker
- end_marker := '```'
- end_index := response.index_after(end_marker, start_index + start_marker.len) or {
- return ''
- }
-
- // Extract the content between the markers
- content_start := start_index + start_marker.len
- content := response[content_start..end_index].trim_space()
-
- return content
-}
\ No newline at end of file
+ // Find the start marker for the code block
+ mut start_marker := '```${language}\n// ${identifier}'
+ if language == '' {
+ start_marker = '```\n// ${identifier}'
+ }
+
+ start_index := response.index(start_marker) or {
+ // Try alternative format
+ mut alt_marker := '```${language}\n${identifier}'
+ if language == '' {
+ alt_marker = '```\n${identifier}'
+ }
+
+ response.index(alt_marker) or { return '' }
+ }
+
+ // Find the end marker
+ end_marker := '```'
+ end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
+
+ // Extract the content between the markers
+ content_start := start_index + start_marker.len
+ content := response[content_start..end_index].trim_space()
+
+ return content
+}
diff --git a/lib/baobab/generator/generate_actor_source.v b/lib/baobab/generator/generate_actor_source.v
index efb1e947..e29958b6 100644
--- a/lib/baobab/generator/generate_actor_source.v
+++ b/lib/baobab/generator/generate_actor_source.v
@@ -11,8 +11,7 @@ pub fn generate_module_from_openapi(openapi_path string) !string {
openapi_spec := openapi.new(path: openapi_path)!
actor_spec := specification.from_openapi(openapi_spec)!
- actor_module := generator.generate_actor_module(
- actor_spec,
+ actor_module := generate_actor_module(actor_spec,
interfaces: [.openapi, .http]
)!
diff --git a/lib/baobab/generator/generate_methods.v b/lib/baobab/generator/generate_methods.v
index 9fcdca42..07cf3eb2 100644
--- a/lib/baobab/generator/generate_methods.v
+++ b/lib/baobab/generator/generate_methods.v
@@ -1,6 +1,6 @@
module generator
-import freeflowuniverse.herolib.core.code { Array, CodeItem, Function, Import, Param, Result, Struct, VFile }
+import freeflowuniverse.herolib.core.code { CodeItem, Function, Import, Param, Result, Struct, VFile }
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.schemas.openapi
import freeflowuniverse.herolib.schemas.openrpc
@@ -18,12 +18,13 @@ pub struct Source {
}
pub fn generate_methods_file_str(source Source) !string {
- actor_spec := if path := source.openapi_path {
+ actor_spec := if path := source.openapi_path {
specification.from_openapi(openapi.new(path: path)!)!
} else if path := source.openrpc_path {
specification.from_openrpc(openrpc.new(path: path)!)!
+ } else {
+ panic('No openapi or openrpc path provided')
}
- else { panic('No openapi or openrpc path provided') }
return generate_methods_file(actor_spec)!.write_str()!
}
diff --git a/lib/baobab/generator/generate_methods_example.v b/lib/baobab/generator/generate_methods_example.v
index 3f303520..4acd32fc 100644
--- a/lib/baobab/generator/generate_methods_example.v
+++ b/lib/baobab/generator/generate_methods_example.v
@@ -10,12 +10,13 @@ import freeflowuniverse.herolib.baobab.specification { ActorMethod, ActorSpecifi
import freeflowuniverse.herolib.schemas.openapi
pub fn generate_methods_example_file_str(source Source) !string {
- actor_spec := if path := source.openapi_path {
+ actor_spec := if path := source.openapi_path {
specification.from_openapi(openapi.new(path: path)!)!
} else if path := source.openrpc_path {
specification.from_openrpc(openrpc.new(path: path)!)!
+ } else {
+ panic('No openapi or openrpc path provided')
}
- else { panic('No openapi or openrpc path provided') }
return generate_methods_example_file(actor_spec)!.write_str()!
}
diff --git a/lib/baobab/generator/generate_methods_interface.v b/lib/baobab/generator/generate_methods_interface.v
index 5163d909..ab6b84d1 100644
--- a/lib/baobab/generator/generate_methods_interface.v
+++ b/lib/baobab/generator/generate_methods_interface.v
@@ -8,12 +8,13 @@ import freeflowuniverse.herolib.schemas.openapi
import freeflowuniverse.herolib.schemas.openrpc
pub fn generate_methods_interface_file_str(source Source) !string {
- actor_spec := if path := source.openapi_path {
+ actor_spec := if path := source.openapi_path {
specification.from_openapi(openapi.new(path: path)!)!
} else if path := source.openrpc_path {
specification.from_openrpc(openrpc.new(path: path)!)!
+ } else {
+ panic('No openapi or openrpc path provided')
}
- else { panic('No openapi or openrpc path provided') }
return generate_methods_interface_file(actor_spec)!.write_str()!
}
diff --git a/lib/baobab/generator/generate_model.v b/lib/baobab/generator/generate_model.v
index d8cba78f..7ad7bf66 100644
--- a/lib/baobab/generator/generate_model.v
+++ b/lib/baobab/generator/generate_model.v
@@ -8,12 +8,13 @@ import freeflowuniverse.herolib.schemas.openapi
import freeflowuniverse.herolib.schemas.openrpc
pub fn generate_model_file_str(source Source) !string {
- actor_spec := if path := source.openapi_path {
+ actor_spec := if path := source.openapi_path {
specification.from_openapi(openapi.new(path: path)!)!
} else if path := source.openrpc_path {
specification.from_openrpc(openrpc.new(path: path)!)!
+ } else {
+ panic('No openapi or openrpc path provided')
}
- else { panic('No openapi or openrpc path provided') }
return generate_model_file(actor_spec)!.write_str()!
}
diff --git a/lib/baobab/specification/from_openapi.v b/lib/baobab/specification/from_openapi.v
index a28c7ed4..3506cb97 100644
--- a/lib/baobab/specification/from_openapi.v
+++ b/lib/baobab/specification/from_openapi.v
@@ -3,7 +3,7 @@ module specification
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.code { Struct }
import freeflowuniverse.herolib.schemas.jsonschema { Schema, SchemaRef }
-import freeflowuniverse.herolib.schemas.openapi { MediaType, OpenAPI, Parameter, Operation, OperationInfo }
+import freeflowuniverse.herolib.schemas.openapi { MediaType, OpenAPI, OperationInfo, Parameter }
import freeflowuniverse.herolib.schemas.openrpc { ContentDescriptor, ErrorSpec, Example, ExamplePairing, ExampleRef }
// Helper function: Convert OpenAPI parameter to ContentDescriptor
diff --git a/lib/clients/openai/audio.v b/lib/clients/openai/audio.v
index 79d5c8d3..f5f980e7 100644
--- a/lib/clients/openai/audio.v
+++ b/lib/clients/openai/audio.v
@@ -114,7 +114,7 @@ fn (mut f OpenAI) create_audio_request(args AudioArgs, endpoint string) !AudioRe
@[params]
pub struct CreateSpeechArgs {
pub:
- model string = "tts_1"
+ model string = 'tts_1'
input string @[required]
voice Voice = .alloy
response_format AudioFormat = .mp3
diff --git a/lib/clients/openai/client_test.v b/lib/clients/openai/client_test.v
index 0a1c2bf8..f48a96cb 100644
--- a/lib/clients/openai/client_test.v
+++ b/lib/clients/openai/client_test.v
@@ -9,9 +9,9 @@ fn test_chat_completion() {
println(client.list_models()!)
- raise("sss")
+ raise('sss')
- res := client.chat_completion( Messages{
+ res := client.chat_completion(Messages{
messages: [
Message{
role: .user
diff --git a/lib/clients/openai/completions.v b/lib/clients/openai/completions.v
index d74fdbc1..09e8416c 100644
--- a/lib/clients/openai/completions.v
+++ b/lib/clients/openai/completions.v
@@ -44,31 +44,31 @@ pub mut:
struct ChatMessagesRaw {
mut:
- model string
- messages []MessageRaw
- temperature f64 = 0.5
+ model string
+ messages []MessageRaw
+ temperature f64 = 0.5
max_completion_tokens int = 32000
}
@[params]
-pub struct CompletionArgs{
+pub struct CompletionArgs {
pub mut:
- model string
- msgs Messages
- temperature f64 = 0.5
+ model string
+ msgs Messages
+ temperature f64 = 0.5
max_completion_tokens int = 32000
}
// creates a new chat completion given a list of messages
// each message consists of message content and the role of the author
pub fn (mut f OpenAI) chat_completion(args_ CompletionArgs) !ChatCompletion {
- mut args:=args_
- if args.model==""{
+ mut args := args_
+ if args.model == '' {
args.model = f.model_default
}
mut m := ChatMessagesRaw{
- model: args.model
- temperature: args.temperature
+ model: args.model
+ temperature: args.temperature
max_completion_tokens: args.max_completion_tokens
}
for msg in args.msgs.messages {
diff --git a/lib/clients/openai/openai_factory_.v b/lib/clients/openai/openai_factory_.v
index 301fc4af..6fd469a7 100644
--- a/lib/clients/openai/openai_factory_.v
+++ b/lib/clients/openai/openai_factory_.v
@@ -28,7 +28,9 @@ fn args_get(args_ ArgsGet) ArgsGet {
pub fn get(args_ ArgsGet) !&OpenAI {
mut context := base.context()!
mut args := args_get(args_)
- mut obj := OpenAI{name:args.name}
+ mut obj := OpenAI{
+ name: args.name
+ }
if args.name !in openai_global {
if !exists(args)! {
set(obj)!
diff --git a/lib/clients/openai/openai_model.v b/lib/clients/openai/openai_model.v
index 5cad7870..a8d67cf3 100644
--- a/lib/clients/openai/openai_model.v
+++ b/lib/clients/openai/openai_model.v
@@ -22,44 +22,43 @@ const default = true
@[heap]
pub struct OpenAI {
pub mut:
- name string = 'default'
- api_key string
- url string
+ name string = 'default'
+ api_key string
+ url string
model_default string
- conn ?&httpconnection.HTTPConnection @[skip; str: skip]
+ conn ?&httpconnection.HTTPConnection @[skip; str: skip]
}
// your checking & initialization code if needed
fn obj_init(mycfg_ OpenAI) !OpenAI {
- mut mycfg := mycfg_
- if mycfg.api_key==""{
-
- mut k:=os.getenv('AIKEY')
- if k != ""{
- mycfg.api_key = k
- k=os.getenv('AIURL')
- if k != ""{
+ mut mycfg := mycfg_
+ if mycfg.api_key == '' {
+ mut k := os.getenv('AIKEY')
+ if k != '' {
+ mycfg.api_key = k
+ k = os.getenv('AIURL')
+ if k != '' {
mycfg.url = k
- }else{
- return error("found AIKEY in env, but not AIURL")
- }
- k=os.getenv('AIMODEL')
- if k != ""{
+ } else {
+ return error('found AIKEY in env, but not AIURL')
+ }
+ k = os.getenv('AIMODEL')
+ if k != '' {
mycfg.model_default = k
- }
- return mycfg
- }
- mycfg.url = "https://api.openai.com/v1/models"
- k=os.getenv('OPENAI_API_KEY')
- if k != ""{
- mycfg.api_key = k
- return mycfg
+ }
+ return mycfg
}
- k=os.getenv('OPENROUTER_API_KEY')
- if k != ""{
- mycfg.api_key = k
- mycfg.url = "https://openrouter.ai/api/v1"
- return mycfg
+ mycfg.url = 'https://api.openai.com/v1/models'
+ k = os.getenv('OPENAI_API_KEY')
+ if k != '' {
+ mycfg.api_key = k
+ return mycfg
+ }
+ k = os.getenv('OPENROUTER_API_KEY')
+ if k != '' {
+ mycfg.api_key = k
+ mycfg.url = 'https://openrouter.ai/api/v1'
+ return mycfg
}
}
return mycfg
@@ -75,12 +74,12 @@ pub fn (mut client OpenAI) connection() !&httpconnection.HTTPConnection {
)!
c2
}
+
c.default_header.set(.authorization, 'Bearer ${client.api_key}')
client.conn = c
return c
}
-
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_dumps(obj OpenAI) !string {
diff --git a/lib/core/code/model_file.v b/lib/core/code/model_file.v
index 28ed8da5..1d4bebbd 100644
--- a/lib/core/code/model_file.v
+++ b/lib/core/code/model_file.v
@@ -6,9 +6,9 @@ import freeflowuniverse.herolib.core.pathlib
import os
pub interface IFile {
+ name string
write(string, WriteOptions) !
write_str(WriteOptions) !string
- name string
}
pub struct File {
@@ -124,7 +124,9 @@ pub fn (code VFile) write_str(options WriteOptions) !string {
''
}
- mod_stmt := if code.mod == '' {''} else {
+ mod_stmt := if code.mod == '' {
+ ''
+ } else {
'module ${code.mod}'
}
@@ -169,9 +171,9 @@ pub fn parse_vfile(code string) !VFile {
mut vfile := VFile{
content: code
}
-
+
lines := code.split_into_lines()
-
+
// Extract module name
for line in lines {
trimmed := line.trim_space()
@@ -180,7 +182,7 @@ pub fn parse_vfile(code string) !VFile {
break
}
}
-
+
// Extract imports
for line in lines {
trimmed := line.trim_space()
@@ -189,29 +191,29 @@ pub fn parse_vfile(code string) !VFile {
vfile.imports << import_obj
}
}
-
+
// Extract constants
vfile.consts = parse_consts(code) or { []Const{} }
-
+
// Split code into chunks for parsing structs and functions
mut chunks := []string{}
mut current_chunk := ''
mut brace_count := 0
mut in_struct_or_fn := false
mut comment_block := []string{}
-
+
for line in lines {
trimmed := line.trim_space()
-
+
// Collect comments
if trimmed.starts_with('//') && !in_struct_or_fn {
comment_block << line
continue
}
-
+
// Check for struct or function start
- if (trimmed.starts_with('struct ') || trimmed.starts_with('pub struct ') ||
- trimmed.starts_with('fn ') || trimmed.starts_with('pub fn ')) && !in_struct_or_fn {
+ if (trimmed.starts_with('struct ') || trimmed.starts_with('pub struct ')
+ || trimmed.starts_with('fn ') || trimmed.starts_with('pub fn ')) && !in_struct_or_fn {
in_struct_or_fn = true
current_chunk = comment_block.join('\n')
if current_chunk != '' {
@@ -219,14 +221,14 @@ pub fn parse_vfile(code string) !VFile {
}
current_chunk += line
comment_block = []string{}
-
+
if line.contains('{') {
brace_count += line.count('{')
}
if line.contains('}') {
brace_count -= line.count('}')
}
-
+
if brace_count == 0 {
// Single line definition
chunks << current_chunk
@@ -235,18 +237,18 @@ pub fn parse_vfile(code string) !VFile {
}
continue
}
-
+
// Add line to current chunk if we're inside a struct or function
if in_struct_or_fn {
current_chunk += '\n' + line
-
+
if line.contains('{') {
brace_count += line.count('{')
}
if line.contains('}') {
brace_count -= line.count('}')
}
-
+
// Check if we've reached the end of the struct or function
if brace_count == 0 {
chunks << current_chunk
@@ -255,11 +257,11 @@ pub fn parse_vfile(code string) !VFile {
}
}
}
-
+
// Parse each chunk and add to items
for chunk in chunks {
trimmed := chunk.trim_space()
-
+
if trimmed.contains('struct ') || trimmed.contains('pub struct ') {
// Parse struct
struct_obj := parse_struct(chunk) or {
@@ -276,6 +278,6 @@ pub fn parse_vfile(code string) !VFile {
vfile.items << fn_obj
}
}
-
+
return vfile
}
diff --git a/lib/core/code/model_file_test.v b/lib/core/code/model_file_test.v
index ca421f4d..7d68c614 100644
--- a/lib/core/code/model_file_test.v
+++ b/lib/core/code/model_file_test.v
@@ -1,7 +1,7 @@
module code
fn test_parse_vfile() {
- code := '
+ code := "
module test
import os
@@ -9,7 +9,7 @@ import strings
import freeflowuniverse.herolib.core.texttools
const (
- VERSION = \'1.0.0\'
+ VERSION = '1.0.0'
DEBUG = true
)
@@ -21,7 +21,7 @@ pub mut:
// greet returns a greeting message
pub fn (p Person) greet() string {
- return \'Hello, my name is \${p.name} and I am \${p.age} years old\'
+ return 'Hello, my name is \${p.name} and I am \${p.age} years old'
}
// create_person creates a new Person instance
@@ -31,7 +31,7 @@ pub fn create_person(name string, age int) Person {
age: age
}
}
-'
+"
vfile := parse_vfile(code) or {
assert false, 'Failed to parse VFile: ${err}'
@@ -50,7 +50,7 @@ pub fn create_person(name string, age int) Person {
// Test constants
assert vfile.consts.len == 2
assert vfile.consts[0].name == 'VERSION'
- assert vfile.consts[0].value == '\'1.0.0\''
+ assert vfile.consts[0].value == "'1.0.0'"
assert vfile.consts[1].name == 'DEBUG'
assert vfile.consts[1].value == 'true'
@@ -68,13 +68,13 @@ pub fn create_person(name string, age int) Person {
// Test functions
functions := vfile.functions()
assert functions.len == 2
-
+
// Test method
assert functions[0].name == 'greet'
assert functions[0].is_pub == true
assert functions[0].receiver.typ.vgen() == 'Person'
assert functions[0].result.typ.vgen() == 'string'
-
+
// Test standalone function
assert functions[1].name == 'create_person'
assert functions[1].is_pub == true
diff --git a/lib/core/code/model_function.v b/lib/core/code/model_function.v
index 5ebf0ec0..3b37ff8e 100644
--- a/lib/core/code/model_function.v
+++ b/lib/core/code/model_function.v
@@ -133,30 +133,30 @@ pub fn parse_function(code_ string) !Function {
// Extract the result type, handling the ! for result types
mut result_type := code.all_after(')').all_before('{').replace(' ', '')
mut has_return := false
-
+
// Check if the result type contains !
if result_type.contains('!') {
has_return = true
result_type = result_type.replace('!', '')
}
-
+
result := new_param(
v: result_type
)!
body := if code.contains('{') { code.all_after('{').all_before_last('}') } else { '' }
-
+
// Process the comments into a description
description := comment_lines.join('\n')
-
+
return Function{
- name: name
- receiver: receiver
- params: params
- result: result
- body: body
+ name: name
+ receiver: receiver
+ params: params
+ result: result
+ body: body
description: description
- is_pub: is_pub
- has_return: has_return
+ is_pub: is_pub
+ has_return: has_return
}
}
diff --git a/lib/core/code/model_function_test.v b/lib/core/code/model_function_test.v
index eae604d1..87b7f524 100644
--- a/lib/core/code/model_function_test.v
+++ b/lib/core/code/model_function_test.v
@@ -2,20 +2,20 @@ module code
fn test_parse_function_with_comments() {
// Test function string with comments
- function_str := '// test_function is a simple function for testing the MCP tool code generation
+ function_str := "// test_function is a simple function for testing the MCP tool code generation
// It takes a config and returns a result
pub fn test_function(config TestConfig) !TestResult {
// This is just a mock implementation for testing purposes
- if config.name == \'\' {
- return error(\'Name cannot be empty\')
+ if config.name == '' {
+ return error('Name cannot be empty')
}
return TestResult{
success: config.enabled
- message: \'Test completed for \${config.name}\'
+ message: 'Test completed for \${config.name}'
code: if config.enabled { 0 } else { 1 }
}
-}'
+}"
// Parse the function
function := parse_function(function_str) or {
@@ -30,7 +30,7 @@ pub fn test_function(config TestConfig) !TestResult {
assert function.params[0].name == 'config'
assert function.params[0].typ.symbol() == 'TestConfig'
assert function.result.typ.symbol() == 'TestResult'
-
+
// Verify that the comments were correctly parsed into the description
expected_description := 'test_function is a simple function for testing the MCP tool code generation
It takes a config and returns a result'
@@ -41,9 +41,9 @@ It takes a config and returns a result'
fn test_parse_function_without_comments() {
// Test function string without comments
- function_str := 'fn simple_function(name string, count int) string {
- return \'\${name} count: \${count}\'
-}'
+ function_str := "fn simple_function(name string, count int) string {
+ return '\${name} count: \${count}'
+}"
// Parse the function
function := parse_function(function_str) or {
@@ -60,7 +60,7 @@ fn test_parse_function_without_comments() {
assert function.params[1].name == 'count'
assert function.params[1].typ.symbol() == 'int'
assert function.result.typ.symbol() == 'string'
-
+
// Verify that there is no description
assert function.description == ''
diff --git a/lib/core/code/model_module.v b/lib/core/code/model_module.v
index fcdc839d..4e9ae8cb 100644
--- a/lib/core/code/model_module.v
+++ b/lib/core/code/model_module.v
@@ -79,4 +79,4 @@ pub fn (mod Module) write_str() !string {
}
return out
-}
\ No newline at end of file
+}
diff --git a/lib/core/code/model_struct.v b/lib/core/code/model_struct.v
index 18214aa5..7e3bc674 100644
--- a/lib/core/code/model_struct.v
+++ b/lib/core/code/model_struct.v
@@ -69,10 +69,11 @@ pub fn parse_struct(code_ string) !Struct {
trimmed := line.trim_space()
if !in_struct && trimmed.starts_with('//') {
comment_lines << trimmed.trim_string_left('//').trim_space()
- } else if !in_struct && (trimmed.starts_with('struct ') || trimmed.starts_with('pub struct ')) {
+ } else if !in_struct && (trimmed.starts_with('struct ')
+ || trimmed.starts_with('pub struct ')) {
in_struct = true
struct_lines << line
-
+
// Extract struct name
is_pub = trimmed.starts_with('pub ')
mut name_part := if is_pub {
@@ -80,7 +81,7 @@ pub fn parse_struct(code_ string) !Struct {
} else {
trimmed.trim_string_left('struct ').trim_space()
}
-
+
// Handle generics in struct name
if name_part.contains('<') {
struct_name = name_part.all_before('<').trim_space()
@@ -91,72 +92,71 @@ pub fn parse_struct(code_ string) !Struct {
}
} else if in_struct {
struct_lines << line
-
+
// Check if we've reached the end of the struct
if trimmed.starts_with('}') {
break
}
}
}
-
+
if struct_name == '' {
return error('Invalid struct format: could not extract struct name')
}
-
+
// Process the struct fields
mut fields := []StructField{}
mut current_section := ''
-
+
for i := 1; i < struct_lines.len - 1; i++ { // Skip the first and last lines (struct declaration and closing brace)
line := struct_lines[i].trim_space()
-
+
// Skip empty lines and comments
if line == '' || line.starts_with('//') {
continue
}
-
+
// Check for section markers (pub:, mut:, pub mut:)
if line.ends_with(':') {
current_section = line
continue
}
-
+
// Parse field
parts := line.split_any(' \t')
if parts.len < 2 {
continue // Skip invalid lines
}
-
+
field_name := parts[0]
field_type_str := parts[1..].join(' ')
-
+
// Parse the type string into a Type object
field_type := parse_type(field_type_str)
-
+
// Determine field visibility based on section
is_pub_field := current_section.contains('pub')
is_mut_field := current_section.contains('mut')
-
+
fields << StructField{
- name: field_name
- typ: field_type
+ name: field_name
+ typ: field_type
is_pub: is_pub_field
is_mut: is_mut_field
}
}
-
+
// Process the comments into a description
description := comment_lines.join('\n')
-
+
return Struct{
- name: struct_name
+ name: struct_name
description: description
- is_pub: is_pub
- fields: fields
+ is_pub: is_pub
+ fields: fields
}
}
-
pub struct Interface {
pub mut:
name string
diff --git a/lib/core/code/model_struct_test.v b/lib/core/code/model_struct_test.v
index a176f7d5..d37526f8 100644
--- a/lib/core/code/model_struct_test.v
+++ b/lib/core/code/model_struct_test.v
@@ -21,17 +21,17 @@ pub:
It contains information about test execution'
assert result.is_pub == true
assert result.fields.len == 3
-
+
assert result.fields[0].name == 'success'
assert result.fields[0].typ.symbol() == 'bool'
assert result.fields[0].is_pub == true
assert result.fields[0].is_mut == false
-
+
assert result.fields[1].name == 'message'
assert result.fields[1].typ.symbol() == 'string'
assert result.fields[1].is_pub == true
assert result.fields[1].is_mut == false
-
+
assert result.fields[2].name == 'code'
assert result.fields[2].typ.symbol() == 'int'
assert result.fields[2].is_pub == true
@@ -55,17 +55,17 @@ mut:
assert result2.description == ''
assert result2.is_pub == false
assert result2.fields.len == 3
-
+
assert result2.fields[0].name == 'name'
assert result2.fields[0].typ.symbol() == 'string'
assert result2.fields[0].is_pub == true
assert result2.fields[0].is_mut == false
-
+
assert result2.fields[1].name == 'count'
assert result2.fields[1].typ.symbol() == 'int'
assert result2.fields[1].is_pub == false
assert result2.fields[1].is_mut == true
-
+
assert result2.fields[2].name == 'active'
assert result2.fields[2].typ.symbol() == 'bool'
assert result2.fields[2].is_pub == false
diff --git a/lib/core/code/model_types.v b/lib/core/code/model_types.v
index 3dea87d4..a4866759 100644
--- a/lib/core/code/model_types.v
+++ b/lib/core/code/model_types.v
@@ -239,7 +239,7 @@ pub fn (t Type) empty_value() string {
pub fn parse_type(type_str string) Type {
println('Parsing type string: "${type_str}"')
mut type_str_trimmed := type_str.trim_space()
-
+
// Handle struct definitions by extracting just the struct name
if type_str_trimmed.contains('struct ') {
lines := type_str_trimmed.split_into_lines()
@@ -257,7 +257,7 @@ pub fn parse_type(type_str string) Type {
}
}
}
-
+
// Check for simple types first
if type_str_trimmed == 'string' {
return String{}
@@ -266,41 +266,61 @@ pub fn parse_type(type_str string) Type {
} else if type_str_trimmed == 'int' {
return Integer{}
} else if type_str_trimmed == 'u8' {
- return Integer{bytes: 8, signed: false}
+ return Integer{
+ bytes: 8
+ signed: false
+ }
} else if type_str_trimmed == 'u16' {
- return Integer{bytes: 16, signed: false}
+ return Integer{
+ bytes: 16
+ signed: false
+ }
} else if type_str_trimmed == 'u32' {
- return Integer{bytes: 32, signed: false}
+ return Integer{
+ bytes: 32
+ signed: false
+ }
} else if type_str_trimmed == 'u64' {
- return Integer{bytes: 64, signed: false}
+ return Integer{
+ bytes: 64
+ signed: false
+ }
} else if type_str_trimmed == 'i8' {
- return Integer{bytes: 8}
+ return Integer{
+ bytes: 8
+ }
} else if type_str_trimmed == 'i16' {
- return Integer{bytes: 16}
+ return Integer{
+ bytes: 16
+ }
} else if type_str_trimmed == 'i32' {
- return Integer{bytes: 32}
+ return Integer{
+ bytes: 32
+ }
} else if type_str_trimmed == 'i64' {
- return Integer{bytes: 64}
+ return Integer{
+ bytes: 64
+ }
}
-
+
// Check for array types
if type_str_trimmed.starts_with('[]') {
elem_type := type_str_trimmed.all_after('[]')
return Array{parse_type(elem_type)}
}
-
+
// Check for map types
if type_str_trimmed.starts_with('map[') && type_str_trimmed.contains(']') {
value_type := type_str_trimmed.all_after(']')
return Map{parse_type(value_type)}
}
-
+
// Check for result types
if type_str_trimmed.starts_with('!') {
result_type := type_str_trimmed.all_after('!')
return Result{parse_type(result_type)}
}
-
+
// If no other type matches, treat as an object/struct type
println('Treating as object type: "${type_str_trimmed}"')
return Object{type_str_trimmed}
diff --git a/lib/core/code/vlang_utils.v b/lib/core/code/vlang_utils.v
index cf4cf0cf..012cb1cf 100644
--- a/lib/core/code/vlang_utils.v
+++ b/lib/core/code/vlang_utils.v
@@ -66,15 +66,17 @@ fn find_closing_brace(content string, start_i int) ?int {
// RETURNS:
// string - the function block including comments, or error if not found
pub fn get_function_from_file(file_path string, function_name string) !Function {
- content := os.read_file(file_path) or { return error('Failed to read file ${file_path}: ${err}') }
-
+ content := os.read_file(file_path) or {
+ return error('Failed to read file ${file_path}: ${err}')
+ }
+
vfile := parse_vfile(content) or { return error('Failed to parse file ${file_path}: ${err}') }
-
+
if fn_obj := vfile.get_function(function_name) {
return fn_obj
-}
-
-return error('function ${function_name} not found in file ${file_path}')
+ }
+
+ return error('function ${function_name} not found in file ${file_path}')
}
// get_function_from_module searches for a function in all V files within a module
@@ -91,15 +93,11 @@ pub fn get_function_from_module(module_path string, function_name string) !Funct
log.error('Found ${v_files} V files in ${module_path}')
for v_file in v_files {
// Read the file content
- content := os.read_file(v_file) or {
- continue
- }
-
+ content := os.read_file(v_file) or { continue }
+
// Parse the file
- vfile := parse_vfile(content) or {
- continue
- }
-
+ vfile := parse_vfile(content) or { continue }
+
// Look for the function
if fn_obj := vfile.get_function(function_name) {
return fn_obj
@@ -139,7 +137,7 @@ pub fn get_type_from_module(module_path string, type_name string) !string {
if i == -1 {
type_import := content.split_into_lines().filter(it.contains('import')
-&& it.contains(type_name))
+ && it.contains(type_name))
if type_import.len > 0 {
log.debug('debugzoooo')
mod := type_import[0].trim_space().trim_string_left('import ').all_before(' ')
diff --git a/lib/data/currency/serialize.v b/lib/data/currency/serialize.v
index 09fe2ed8..6e6c54fd 100644
--- a/lib/data/currency/serialize.v
+++ b/lib/data/currency/serialize.v
@@ -8,17 +8,17 @@ pub:
data []u8
}
-// to_bytes converts a Currency to serialized bytes
+// to_bytes converts a Currency to serialized bytes
pub fn (c Currency) to_bytes() !CurrencyBytes {
mut enc := encoder.new()
-
+
// Add unique encoding ID to identify this type of data
enc.add_u16(500) // Unique ID for Currency type
-
+
// Encode Currency fields
enc.add_string(c.name)
enc.add_f64(c.usdval)
-
+
return CurrencyBytes{
data: enc.data
}
@@ -28,16 +28,16 @@ pub fn (c Currency) to_bytes() !CurrencyBytes {
pub fn from_bytes(bytes CurrencyBytes) !Currency {
mut d := encoder.decoder_new(bytes.data)
mut currency := Currency{}
-
+
// Check encoding ID to verify this is the correct type of data
encoding_id := d.get_u16()!
if encoding_id != 500 {
return error('Wrong file type: expected encoding ID 500, got ${encoding_id}, for currency')
}
-
+
// Decode Currency fields
currency.name = d.get_string()!
currency.usdval = d.get_f64()!
-
+
return currency
}
diff --git a/lib/data/encoder/encoder_decode.v b/lib/data/encoder/encoder_decode.v
index 4c834bf1..4f913fd4 100644
--- a/lib/data/encoder/encoder_decode.v
+++ b/lib/data/encoder/encoder_decode.v
@@ -241,6 +241,6 @@ pub fn (mut d Decoder) get_map_bytes() !map[string][]u8 {
// Gets GID from encoded string
pub fn (mut d Decoder) get_gid() !gid.GID {
- gid_str := d.get_string()!
- return gid.new(gid_str)
+ gid_str := d.get_string()!
+ return gid.new(gid_str)
}
diff --git a/lib/data/encoder/encoder_test.v b/lib/data/encoder/encoder_test.v
index 14bd2c18..17d192a6 100644
--- a/lib/data/encoder/encoder_test.v
+++ b/lib/data/encoder/encoder_test.v
@@ -191,17 +191,17 @@ fn test_map_bytes() {
fn test_gid() {
// Test with a standard GID
mut e := new()
- mut g1 := gid.new("myproject:123")!
+ mut g1 := gid.new('myproject:123')!
e.add_gid(g1)
-
+
// Test with a GID that has a default circle name
- mut g2 := gid.new_from_parts("", 999)!
+ mut g2 := gid.new_from_parts('', 999)!
e.add_gid(g2)
-
+
// Test with a GID that has spaces before fixing
- mut g3 := gid.new("project1:456")!
+ mut g3 := gid.new('project1:456')!
e.add_gid(g3)
-
+
mut d := decoder_new(e.data)
assert d.get_gid()!.str() == g1.str()
assert d.get_gid()!.str() == g2.str()
@@ -211,74 +211,74 @@ fn test_gid() {
fn test_currency() {
// Create USD currency manually
mut usd_curr := currency.Currency{
- name: 'USD'
+ name: 'USD'
usdval: 1.0
}
// Create EUR currency manually
mut eur_curr := currency.Currency{
- name: 'EUR'
+ name: 'EUR'
usdval: 1.1
}
// Create Bitcoin currency manually
mut btc_curr := currency.Currency{
- name: 'BTC'
+ name: 'BTC'
usdval: 60000.0
}
// Create TFT currency manually
mut tft_curr := currency.Currency{
- name: 'TFT'
+ name: 'TFT'
usdval: 0.05
}
-
+
// Create currency amounts
mut usd_amount := currency.Amount{
currency: usd_curr
- val: 1.5
+ val: 1.5
}
-
+
mut eur_amount := currency.Amount{
currency: eur_curr
- val: 100.0
+ val: 100.0
}
-
+
mut btc_amount := currency.Amount{
currency: btc_curr
- val: 0.01
+ val: 0.01
}
-
+
mut tft_amount := currency.Amount{
currency: tft_curr
- val: 1000.0
+ val: 1000.0
}
-
+
mut e := new()
e.add_currency(usd_amount)
e.add_currency(eur_amount)
e.add_currency(btc_amount)
e.add_currency(tft_amount)
-
+
mut d := decoder_new(e.data)
-
+
// Override the currency.get function by manually checking currency names
// since we can't rely on the global currency functions for testing
mut decoded_curr1 := d.get_string()!
mut decoded_val1 := d.get_f64()!
assert decoded_curr1 == 'USD'
assert math.abs(decoded_val1 - 1.5) < 0.00001
-
+
mut decoded_curr2 := d.get_string()!
mut decoded_val2 := d.get_f64()!
assert decoded_curr2 == 'EUR'
assert math.abs(decoded_val2 - 100.0) < 0.00001
-
+
mut decoded_curr3 := d.get_string()!
mut decoded_val3 := d.get_f64()!
assert decoded_curr3 == 'BTC'
assert math.abs(decoded_val3 - 0.01) < 0.00001
-
+
mut decoded_curr4 := d.get_string()!
mut decoded_val4 := d.get_f64()!
assert decoded_curr4 == 'TFT'
diff --git a/lib/data/gid/gid.v b/lib/data/gid/gid.v
index bdb11ab5..1feb8b58 100644
--- a/lib/data/gid/gid.v
+++ b/lib/data/gid/gid.v
@@ -31,23 +31,23 @@ pub fn new(txt_ string) !GID {
}
cid_str := parts[1].trim_space()
- cid := cid_str.u32() //TODO: what if this is no nr?
+ cid := cid_str.u32() // TODO: what if this is no nr?
return GID{
circle: circle
- cid: cid
+ cid: cid
}
}
pub fn new_from_parts(circle_ string, cid u32) !GID {
- mut circle:=circle_
+ mut circle := circle_
if circle.trim_space() == '' {
- circle="default"
+ circle = 'default'
}
return GID{
circle: circle
- cid: cid
+ cid: cid
}
}
diff --git a/lib/data/tst/edge_case_prefix_test.v b/lib/data/tst/edge_case_prefix_test.v
index e04f22f2..9886bbbe 100644
--- a/lib/data/tst/edge_case_prefix_test.v
+++ b/lib/data/tst/edge_case_prefix_test.v
@@ -4,7 +4,7 @@ import os
// Define a struct for test cases
struct PrefixEdgeCaseTest {
- prefix string
+ prefix string
expected_keys []string
}
@@ -17,10 +17,20 @@ fn test_edge_case_prefix_search() {
// Keys with a common prefix that may cause issues
keys := [
- 'test', 'testing', 'tea', 'team', 'technology',
- 'apple', 'application', 'appreciate',
- 'banana', 'bandage', 'band',
- 'car', 'carpet', 'carriage'
+ 'test',
+ 'testing',
+ 'tea',
+ 'team',
+ 'technology',
+ 'apple',
+ 'application',
+ 'appreciate',
+ 'banana',
+ 'bandage',
+ 'band',
+ 'car',
+ 'carpet',
+ 'carriage',
]
// Insert all keys
@@ -36,59 +46,58 @@ fn test_edge_case_prefix_search() {
test_cases := [
// prefix, expected_keys
PrefixEdgeCaseTest{
- prefix: 'te'
+ prefix: 'te'
expected_keys: ['test', 'testing', 'tea', 'team', 'technology']
},
PrefixEdgeCaseTest{
- prefix: 'tes'
+ prefix: 'tes'
expected_keys: ['test', 'testing']
},
PrefixEdgeCaseTest{
- prefix: 'tea'
+ prefix: 'tea'
expected_keys: ['tea', 'team']
},
PrefixEdgeCaseTest{
- prefix: 'a'
+ prefix: 'a'
expected_keys: ['apple', 'application', 'appreciate']
},
PrefixEdgeCaseTest{
- prefix: 'ba'
+ prefix: 'ba'
expected_keys: ['banana', 'bandage', 'band']
},
PrefixEdgeCaseTest{
- prefix: 'ban'
+ prefix: 'ban'
expected_keys: ['banana', 'band']
},
PrefixEdgeCaseTest{
- prefix: 'c'
+ prefix: 'c'
expected_keys: ['car', 'carpet', 'carriage']
- }
+ },
]
for test_case in test_cases {
prefix := test_case.prefix
expected_keys := test_case.expected_keys
-
+
result := tree.list(prefix) or {
assert false, 'Failed to list keys with prefix "${prefix}": ${err}'
return
}
-
+
// Check count matches
- assert result.len == expected_keys.len,
- 'For prefix "${prefix}": expected ${expected_keys.len} keys, got ${result.len} (keys: ${result})'
-
+ assert result.len == expected_keys.len, 'For prefix "${prefix}": expected ${expected_keys.len} keys, got ${result.len} (keys: ${result})'
+
// Check all expected keys are present
for key in expected_keys {
assert key in result, 'Key "${key}" missing from results for prefix "${prefix}"'
}
-
+
// Verify each result starts with the prefix
for key in result {
assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"'
}
}
-
+
println('All edge case prefix tests passed successfully!')
}
@@ -102,8 +111,13 @@ fn test_tricky_insertion_order() {
// Insert keys in a specific order that might trigger the issue
// Insert 'team' first, then 'test', etc. to ensure tree layout is challenging
tricky_keys := [
- 'team', 'test', 'technology', 'tea', // 'te' prefix cases
- 'car', 'carriage', 'carpet' // 'ca' prefix cases
+ 'team',
+ 'test',
+ 'technology',
+ 'tea', // 'te' prefix cases
+ 'car',
+ 'carriage',
+ 'carpet', // 'ca' prefix cases
]
// Insert all keys
@@ -114,7 +128,7 @@ fn test_tricky_insertion_order() {
return
}
}
-
+
// Test 'te' prefix
te_results := tree.list('te') or {
assert false, 'Failed to list keys with prefix "te": ${err}'
@@ -125,7 +139,7 @@ fn test_tricky_insertion_order() {
assert 'test' in te_results, 'Expected "test" in results'
assert 'technology' in te_results, 'Expected "technology" in results'
assert 'tea' in te_results, 'Expected "tea" in results'
-
+
// Test 'ca' prefix
ca_results := tree.list('ca') or {
assert false, 'Failed to list keys with prefix "ca": ${err}'
@@ -135,6 +149,6 @@ fn test_tricky_insertion_order() {
assert 'car' in ca_results, 'Expected "car" in results'
assert 'carriage' in ca_results, 'Expected "carriage" in results'
assert 'carpet' in ca_results, 'Expected "carpet" in results'
-
+
println('All tricky insertion order tests passed successfully!')
-}
\ No newline at end of file
+}
diff --git a/lib/data/tst/prefix_test.v b/lib/data/tst/prefix_test.v
index 06345a52..c260808a 100644
--- a/lib/data/tst/prefix_test.v
+++ b/lib/data/tst/prefix_test.v
@@ -4,7 +4,7 @@ import os
// Define a struct for test cases
struct PrefixTestCase {
- prefix string
+ prefix string
expected_count int
}
@@ -17,13 +17,31 @@ fn test_complex_prefix_search() {
// Insert a larger set of keys with various prefixes
keys := [
- 'a', 'ab', 'abc', 'abcd', 'abcde',
- 'b', 'bc', 'bcd', 'bcde',
- 'c', 'cd', 'cde',
- 'x', 'xy', 'xyz',
- 'test', 'testing', 'tested', 'tests',
- 'team', 'teammate', 'teams',
- 'tech', 'technology', 'technical'
+ 'a',
+ 'ab',
+ 'abc',
+ 'abcd',
+ 'abcde',
+ 'b',
+ 'bc',
+ 'bcd',
+ 'bcde',
+ 'c',
+ 'cd',
+ 'cde',
+ 'x',
+ 'xy',
+ 'xyz',
+ 'test',
+ 'testing',
+ 'tested',
+ 'tests',
+ 'team',
+ 'teammate',
+ 'teams',
+ 'tech',
+ 'technology',
+ 'technical',
]
// Insert all keys
@@ -54,8 +72,8 @@ fn test_complex_prefix_search() {
PrefixTestCase{'x', 3},
PrefixTestCase{'xy', 2},
PrefixTestCase{'xyz', 1},
- PrefixTestCase{'z', 0}, // No matches
- PrefixTestCase{'', keys.len} // All keys
+ PrefixTestCase{'z', 0}, // No matches
+ PrefixTestCase{'', keys.len}, // All keys
]
for test_case in test_cases {
@@ -70,7 +88,7 @@ fn test_complex_prefix_search() {
}
assert result.len == expected_count, 'For prefix "${prefix}": expected ${expected_count} keys, got ${result.len}'
-
+
// Verify each result starts with the prefix
for key in result {
assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"'
@@ -87,13 +105,21 @@ fn test_special_prefix_search() {
// Insert keys with special characters and longer strings
special_keys := [
- 'user:1:profile', 'user:1:settings', 'user:1:posts',
- 'user:2:profile', 'user:2:settings',
- 'config:app:name', 'config:app:version', 'config:app:debug',
- 'config:db:host', 'config:db:port',
- 'data:2023:01:01', 'data:2023:01:02', 'data:2023:02:01',
+ 'user:1:profile',
+ 'user:1:settings',
+ 'user:1:posts',
+ 'user:2:profile',
+ 'user:2:settings',
+ 'config:app:name',
+ 'config:app:version',
+ 'config:app:debug',
+ 'config:db:host',
+ 'config:db:port',
+ 'data:2023:01:01',
+ 'data:2023:01:02',
+ 'data:2023:02:01',
'very:long:key:with:multiple:segments:and:special:characters:!@#$%^&*()',
- 'another:very:long:key:with:different:segments'
+ 'another:very:long:key:with:different:segments',
]
// Insert all keys
@@ -118,7 +144,7 @@ fn test_special_prefix_search() {
PrefixTestCase{'data:2023:01:', 2},
PrefixTestCase{'very:', 1},
PrefixTestCase{'another:', 1},
- PrefixTestCase{'nonexistent:', 0}
+ PrefixTestCase{'nonexistent:', 0},
]
for test_case in special_test_cases {
@@ -133,7 +159,7 @@ fn test_special_prefix_search() {
}
assert result.len == expected_count, 'For prefix "${prefix}": expected ${expected_count} keys, got ${result.len}'
-
+
// Verify each result starts with the prefix
for key in result {
assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"'
@@ -151,9 +177,9 @@ fn test_prefix_search_performance() {
// Generate a larger dataset (1000 keys)
prefixes := ['user', 'config', 'data', 'app', 'service', 'api', 'test', 'dev', 'prod', 'staging']
mut large_keys := []string{}
-
+
for prefix in prefixes {
- for i in 0..100 {
+ for i in 0 .. 100 {
large_keys << '${prefix}:${i}:name'
}
}
@@ -175,7 +201,7 @@ fn test_prefix_search_performance() {
}
assert result.len == 100, 'For prefix "${prefix}:": expected 100 keys, got ${result.len}'
-
+
// Verify each result starts with the prefix
for key in result {
assert key.starts_with(prefix + ':'), 'Key "${key}" does not start with prefix "${prefix}:"'
@@ -184,7 +210,7 @@ fn test_prefix_search_performance() {
// Test more specific prefixes
for prefix in prefixes {
- for i in 0..10 {
+ for i in 0 .. 10 {
specific_prefix := '${prefix}:${i}'
result := tree.list(specific_prefix) or {
assert false, 'Failed to list keys with prefix "${specific_prefix}": ${err}'
@@ -195,4 +221,4 @@ fn test_prefix_search_performance() {
assert result[0] == '${specific_prefix}:name', 'Expected "${specific_prefix}:name", got "${result[0]}"'
}
}
-}
\ No newline at end of file
+}
diff --git a/lib/data/tst/serialize.v b/lib/data/tst/serialize.v
index e72348ae..944c3def 100644
--- a/lib/data/tst/serialize.v
+++ b/lib/data/tst/serialize.v
@@ -62,11 +62,11 @@ fn deserialize_node(data []u8) !Node {
right_id := d.get_u32()!
return Node{
- character: character
+ character: character
is_end_of_string: is_end_of_string
- value: value
- left_id: left_id
- middle_id: middle_id
- right_id: right_id
+ value: value
+ left_id: left_id
+ middle_id: middle_id
+ right_id: right_id
}
-}
\ No newline at end of file
+}
diff --git a/lib/data/tst/serialize_test.v b/lib/data/tst/serialize_test.v
index 9429d406..1ef5c70a 100644
--- a/lib/data/tst/serialize_test.v
+++ b/lib/data/tst/serialize_test.v
@@ -4,23 +4,23 @@ module tst
fn test_node_serialization() {
// Create a leaf node (end of string)
leaf_node := Node{
- character: `a`
+ character: `a`
is_end_of_string: true
- value: 'test value'.bytes()
- left_id: 0
- middle_id: 0
- right_id: 0
+ value: 'test value'.bytes()
+ left_id: 0
+ middle_id: 0
+ right_id: 0
}
// Serialize the leaf node
leaf_data := serialize_node(leaf_node)
-
+
// Deserialize and verify
deserialized_leaf := deserialize_node(leaf_data) or {
assert false, 'Failed to deserialize leaf node: ${err}'
return
}
-
+
assert deserialized_leaf.character == leaf_node.character, 'Character mismatch'
assert deserialized_leaf.is_end_of_string == leaf_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_leaf.value.bytestr() == leaf_node.value.bytestr(), 'Value mismatch'
@@ -30,23 +30,23 @@ fn test_node_serialization() {
// Create an internal node (not end of string)
internal_node := Node{
- character: `b`
+ character: `b`
is_end_of_string: false
- value: []u8{}
- left_id: 10
- middle_id: 20
- right_id: 30
+ value: []u8{}
+ left_id: 10
+ middle_id: 20
+ right_id: 30
}
// Serialize the internal node
internal_data := serialize_node(internal_node)
-
+
// Deserialize and verify
deserialized_internal := deserialize_node(internal_data) or {
assert false, 'Failed to deserialize internal node: ${err}'
return
}
-
+
assert deserialized_internal.character == internal_node.character, 'Character mismatch'
assert deserialized_internal.is_end_of_string == internal_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_internal.value.len == 0, 'Value should be empty'
@@ -56,23 +56,23 @@ fn test_node_serialization() {
// Create a root node
root_node := Node{
- character: 0 // null character for root
+ character: 0 // null character for root
is_end_of_string: false
- value: []u8{}
- left_id: 5
- middle_id: 15
- right_id: 25
+ value: []u8{}
+ left_id: 5
+ middle_id: 15
+ right_id: 25
}
// Serialize the root node
root_data := serialize_node(root_node)
-
+
// Deserialize and verify
deserialized_root := deserialize_node(root_data) or {
assert false, 'Failed to deserialize root node: ${err}'
return
}
-
+
assert deserialized_root.character == root_node.character, 'Character mismatch'
assert deserialized_root.is_end_of_string == root_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_root.value.len == 0, 'Value should be empty'
@@ -85,23 +85,23 @@ fn test_node_serialization() {
fn test_special_serialization() {
// Create a node with special character
special_node := Node{
- character: `!` // special character
+ character: `!` // special character
is_end_of_string: true
- value: 'special value with spaces and symbols: !@#$%^&*()'.bytes()
- left_id: 42
- middle_id: 99
- right_id: 123
+ value: 'special value with spaces and symbols: !@#$%^&*()'.bytes()
+ left_id: 42
+ middle_id: 99
+ right_id: 123
}
// Serialize the special node
special_data := serialize_node(special_node)
-
+
// Deserialize and verify
deserialized_special := deserialize_node(special_data) or {
assert false, 'Failed to deserialize special node: ${err}'
return
}
-
+
assert deserialized_special.character == special_node.character, 'Character mismatch'
assert deserialized_special.is_end_of_string == special_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_special.value.bytestr() == special_node.value.bytestr(), 'Value mismatch'
@@ -111,37 +111,37 @@ fn test_special_serialization() {
// Create a node with a large value
mut large_value := []u8{len: 1000}
- for i in 0..1000 {
+ for i in 0 .. 1000 {
large_value[i] = u8(i % 256)
}
-
+
large_node := Node{
- character: `z`
+ character: `z`
is_end_of_string: true
- value: large_value
- left_id: 1
- middle_id: 2
- right_id: 3
+ value: large_value
+ left_id: 1
+ middle_id: 2
+ right_id: 3
}
// Serialize the large node
large_data := serialize_node(large_node)
-
+
// Deserialize and verify
deserialized_large := deserialize_node(large_data) or {
assert false, 'Failed to deserialize large node: ${err}'
return
}
-
+
assert deserialized_large.character == large_node.character, 'Character mismatch'
assert deserialized_large.is_end_of_string == large_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_large.value.len == large_node.value.len, 'Value length mismatch'
-
+
// Check each byte of the large value
- for i in 0..large_node.value.len {
+ for i in 0 .. large_node.value.len {
assert deserialized_large.value[i] == large_node.value[i], 'Value byte mismatch at index ${i}'
}
-
+
assert deserialized_large.left_id == large_node.left_id, 'left_id mismatch'
assert deserialized_large.middle_id == large_node.middle_id, 'middle_id mismatch'
assert deserialized_large.right_id == large_node.right_id, 'right_id mismatch'
@@ -151,24 +151,24 @@ fn test_special_serialization() {
fn test_version_handling() {
// Create a valid node
valid_node := Node{
- character: `a`
+ character: `a`
is_end_of_string: true
- value: 'test'.bytes()
- left_id: 0
- middle_id: 0
- right_id: 0
+ value: 'test'.bytes()
+ left_id: 0
+ middle_id: 0
+ right_id: 0
}
// Serialize the node
mut valid_data := serialize_node(valid_node)
-
+
// Corrupt the version byte
valid_data[0] = 99 // Invalid version
-
+
// Attempt to deserialize with invalid version
deserialize_node(valid_data) or {
assert err.str().contains('Invalid version byte'), 'Expected version error, got: ${err}'
return
}
assert false, 'Expected error for invalid version byte'
-}
\ No newline at end of file
+}
diff --git a/lib/data/tst/texttools.v b/lib/data/tst/texttools.v
index 25453e65..76d11162 100644
--- a/lib/data/tst/texttools.v
+++ b/lib/data/tst/texttools.v
@@ -6,9 +6,9 @@ module tst
// - replaces special characters with standard ones
pub fn namefix(s string) string {
mut result := s.trim_space().to_lower()
-
+
// Replace any problematic characters or sequences if needed
// For this implementation, we'll keep it simple
-
+
return result
-}
\ No newline at end of file
+}
diff --git a/lib/data/tst/tst.v b/lib/data/tst/tst.v
index eb808246..2a4265d0 100644
--- a/lib/data/tst/tst.v
+++ b/lib/data/tst/tst.v
@@ -5,12 +5,12 @@ import freeflowuniverse.herolib.data.ourdb
// Represents a node in the ternary search tree
struct Node {
mut:
- character u8 // The character stored at this nodexs
- is_end_of_string bool // Flag indicating if this node represents the end of a key
- value []u8 // The value associated with the key (if this node is the end of a key)
- left_id u32 // Database ID for left child (character < node.character)
- middle_id u32 // Database ID for middle child (character == node.character)
- right_id u32 // Database ID for right child (character > node.character)
+ character u8 // The character stored at this nodexs
+ is_end_of_string bool // Flag indicating if this node represents the end of a key
+ value []u8 // The value associated with the key (if this node is the end of a key)
+ left_id u32 // Database ID for left child (character < node.character)
+ middle_id u32 // Database ID for middle child (character == node.character)
+ right_id u32 // Database ID for right child (character > node.character)
}
// TST represents a ternary search tree data structure
@@ -39,18 +39,18 @@ pub fn new(args NewArgs) !TST {
)!
mut root_id := u32(1) // First ID in ourdb is now 1 instead of 0
-
+
if db.get_next_id()! == 1 {
// Create a new root node if the database is empty
// We'll use a null character (0) for the root node
println('Creating new root node')
root := Node{
- character: 0
+ character: 0
is_end_of_string: false
- value: []u8{}
- left_id: 0
- middle_id: 0
- right_id: 0
+ value: []u8{}
+ left_id: 0
+ middle_id: 0
+ right_id: 0
}
root_id = db.set(data: serialize_node(root))!
println('Root node created with ID: ${root_id}')
@@ -74,7 +74,7 @@ pub fn new(args NewArgs) !TST {
pub fn (mut self TST) set(key string, value []u8) ! {
normalized_key := namefix(key)
println('Setting key: "${key}" (normalized: "${normalized_key}")')
-
+
if normalized_key.len == 0 {
return error('Empty key not allowed')
}
@@ -83,12 +83,12 @@ pub fn (mut self TST) set(key string, value []u8) ! {
if self.root_id == 0 {
println('Tree is empty, creating root node')
root := Node{
- character: 0
+ character: 0
is_end_of_string: false
- value: []u8{}
- left_id: 0
- middle_id: 0
- right_id: 0
+ value: []u8{}
+ left_id: 0
+ middle_id: 0
+ right_id: 0
}
self.root_id = self.db.set(data: serialize_node(root))!
println('Root node created with ID: ${self.root_id}')
@@ -97,12 +97,12 @@ pub fn (mut self TST) set(key string, value []u8) ! {
// Insert the key-value pair
mut last_node_id := self.insert_recursive(self.root_id, normalized_key, 0, value)!
println('Key "${normalized_key}" inserted to node ${last_node_id}')
-
+
// Make sure the last node is marked as end of string with the value
if last_node_id != 0 {
node_data := self.db.get(last_node_id)!
mut node := deserialize_node(node_data)!
-
+
// Ensure this node is marked as the end of a string
if !node.is_end_of_string {
println('Setting node ${last_node_id} as end of string')
@@ -111,7 +111,7 @@ pub fn (mut self TST) set(key string, value []u8) ! {
self.db.set(id: last_node_id, data: serialize_node(node))!
}
}
-
+
println('Key "${normalized_key}" inserted successfully')
}
@@ -126,33 +126,33 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
// If we've reached the end of the tree, create a new node
if node_id == 0 {
println('Creating new node for character: ${key[pos]} (${key[pos].ascii_str()}) at position ${pos}')
-
+
// Create a node for this character
new_node := Node{
- character: key[pos]
+ character: key[pos]
is_end_of_string: pos == key.len - 1
- value: if pos == key.len - 1 { value.clone() } else { []u8{} }
- left_id: 0
- middle_id: 0
- right_id: 0
+ value: if pos == key.len - 1 { value.clone() } else { []u8{} }
+ left_id: 0
+ middle_id: 0
+ right_id: 0
}
new_id := self.db.set(data: serialize_node(new_node))!
println('New node created with ID: ${new_id}, character: ${key[pos]} (${key[pos].ascii_str()}), is_end: ${pos == key.len - 1}')
-
+
// If this is the last character in the key, we're done
if pos == key.len - 1 {
return new_id
}
-
+
// Otherwise, create the next node in the sequence and link to it
next_id := self.insert_recursive(0, key, pos + 1, value)!
-
+
// Update the middle link
node_data := self.db.get(new_id)!
mut updated_node := deserialize_node(node_data)!
updated_node.middle_id = next_id
self.db.set(id: new_id, data: serialize_node(updated_node))!
-
+
return new_id
}
@@ -161,14 +161,14 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
println('Failed to get node data for ID ${node_id}')
return error('Node retrieval error: ${err}')
}
-
+
mut node := deserialize_node(node_data) or {
println('Failed to deserialize node with ID ${node_id}')
return error('Node deserialization error: ${err}')
}
-
+
println('Node ${node_id}: character=${node.character} (${node.character.ascii_str()}), is_end=${node.is_end_of_string}, left=${node.left_id}, middle=${node.middle_id}, right=${node.right_id}')
-
+
// Compare the current character with the node's character
if key[pos] < node.character {
println('Going left for character: ${key[pos]} (${key[pos].ascii_str()}) < ${node.character} (${node.character.ascii_str()})')
@@ -189,7 +189,7 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
node.value = value
self.db.set(id: node_id, data: serialize_node(node))!
} else {
- println('Going middle for next character: ${key[pos+1]} (${key[pos+1].ascii_str()})')
+ println('Going middle for next character: ${key[pos + 1]} (${key[pos + 1].ascii_str()})')
// Move to the next character in the key
node.middle_id = self.insert_recursive(node.middle_id, key, pos + 1, value)!
self.db.set(id: node_id, data: serialize_node(node))!
@@ -203,7 +203,7 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
pub fn (mut self TST) get(key string) ![]u8 {
normalized_key := namefix(key)
println('Getting key: "${key}" (normalized: "${normalized_key}")')
-
+
if normalized_key.len == 0 {
return error('Empty key not allowed')
}
@@ -222,48 +222,44 @@ fn (mut self TST) search_recursive(node_id u32, key string, pos int) ![]u8 {
println('Node ID is 0, key not found')
return error('Key not found')
}
-
+
if pos >= key.len {
println('Position ${pos} out of bounds for key "${key}"')
return error('Key not found - position out of bounds')
}
-
+
// Get the node
node_data := self.db.get(node_id) or {
println('Failed to get node ${node_id}')
return error('Node not found in database')
}
-
+
node := deserialize_node(node_data) or {
println('Failed to deserialize node ${node_id}')
return error('Failed to deserialize node')
}
-
+
println('Searching node ${node_id}: char=${node.character}, pos=${pos}, key_char=${key[pos]}')
-
+
mut result := []u8{}
-
+
// Left branch
if key[pos] < node.character {
println('Going left')
- result = self.search_recursive(node.left_id, key, pos) or {
- return error(err.str())
- }
+ result = self.search_recursive(node.left_id, key, pos) or { return error(err.str()) }
return result
}
-
+
// Right branch
if key[pos] > node.character {
println('Going right')
- result = self.search_recursive(node.right_id, key, pos) or {
- return error(err.str())
- }
+ result = self.search_recursive(node.right_id, key, pos) or { return error(err.str()) }
return result
}
-
+
// Character matches
println('Character match')
-
+
// At end of key
if pos == key.len - 1 {
if node.is_end_of_string {
@@ -278,17 +274,15 @@ fn (mut self TST) search_recursive(node_id u32, key string, pos int) ![]u8 {
return error('Key not found - not marked as end of string')
}
}
-
+
// Not at end of key, go to middle
if node.middle_id == 0 {
println('No middle child')
return error('Key not found - no middle child')
}
-
+
println('Going to middle child')
- result = self.search_recursive(node.middle_id, key, pos + 1) or {
- return error(err.str())
- }
+ result = self.search_recursive(node.middle_id, key, pos + 1) or { return error(err.str()) }
return result
}
@@ -296,7 +290,7 @@ fn (mut self TST) search_recursive(node_id u32, key string, pos int) ![]u8 {
pub fn (mut self TST) delete(key string) ! {
normalized_key := namefix(key)
println('Deleting key: "${key}" (normalized: "${normalized_key}")')
-
+
if normalized_key.len == 0 {
return error('Empty key not allowed')
}
@@ -315,7 +309,7 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
println('Node ID is 0, key not found')
return error('Key not found')
}
-
+
// Check for position out of bounds
if pos >= key.len {
println('Position ${pos} is out of bounds for key "${key}"')
@@ -327,12 +321,12 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
println('Failed to get node data for ID ${node_id}')
return error('Node retrieval error: ${err}')
}
-
+
mut node := deserialize_node(node_data) or {
println('Failed to deserialize node with ID ${node_id}')
return error('Node deserialization error: ${err}')
}
-
+
println('Deleting from node ${node_id}: character=${node.character} (${node.character.ascii_str()}), is_end=${node.is_end_of_string}, left=${node.left_id}, middle=${node.middle_id}, right=${node.right_id}, pos=${pos}')
mut deleted := false
@@ -343,7 +337,7 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
println('Left child is null, key not found')
return error('Key not found')
}
-
+
deleted = self.delete_recursive(node.left_id, key, pos)!
if deleted && node.left_id != 0 {
// Check if the left child has been deleted
@@ -364,7 +358,7 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
println('Right child is null, key not found')
return error('Key not found')
}
-
+
deleted = self.delete_recursive(node.right_id, key, pos)!
if deleted && node.right_id != 0 {
// Check if the right child has been deleted
@@ -405,12 +399,12 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
}
} else {
// Move to the next character in the key
- println('Moving to next character: ${key[pos+1]} (${key[pos+1].ascii_str()})')
+ println('Moving to next character: ${key[pos + 1]} (${key[pos + 1].ascii_str()})')
if node.middle_id == 0 {
println('Middle child is null, key not found')
return error('Key not found')
}
-
+
deleted = self.delete_recursive(node.middle_id, key, pos + 1)!
if deleted && node.middle_id != 0 {
// Check if the middle child has been deleted
diff --git a/lib/data/tst/tst_list.v b/lib/data/tst/tst_list.v
index b7eed72d..ef4ade09 100644
--- a/lib/data/tst/tst_list.v
+++ b/lib/data/tst/tst_list.v
@@ -18,17 +18,17 @@ pub fn (mut self TST) list(prefix string) ![]string {
// Find the prefix node first
result_info := self.navigate_to_prefix(self.root_id, normalized_prefix, 0)
-
+
if !result_info.found {
println('Prefix node not found for "${normalized_prefix}"')
return result // Empty result
}
-
+
println('Found node for prefix "${normalized_prefix}" at node ${result_info.node_id}, collecting keys')
-
+
// Collect all keys from the subtree rooted at the prefix node
self.collect_keys_with_prefix(result_info.node_id, result_info.prefix, mut result)!
-
+
println('Found ${result.len} keys with prefix "${normalized_prefix}": ${result}')
return result
}
@@ -45,23 +45,31 @@ fn (mut self TST) navigate_to_prefix(node_id u32, prefix string, pos int) Prefix
// Base case: no node or out of bounds
if node_id == 0 || pos >= prefix.len {
return PrefixSearchResult{
- found: false
+ found: false
node_id: 0
- prefix: ''
+ prefix: ''
}
}
-
+
// Get node
node_data := self.db.get(node_id) or {
- return PrefixSearchResult{found: false, node_id: 0, prefix: ''}
+ return PrefixSearchResult{
+ found: false
+ node_id: 0
+ prefix: ''
+ }
}
-
+
node := deserialize_node(node_data) or {
- return PrefixSearchResult{found: false, node_id: 0, prefix: ''}
+ return PrefixSearchResult{
+ found: false
+ node_id: 0
+ prefix: ''
+ }
}
-
+
println('Navigating node ${node_id}: char=${node.character} (${node.character.ascii_str()}), pos=${pos}, prefix_char=${prefix[pos]} (${prefix[pos].ascii_str()})')
-
+
// Character comparison
if prefix[pos] < node.character {
// Go left
@@ -74,24 +82,28 @@ fn (mut self TST) navigate_to_prefix(node_id u32, prefix string, pos int) Prefix
} else {
// Character match
println('Character match found')
-
+
// Check if we're at the end of the prefix
if pos == prefix.len - 1 {
println('Reached end of prefix at node ${node_id}')
// Return the exact prefix string that was passed in
return PrefixSearchResult{
- found: true
+ found: true
node_id: node_id
- prefix: prefix
+ prefix: prefix
}
}
-
+
// Not at end of prefix, check middle child
if node.middle_id == 0 {
println('No middle child, prefix not found')
- return PrefixSearchResult{found: false, node_id: 0, prefix: ''}
+ return PrefixSearchResult{
+ found: false
+ node_id: 0
+ prefix: ''
+ }
}
-
+
// Continue to middle child with next character
return self.navigate_to_prefix(node.middle_id, prefix, pos + 1)
}
@@ -102,17 +114,17 @@ fn (mut self TST) collect_keys_with_prefix(node_id u32, prefix string, mut resul
if node_id == 0 {
return
}
-
+
// Get node
node_data := self.db.get(node_id) or { return }
node := deserialize_node(node_data) or { return }
-
+
println('Collecting from node ${node_id}, char=${node.character} (${node.character.ascii_str()}), prefix="${prefix}"')
-
+
// If this node is an end of string and it's not the root, we found a key
if node.is_end_of_string && node.character != 0 {
// The prefix may already contain this node's character
- if prefix.len == 0 || prefix[prefix.len-1] != node.character {
+ if prefix.len == 0 || prefix[prefix.len - 1] != node.character {
println('Found complete key: "${prefix}${node.character.ascii_str()}"')
result << prefix + node.character.ascii_str()
} else {
@@ -120,24 +132,24 @@ fn (mut self TST) collect_keys_with_prefix(node_id u32, prefix string, mut resul
result << prefix
}
}
-
+
// Recursively search all children
if node.left_id != 0 {
self.collect_keys_with_prefix(node.left_id, prefix, mut result)!
}
-
+
// For middle child, we need to add this node's character to the prefix
if node.middle_id != 0 {
mut next_prefix := prefix
if node.character != 0 { // Skip root node
// Only add the character if it's not already at the end of the prefix
- if prefix.len == 0 || prefix[prefix.len-1] != node.character {
+ if prefix.len == 0 || prefix[prefix.len - 1] != node.character {
next_prefix += node.character.ascii_str()
}
}
self.collect_keys_with_prefix(node.middle_id, next_prefix, mut result)!
}
-
+
if node.right_id != 0 {
self.collect_keys_with_prefix(node.right_id, prefix, mut result)!
}
@@ -148,19 +160,19 @@ fn (mut self TST) collect_all_keys(node_id u32, prefix string, mut result []stri
if node_id == 0 {
return
}
-
+
// Get node
node_data := self.db.get(node_id) or { return }
node := deserialize_node(node_data) or { return }
-
+
// Calculate current path
mut current_prefix := prefix
-
+
// If this is not the root, add the character
if node.character != 0 {
current_prefix += node.character.ascii_str()
}
-
+
// If this marks the end of a key, add it to the result
if node.is_end_of_string {
println('Found key: ${current_prefix}')
@@ -168,16 +180,16 @@ fn (mut self TST) collect_all_keys(node_id u32, prefix string, mut result []stri
result << current_prefix
}
}
-
+
// Visit all children
if node.left_id != 0 {
self.collect_all_keys(node.left_id, prefix, mut result)!
}
-
+
if node.middle_id != 0 {
self.collect_all_keys(node.middle_id, current_prefix, mut result)!
}
-
+
if node.right_id != 0 {
self.collect_all_keys(node.right_id, prefix, mut result)!
}
@@ -187,7 +199,7 @@ fn (mut self TST) collect_all_keys(node_id u32, prefix string, mut result []stri
pub fn (mut self TST) getall(prefix string) ![][]u8 {
normalized_prefix := namefix(prefix)
println('Getting all values with prefix: "${prefix}" (normalized: "${normalized_prefix}")')
-
+
// Get all matching keys
keys := self.list(normalized_prefix)!
@@ -201,4 +213,4 @@ pub fn (mut self TST) getall(prefix string) ![][]u8 {
println('Found ${values.len} values with prefix "${normalized_prefix}"')
return values
-}
\ No newline at end of file
+}
diff --git a/lib/data/tst/tst_test.v b/lib/data/tst/tst_test.v
index 8e4dbd15..dd0c419a 100644
--- a/lib/data/tst/tst_test.v
+++ b/lib/data/tst/tst_test.v
@@ -182,13 +182,13 @@ fn test_getall() {
return
}
assert hel_values.len == 2, 'Expected 2 values with prefix "hel", got ${hel_values.len}'
-
+
// Convert byte arrays to strings for easier comparison
mut hel_strings := []string{}
for val in hel_values {
hel_strings << val.bytestr()
}
-
+
assert 'world' in hel_strings, 'Expected "world" in values with prefix "hel"'
assert 'me' in hel_strings, 'Expected "me" in values with prefix "hel"'
}
@@ -232,4 +232,4 @@ fn test_persistence() {
}
assert value2.bytestr() == 'value', 'Expected "value", got "${value2.bytestr()}"'
}
-}
\ No newline at end of file
+}
diff --git a/lib/dav/webdav/model_property_test.v b/lib/dav/webdav/model_property_test.v
index 4474ea6d..a647e8ef 100644
--- a/lib/dav/webdav/model_property_test.v
+++ b/lib/dav/webdav/model_property_test.v
@@ -116,11 +116,11 @@ fn (p CustomProperty) xml_str() string {
fn test_custom_property() {
// Test custom property
custom_prop := CustomProperty{
- name: 'author'
- value: 'Kristof'
+ name: 'author'
+ value: 'Kristof'
namespace: 'C'
}
-
+
assert custom_prop.xml_str() == 'Kristof'
assert custom_prop.xml_name() == ''
}
@@ -131,16 +131,15 @@ fn test_propfind_response() {
props << DisplayName('test-file.txt')
props << GetLastModified('Mon, 01 Jan 2024 12:00:00 GMT')
props << GetContentLength('1024')
-
+
// Build a complete PROPFIND response with multistatus
xml_output := '
/test-file.txt
${props.xml_str()}
- '
-
- // Verify the XML structure
+ ' // Verify the XML structure
+
assert xml_output.contains('')
assert xml_output.contains('')
@@ -157,7 +156,7 @@ fn test_propfind_with_missing_properties() {
HTTP/1.1 404 Not Found
'
-
+
// Simple verification of structure
assert missing_prop_response.contains('')
assert missing_prop_response.contains('')
@@ -167,12 +166,12 @@ fn test_propfind_with_missing_properties() {
fn test_supported_lock_detailed() {
supported_lock := SupportedLock('')
xml_output := supported_lock.xml_str()
-
+
// Test SupportedLock provides a fully formed XML snippet for supportedlock
// Note: This test assumes the actual implementation returns a simplified version
// as indicated by the xml_str() method which returns '...'
assert xml_output.contains('')
-
+
// Detailed testing would need proper parsing of the XML to verify elements
// For real implementation, test should check for:
// - lockentry elements
@@ -183,11 +182,11 @@ fn test_supported_lock_detailed() {
fn test_proppatch_request() {
// Create property to set
author_prop := CustomProperty{
- name: 'author'
- value: 'Kristof'
+ name: 'author'
+ value: 'Kristof'
namespace: 'C'
}
-
+
// Create XML for PROPPATCH request (set)
proppatch_set := '
@@ -195,14 +194,13 @@ fn test_proppatch_request() {
${author_prop.xml_str()}
- '
-
- // Check structure
+ ' // Check structure
+
assert proppatch_set.contains('')
assert proppatch_set.contains('')
assert proppatch_set.contains('Kristof')
-
+
// Create XML for PROPPATCH request (remove)
proppatch_remove := '
@@ -211,7 +209,7 @@ fn test_proppatch_request() {
'
-
+
// Check structure
assert proppatch_remove.contains('')
@@ -224,7 +222,7 @@ fn test_prop_name_listing() {
mut props := []Property{}
props << DisplayName('file.txt')
props << GetContentType('text/plain')
-
+
// Generate propname response
// Note: In a complete implementation, there would be a function to generate this XML
// For testing purposes, we're manually creating the expected structure
@@ -240,7 +238,7 @@ fn test_prop_name_listing() {
'
-
+
// Verify structure
assert propname_response.contains('')
@@ -262,7 +260,7 @@ fn test_namespace_declarations() {
'
-
+
// Verify key namespace elements
assert response_with_ns.contains('xmlns:D="DAV:"')
assert response_with_ns.contains('xmlns:C="http://example.com/customns"')
@@ -290,7 +288,7 @@ fn test_depth_header_responses() {
'
-
+
// Verify structure contains multiple responses
assert multi_response.contains('')
assert multi_response.count('') == 2
diff --git a/lib/dav/webdav/server.v b/lib/dav/webdav/server.v
index 9a9520ff..afcafedc 100644
--- a/lib/dav/webdav/server.v
+++ b/lib/dav/webdav/server.v
@@ -303,22 +303,22 @@ fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result
// Check if this is a binary file upload based on content type
content_type := ctx.req.header.get(.content_type) or { '' }
is_binary := is_binary_content_type(content_type)
-
+
// Handle binary uploads directly
if is_binary {
log.info('[WebDAV] Processing binary upload for ${path} (${content_type})')
-
+
// Handle the binary upload directly
ctx.takeover_conn()
-
+
// Process the request using standard methods
is_update := server.vfs.exists(path)
-
+
// Return success response
ctx.res.set_status(if is_update { .ok } else { .created })
return veb.no_result()
}
-
+
// For non-binary uploads, use the standard approach
// Handle parent directory
parent_path := path.all_before_last('/')
@@ -345,13 +345,13 @@ fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result
ctx.res.set_status(.conflict)
return ctx.text('HTTP 409: Conflict - Cannot replace directory with file')
}
-
+
// Create the file after deleting the directory
server.vfs.file_create(path) or {
log.error('[WebDAV] Failed to create file ${path} after deleting directory: ${err.msg()}')
return ctx.server_error('Failed to create file: ${err.msg()}')
}
-
+
// Now it's not an update anymore
is_update = false
}
@@ -602,22 +602,15 @@ fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result
fn is_binary_content_type(content_type string) bool {
// Normalize the content type by converting to lowercase
normalized := content_type.to_lower()
-
+
// Check for common binary file types
- return normalized.contains('application/octet-stream') ||
- (normalized.contains('application/') && (
- normalized.contains('msword') ||
- normalized.contains('excel') ||
- normalized.contains('powerpoint') ||
- normalized.contains('pdf') ||
- normalized.contains('zip') ||
- normalized.contains('gzip') ||
- normalized.contains('x-tar') ||
- normalized.contains('x-7z') ||
- normalized.contains('x-rar')
- )) ||
- (normalized.contains('image/') && !normalized.contains('svg')) ||
- normalized.contains('audio/') ||
- normalized.contains('video/') ||
- normalized.contains('vnd.openxmlformats') // Office documents
+ return normalized.contains('application/octet-stream')
+ || (normalized.contains('application/') && (normalized.contains('msword')
+ || normalized.contains('excel') || normalized.contains('powerpoint')
+ || normalized.contains('pdf') || normalized.contains('zip')
+ || normalized.contains('gzip') || normalized.contains('x-tar')
+ || normalized.contains('x-7z') || normalized.contains('x-rar')))
+ || (normalized.contains('image/') && !normalized.contains('svg'))
+ || normalized.contains('audio/') || normalized.contains('video/')
+ || normalized.contains('vnd.openxmlformats') // Office documents
}
diff --git a/lib/dav/webdav/server_propfind.v b/lib/dav/webdav/server_propfind.v
index 93c4ba79..460b83d0 100644
--- a/lib/dav/webdav/server_propfind.v
+++ b/lib/dav/webdav/server_propfind.v
@@ -66,19 +66,35 @@ fn (mut server Server) get_entry_property(entry &vfs.FSEntry, name string) !Prop
property_name := if name.contains(':') { name.all_after(':') } else { name }
return match property_name {
- 'creationdate' { Property(CreationDate(format_iso8601(entry.get_metadata().created_time()))) }
- 'getetag' { Property(GetETag(entry.get_metadata().id.str())) }
- 'resourcetype' { Property(ResourceType(entry.is_dir())) }
- 'getlastmodified', 'lastmodified_server' {
+ 'creationdate' {
+ Property(CreationDate(format_iso8601(entry.get_metadata().created_time())))
+ }
+ 'getetag' {
+ Property(GetETag(entry.get_metadata().id.str()))
+ }
+ 'resourcetype' {
+ Property(ResourceType(entry.is_dir()))
+ }
+ 'getlastmodified', 'lastmodified_server' {
// Both standard getlastmodified and custom lastmodified_server properties
// return the same information
Property(GetLastModified(texttools.format_rfc1123(entry.get_metadata().modified_time())))
}
- 'getcontentlength' { Property(GetContentLength(entry.get_metadata().size.str())) }
- 'quota-available-bytes' { Property(QuotaAvailableBytes(16184098816)) }
- 'quota-used-bytes' { Property(QuotaUsedBytes(16184098816)) }
- 'quotaused' { Property(QuotaUsed(16184098816)) }
- 'quota' { Property(Quota(16184098816)) }
+ 'getcontentlength' {
+ Property(GetContentLength(entry.get_metadata().size.str()))
+ }
+ 'quota-available-bytes' {
+ Property(QuotaAvailableBytes(16184098816))
+ }
+ 'quota-used-bytes' {
+ Property(QuotaUsedBytes(16184098816))
+ }
+ 'quotaused' {
+ Property(QuotaUsed(16184098816))
+ }
+ 'quota' {
+ Property(Quota(16184098816))
+ }
'displayname' {
// RFC 4918, Section 15.2: displayname is a human-readable name for UI display
// For now, we use the filename as the displayname, but this could be enhanced
@@ -102,7 +118,7 @@ fn (mut server Server) get_entry_property(entry &vfs.FSEntry, name string) !Prop
// Always show as unlocked for now to ensure compatibility
Property(LockDiscovery(''))
}
- else {
+ else {
// For any unimplemented property, return an empty string instead of panicking
// This improves compatibility with various WebDAV clients
log.info('[WebDAV] Unimplemented property requested: ${name}')
@@ -127,16 +143,24 @@ fn (mut server Server) get_responses(entry vfs.FSEntry, req PropfindRequest, pat
}
// main entry response
responses << PropfindResponse{
- href: ensure_leading_slash(if entry.is_dir() { '${path.trim_string_right('/')}/' } else { path })
+ href: ensure_leading_slash(if entry.is_dir() {
+ '${path.trim_string_right('/')}/'
+ } else {
+ path
+ })
// not_found: entry.get_unfound_properties(req)
found_props: properties
}
} else {
- responses << PropfindResponse{
- href: ensure_leading_slash(if entry.is_dir() { '${path.trim_string_right('/')}/' } else { path })
- // not_found: entry.get_unfound_properties(req)
- found_props: server.get_properties(entry)
- }
+ responses << PropfindResponse{
+ href: ensure_leading_slash(if entry.is_dir() {
+ '${path.trim_string_right('/')}/'
+ } else {
+ path
+ })
+ // not_found: entry.get_unfound_properties(req)
+ found_props: server.get_properties(entry)
+ }
}
if !entry.is_dir() || req.depth == .zero {
@@ -148,10 +172,10 @@ fn (mut server Server) get_responses(entry vfs.FSEntry, req PropfindRequest, pat
return responses
}
for e in entries {
- child_path := if path.ends_with('/') {
- path + e.get_metadata().name
- } else {
- path + '/' + e.get_metadata().name
+ child_path := if path.ends_with('/') {
+ path + e.get_metadata().name
+ } else {
+ path + '/' + e.get_metadata().name
}
responses << server.get_responses(e, PropfindRequest{
...req
diff --git a/lib/dav/webdav/server_test.v b/lib/dav/webdav/server_test.v
index c475180c..6914cc9b 100644
--- a/lib/dav/webdav/server_test.v
+++ b/lib/dav/webdav/server_test.v
@@ -487,11 +487,12 @@ fn test_server_propfind() ! {
assert ctx.res.header.get(.content_type)! == 'application/xml'
assert ctx.res.body.contains('')
-
+
// Now that we know the correct format, check for it - directories have both leading and trailing slashes
assert ctx.res.body.contains('/${root_dir}/')
// Should only include the requested resource
- assert !ctx.res.body.contains('/${file_in_root}') && !ctx.res.body.contains('/${file_in_root}')
+ assert !ctx.res.body.contains('/${file_in_root}')
+ && !ctx.res.body.contains('/${file_in_root}')
// Test PROPFIND with depth=1 (resource and immediate children)
mut ctx2 := Context{
diff --git a/lib/hero/actionprocessor/factory.v b/lib/hero/actionprocessor/factory.v
index 20f911e9..c9c3af01 100644
--- a/lib/hero/actionprocessor/factory.v
+++ b/lib/hero/actionprocessor/factory.v
@@ -10,7 +10,7 @@ import freeflowuniverse.herolib.core.redisclient
__global (
circle_global map[string]&CircleCoordinator
circle_default string
- action_queues map[string]&ActionQueue
+ action_queues map[string]&ActionQueue
)
// HeroRunner is the main factory for managing jobs, agents, services, circles and names
@@ -101,7 +101,7 @@ pub fn new(args_ CircleCoordinatorArgs) !&CircleCoordinator {
@[params]
pub struct ActionQueueArgs {
pub mut:
- name string = 'default' // Name of the queue
+ name string = 'default' // Name of the queue
redis_addr string // Redis server address, defaults to 'localhost:6379'
}
@@ -109,48 +109,48 @@ pub mut:
pub fn new_action_queue(args ActionQueueArgs) !&ActionQueue {
// Normalize the queue name
queue_name := texttools.name_fix(args.name)
-
+
// Check if queue already exists in global map
if queue_name in action_queues {
mut q := action_queues[queue_name] or { panic('bug') }
return q
}
-
+
// Set default Redis address if not provided
mut redis_addr := args.redis_addr
if redis_addr == '' {
redis_addr = 'localhost:6379'
}
-
+
// Create Redis client
mut redis := redisclient.new(redis_addr)!
-
+
// Create Redis queue
queue_key := 'actionqueue:${queue_name}'
mut redis_queue := redis.queue_get(queue_key)
-
+
// Create ActionQueue
mut action_queue := &ActionQueue{
- name: queue_name
+ name: queue_name
queue: &redis_queue
redis: redis
}
-
+
// Store in global map
action_queues[queue_name] = action_queue
-
+
return action_queue
}
// get_action_queue retrieves an existing ActionQueue or creates a new one
pub fn get_action_queue(name string) !&ActionQueue {
queue_name := texttools.name_fix(name)
-
+
if queue_name in action_queues {
mut q := action_queues[queue_name] or { panic('bug') }
return q
}
-
+
return new_action_queue(ActionQueueArgs{
name: queue_name
})!
@@ -159,17 +159,17 @@ pub fn get_action_queue(name string) !&ActionQueue {
// get_or_create_action_queue retrieves an existing ActionQueue for a CircleCoordinator or creates a new one
pub fn (mut cc CircleCoordinator) get_or_create_action_queue(name string) !&ActionQueue {
queue_name := texttools.name_fix(name)
-
+
if queue_name in cc.action_queues {
mut q := cc.action_queues[queue_name] or { panic('bug') }
return q
}
-
+
mut action_queue := new_action_queue(ActionQueueArgs{
name: queue_name
})!
-
+
cc.action_queues[queue_name] = action_queue
-
+
return action_queue
}
diff --git a/lib/hero/actionprocessor/queue.v b/lib/hero/actionprocessor/queue.v
index 344a0be7..578d064f 100644
--- a/lib/hero/actionprocessor/queue.v
+++ b/lib/hero/actionprocessor/queue.v
@@ -19,14 +19,14 @@ pub enum ActionJobStatus {
@[heap]
pub struct ActionJob {
pub mut:
- guid string
+ guid string
heroscript string
- created ourtime.OurTime
- deadline ourtime.OurTime
- status ActionJobStatus
- error string // Error message if job failed
- async bool // Whether the job should be processed asynchronously
- circleid string // ID of the circle this job belongs to
+ created ourtime.OurTime
+ deadline ourtime.OurTime
+ status ActionJobStatus
+ error string // Error message if job failed
+ async bool // Whether the job should be processed asynchronously
+ circleid string // ID of the circle this job belongs to
}
// ActionQueue is a queue of actions to be processed, which comes from a redis queue
@@ -44,15 +44,15 @@ pub fn new_action_job(heroscript string) ActionJob {
// Default deadline is 1 hour from now
mut deadline := ourtime.now()
deadline.warp('+1h') or { panic('Failed to set deadline: ${err}') }
-
+
return ActionJob{
- guid: time.now().unix_milli().str(),
- heroscript: heroscript,
- created: now,
- deadline: deadline,
- status: .pending,
- async: false,
- circleid: ''
+ guid: time.now().unix_milli().str()
+ heroscript: heroscript
+ created: now
+ deadline: deadline
+ status: .pending
+ async: false
+ circleid: ''
}
}
@@ -78,15 +78,15 @@ pub fn (job ActionJob) to_playbook() !&playbook.PlayBook {
if job.heroscript.trim_space() == '' {
return error('No heroscript content in job')
}
-
+
// Create a new PlayBook with the heroscript content
mut pb := playbook.new(text: job.heroscript)!
-
+
// Check if any actions were found
if pb.actions.len == 0 {
return error('No actions found in heroscript')
}
-
+
return &pb
}
@@ -104,7 +104,7 @@ pub fn (mut q ActionQueue) add_job(job ActionJob) ! {
if job.error != '' {
q.redis.hset(job_key, 'error', job.error)!
}
-
+
// Add the job reference to the queue
q.queue.add(job.guid)!
}
@@ -112,32 +112,32 @@ pub fn (mut q ActionQueue) add_job(job ActionJob) ! {
// get_job retrieves a job from Redis by its GUID
pub fn (mut q ActionQueue) get_job(guid string) !ActionJob {
job_key := 'heroactionjobs:${guid}'
-
+
// Check if the job exists
if !q.redis.exists(job_key)! {
return error('Job with GUID ${guid} not found')
}
-
+
// Retrieve job fields
mut job := ActionJob{
- guid: guid,
- heroscript: q.redis.hget(job_key, 'heroscript')!,
- status: ActionJobStatus.pending, // Default value, will be overwritten
- error: '', // Default empty error message
- async: false, // Default to synchronous
- circleid: '' // Default to empty circle ID
+ guid: guid
+ heroscript: q.redis.hget(job_key, 'heroscript')!
+ status: ActionJobStatus.pending // Default value, will be overwritten
+ error: '' // Default empty error message
+ async: false // Default to synchronous
+ circleid: '' // Default to empty circle ID
}
-
+
// Parse created time
created_str := q.redis.hget(job_key, 'created')!
created_unix := created_str.i64()
job.created = ourtime.new_from_epoch(u64(created_unix))
-
+
// Parse deadline
deadline_str := q.redis.hget(job_key, 'deadline')!
deadline_unix := deadline_str.i64()
job.deadline = ourtime.new_from_epoch(u64(deadline_unix))
-
+
// Parse status
status_str := q.redis.hget(job_key, 'status')!
match status_str {
@@ -148,29 +148,29 @@ pub fn (mut q ActionQueue) get_job(guid string) !ActionJob {
'cancelled' { job.status = .cancelled }
else { job.status = .pending } // Default to pending if unknown
}
-
+
// Get error message if exists
job.error = q.redis.hget(job_key, 'error') or { '' }
-
+
// Get async flag
async_str := q.redis.hget(job_key, 'async') or { 'false' }
job.async = async_str == 'true'
-
+
// Get circle ID
job.circleid = q.redis.hget(job_key, 'circleid') or { '' }
-
+
return job
}
// update_job_status updates the status of a job in Redis
pub fn (mut q ActionQueue) update_job_status(guid string, status ActionJobStatus) ! {
job_key := 'heroactionjobs:${guid}'
-
+
// Check if the job exists
if !q.redis.exists(job_key)! {
return error('Job with GUID ${guid} not found')
}
-
+
// Update status
q.redis.hset(job_key, 'status', status.str())!
}
@@ -178,12 +178,12 @@ pub fn (mut q ActionQueue) update_job_status(guid string, status ActionJobStatus
// set_job_failed marks a job as failed with an error message
pub fn (mut q ActionQueue) set_job_failed(guid string, error_msg string) ! {
job_key := 'heroactionjobs:${guid}'
-
+
// Check if the job exists
if !q.redis.exists(job_key)! {
return error('Job with GUID ${guid} not found')
}
-
+
// Update status and error message
q.redis.hset(job_key, 'status', ActionJobStatus.failed.str())!
q.redis.hset(job_key, 'error', error_msg)!
@@ -202,32 +202,32 @@ pub fn (mut q ActionQueue) find_failed_jobs() ![]ActionJob {
// and replaced with a more efficient implementation using SCAN
keys := q.redis.keys('heroactionjobs:*')!
mut failed_jobs := []ActionJob{}
-
+
for key in keys {
// Check if job is failed
status := q.redis.hget(key, 'status') or { continue }
if status == ActionJobStatus.failed.str() {
// Get the job GUID from the key
guid := key.all_after('heroactionjobs:')
-
+
// Get the full job
job := q.get_job(guid) or { continue }
failed_jobs << job
}
}
-
+
return failed_jobs
}
// delete_job deletes a job from Redis
pub fn (mut q ActionQueue) delete_job(guid string) ! {
job_key := 'heroactionjobs:${guid}'
-
+
// Check if the job exists
if !q.redis.exists(job_key)! {
return error('Job with GUID ${guid} not found')
}
-
+
// Delete the job
q.redis.del(job_key)!
}
diff --git a/lib/hero/actionprocessor/queue_test.v b/lib/hero/actionprocessor/queue_test.v
index 42e1fb1c..de87b8e5 100644
--- a/lib/hero/actionprocessor/queue_test.v
+++ b/lib/hero/actionprocessor/queue_test.v
@@ -7,26 +7,26 @@ fn test_action_job() {
// Create a new action job
heroscript := '!!action.test name:test1'
job := new_action_job(heroscript)
-
+
// Verify job properties
assert job.guid != ''
assert job.heroscript == heroscript
assert job.status == ActionJobStatus.pending
assert !job.created.empty()
assert !job.deadline.empty()
-
+
// Test JSON serialization
json_str := job.to_json()
job2 := action_job_from_json(json_str) or {
assert false, 'Failed to decode job from JSON: ${err}'
return
}
-
+
// Verify deserialized job
assert job2.guid == job.guid
assert job2.heroscript == job.heroscript
assert job2.status == job.status
-
+
// Test creating job with custom deadline
job3 := new_action_job_with_deadline(heroscript, '+2h') or {
assert false, 'Failed to create job with deadline: ${err}'
@@ -41,7 +41,7 @@ fn test_action_queue() {
println('Skipping Redis test (use -d test_with_redis to run)')
return
}
-
+
// Create a new action queue
queue_name := 'test_queue_${time.now().unix_milli()}'
mut queue := new_action_queue(ActionQueueArgs{
@@ -50,13 +50,13 @@ fn test_action_queue() {
assert false, 'Failed to create action queue: ${err}'
return
}
-
+
// Create test jobs
mut job1 := new_action_job('!!action.test1 name:test1')
mut job2 := new_action_job('!!action.test2 name:test2')
mut job3 := new_action_job('!!action.test3 name:test3')
mut job4 := new_action_job('!!action.test4 name:test4')
-
+
// Add jobs to the queue
queue.add_job(job1) or {
assert false, 'Failed to add job1: ${err}'
@@ -70,14 +70,14 @@ fn test_action_queue() {
assert false, 'Failed to add job3: ${err}'
return
}
-
+
// Test count_waiting_jobs
wait_count := queue.count_waiting_jobs() or {
assert false, 'Failed to count waiting jobs: ${err}'
return
}
assert wait_count == 3, 'Expected 3 waiting jobs, got ${wait_count}'
-
+
// Fetch jobs from the queue
fetched_job1 := queue.pop_job() or {
assert false, 'Failed to pop job1: ${err}'
@@ -85,20 +85,20 @@ fn test_action_queue() {
}
assert fetched_job1.guid == job1.guid
assert fetched_job1.heroscript == job1.heroscript
-
+
fetched_job2 := queue.pop_job() or {
assert false, 'Failed to pop job2: ${err}'
return
}
assert fetched_job2.guid == job2.guid
assert fetched_job2.heroscript == job2.heroscript
-
+
// Update job status
queue.update_job_status(job3.guid, .processing) or {
assert false, 'Failed to update job status: ${err}'
return
}
-
+
// Fetch job with updated status
fetched_job3 := queue.pop_job() or {
assert false, 'Failed to pop job3: ${err}'
@@ -106,19 +106,19 @@ fn test_action_queue() {
}
assert fetched_job3.guid == job3.guid
assert fetched_job3.status == .processing
-
+
// Test setting a job as failed with error message
queue.add_job(job4) or {
assert false, 'Failed to add job4: ${err}'
return
}
-
+
// Set job as failed
queue.set_job_failed(job4.guid, 'Test error message') or {
assert false, 'Failed to set job as failed: ${err}'
return
}
-
+
// Get the failed job and verify error message
failed_job := queue.get_job(job4.guid) or {
assert false, 'Failed to get failed job: ${err}'
@@ -126,7 +126,7 @@ fn test_action_queue() {
}
assert failed_job.status == .failed
assert failed_job.error == 'Test error message'
-
+
// Test finding failed jobs
failed_jobs := queue.find_failed_jobs() or {
assert false, 'Failed to find failed jobs: ${err}'
@@ -135,39 +135,39 @@ fn test_action_queue() {
assert failed_jobs.len > 0, 'Expected at least one failed job'
assert failed_jobs[0].guid == job4.guid
assert failed_jobs[0].error == 'Test error message'
-
+
// Delete a job
queue.delete_job(job3.guid) or {
assert false, 'Failed to delete job: ${err}'
return
}
-
+
// Try to get deleted job (should fail)
queue.get_job(job3.guid) or {
// Expected error
assert err.str().contains('not found')
return
}
-
+
// Test direct put and fetch to verify heroscript preservation
test_heroscript := '!!action.special name:direct_test param1:value1 param2:value2'
mut direct_job := new_action_job(test_heroscript)
-
+
// Add the job
queue.add_job(direct_job) or {
assert false, 'Failed to add direct job: ${err}'
return
}
-
+
// Fetch the job by GUID
fetched_direct_job := queue.get_job(direct_job.guid) or {
assert false, 'Failed to get direct job: ${err}'
return
}
-
+
// Verify the heroscript is preserved exactly
assert fetched_direct_job.heroscript == test_heroscript, 'Heroscript was not preserved correctly'
-
+
// Clean up
queue.delete() or {
assert false, 'Failed to delete queue: ${err}'
diff --git a/lib/hero/db/core/dbhandler.v b/lib/hero/db/core/dbhandler.v
index 3ccd9351..685c4945 100644
--- a/lib/hero/db/core/dbhandler.v
+++ b/lib/hero/db/core/dbhandler.v
@@ -40,6 +40,7 @@ pub fn (mut m DBHandler[T]) get_data(id u32) ![]u8 {
}
return item_data
}
+
pub fn (mut m DBHandler[T]) exists(id u32) !bool {
item_data := m.session_state.dbs.db_data_core.get(id) or { return false }
return item_data != []u8{}
diff --git a/lib/hero/db/managers/circle/user_db.v b/lib/hero/db/managers/circle/user_db.v
index 68daf872..e45d9463 100644
--- a/lib/hero/db/managers/circle/user_db.v
+++ b/lib/hero/db/managers/circle/user_db.v
@@ -1,7 +1,8 @@
module circle
import freeflowuniverse.herolib.hero.db.core { DBHandler, SessionState, new_dbhandler }
-import freeflowuniverse.herolib.hero.db.models.circle { User, Role }
+import freeflowuniverse.herolib.hero.db.models.circle { Role, User }
+
type UserObj = User
@[heap]
@@ -55,7 +56,7 @@ pub fn (mut m UserDB) delete(obj UserObj) ! {
// get_by_name retrieves a user by its name
pub fn (mut m UserDB) get_by_name(name string) !UserObj {
data := m.db.get_data_by_key('name', name)!
- return loads_user(data)!
+ return loads_user(data)!
}
// delete_by_name removes a user by its name
@@ -80,4 +81,4 @@ pub fn (mut m UserDB) update_user_role(name string, new_role Role) !UserObj {
// Save the updated user
return m.set(user)!
-}
\ No newline at end of file
+}
diff --git a/lib/hero/db/managers/circle/user_encoder.v b/lib/hero/db/managers/circle/user_encoder.v
index 7d86fc13..42953d7e 100644
--- a/lib/hero/db/managers/circle/user_encoder.v
+++ b/lib/hero/db/managers/circle/user_encoder.v
@@ -1,83 +1,80 @@
-
-
module circle
import freeflowuniverse.herolib.data.encoder
-import freeflowuniverse.herolib.hero.db.models.circle { User, Role }
-
+import freeflowuniverse.herolib.hero.db.models.circle { Role, User }
// dumps serializes a User struct to binary data
pub fn (user UserObj) dumps() ![]u8 {
mut e := encoder.new()
-
+
// Add version byte (v1)
e.add_u8(1)
-
+
// Encode Base struct fields
e.add_u32(user.Base.id)
e.add_ourtime(user.Base.creation_time)
e.add_ourtime(user.Base.mod_time)
-
+
// Encode comments array from Base
e.add_u16(u16(user.Base.comments.len))
for id in user.Base.comments {
e.add_u32(id)
}
-
+
// Encode User-specific fields
e.add_string(user.name)
e.add_string(user.description)
e.add_u8(u8(user.role)) // Encode enum as u8
-
+
// Encode contact_ids array
e.add_u16(u16(user.contact_ids.len))
for id in user.contact_ids {
e.add_u32(id)
}
-
+
// Encode wallet_ids array
e.add_u16(u16(user.wallet_ids.len))
for id in user.wallet_ids {
e.add_u32(id)
}
-
+
// Encode pubkey
e.add_string(user.pubkey)
-
+
return e.data
}
// loads deserializes binary data to a User struct
pub fn loads_user(data []u8) !User {
mut d := encoder.decoder_new(data)
-
+
// Read version byte
version := d.get_u8()!
if version != 1 {
return error('Unsupported version: ${version}')
}
-
+
// Create a new User instance
mut user := User{}
-
+
// Decode Base struct fields
user.id = d.get_u32()!
user.creation_time = d.get_ourtime()!
user.mod_time = d.get_ourtime()!
-
+
// Decode comments array from Base
comments_count := d.get_u16()!
user.comments = []u32{cap: int(comments_count)}
for _ in 0 .. comments_count {
user.comments << d.get_u32()!
}
-
+
// Decode User-specific fields
user.name = d.get_string()!
user.description = d.get_string()!
// Get the u8 value first
role_value := d.get_u8()!
-
+
// Validate and convert to Role enum
if role_value <= u8(Role.external) {
// Use unsafe block for casting number to enum as required by V
@@ -87,23 +84,23 @@ pub fn loads_user(data []u8) !User {
} else {
return error('Invalid role value: ${role_value}')
}
-
+
// Decode contact_ids array
contact_count := d.get_u16()!
user.contact_ids = []u32{cap: int(contact_count)}
for _ in 0 .. contact_count {
user.contact_ids << d.get_u32()!
}
-
+
// Decode wallet_ids array
wallet_count := d.get_u16()!
user.wallet_ids = []u32{cap: int(wallet_count)}
for _ in 0 .. wallet_count {
user.wallet_ids << d.get_u32()!
}
-
+
// Decode pubkey
user.pubkey = d.get_string()!
-
+
return user
-}
\ No newline at end of file
+}
diff --git a/lib/hero/db/managers/circle/user_test.v b/lib/hero/db/managers/circle/user_test.v
index e85718f1..aa912412 100644
--- a/lib/hero/db/managers/circle/user_test.v
+++ b/lib/hero/db/managers/circle/user_test.v
@@ -1,6 +1,6 @@
module circle
-import freeflowuniverse.herolib.hero.db.core { SessionState, new_session }
+import freeflowuniverse.herolib.hero.db.core { new_session }
import freeflowuniverse.herolib.hero.db.models.circle { Role }
import freeflowuniverse.herolib.data.ourtime
import os
@@ -8,7 +8,7 @@ import os
// test_user_db tests the functionality of the UserDB
pub fn test_user_db() ! {
println('Starting User DB Test')
-
+
// Create a temporary directory for the test
test_dir := os.join_path(os.temp_dir(), 'hero_user_test')
os.mkdir_all(test_dir) or { return error('Failed to create test directory: ${err}') }
@@ -16,20 +16,20 @@ pub fn test_user_db() ! {
// Clean up after test
os.rmdir_all(test_dir) or { eprintln('Failed to remove test directory: ${err}') }
}
-
+
// Create a new session state
mut session := new_session(
name: 'test_session'
path: test_dir
)!
-
+
println('Session created: ${session.name}')
-
+
// Initialize the UserDB
mut user_db := new_userdb(session)!
-
+
println('UserDB initialized')
-
+
// Create and add users
mut admin_user := user_db.new()
admin_user.name = 'admin_user'
@@ -41,11 +41,11 @@ pub fn test_user_db() ! {
// println(admin_user)
// if true{panic("sss")}
-
+
// Save the admin user
admin_user = user_db.set(admin_user)!
println('Admin user created with ID: ${admin_user.Base.id}')
-
+
// Create a regular member
mut member_user := user_db.new()
member_user.name = 'member_user'
@@ -54,11 +54,11 @@ pub fn test_user_db() ! {
member_user.pubkey = 'member_pubkey_456'
member_user.creation_time = ourtime.now()
member_user.mod_time = ourtime.now()
-
+
// Save the member user
member_user = user_db.set(member_user)!
println('Member user created with ID: ${member_user.Base.id}')
-
+
// Create a guest user
mut guest_user := user_db.new()
guest_user.name = 'guest_user'
@@ -67,48 +67,47 @@ pub fn test_user_db() ! {
guest_user.pubkey = 'guest_pubkey_789'
guest_user.creation_time = ourtime.now()
guest_user.mod_time = ourtime.now()
-
+
// Save the guest user
guest_user = user_db.set(guest_user)!
println('Guest user created with ID: ${guest_user.Base.id}')
-
+
// Retrieve users by ID
retrieved_admin := user_db.get(admin_user.Base.id)!
println('Retrieved admin user by ID: ${retrieved_admin.name} (Role: ${retrieved_admin.role})')
-
+
// Retrieve users by name
retrieved_member := user_db.get_by_name('member_user')!
println('Retrieved member user by name: ${retrieved_member.name} (Role: ${retrieved_member.role})')
-
+
// Update a user's role
updated_guest := user_db.update_user_role('guest_user', Role.contributor)!
println('Updated guest user role to contributor: ${updated_guest.name} (Role: ${updated_guest.role})')
-
+
// List all users
user_ids := user_db.list()!
println('Total users: ${user_ids.len}')
println('User IDs: ${user_ids}')
-
+
// Get all users
all_users := user_db.getall()!
println('All users:')
for user in all_users {
println(' - ${user.name} (ID: ${user.Base.id}, Role: ${user.role})')
}
-
+
// Delete a user
user_db.delete(member_user)!
println('Deleted member user with ID: ${member_user.Base.id}')
-
+
// Delete a user by name
user_db.delete_by_name('guest_user')!
println('Deleted guest user by name')
-
+
// List remaining users
remaining_user_ids := user_db.list()!
println('Remaining users: ${remaining_user_ids.len}')
println('Remaining user IDs: ${remaining_user_ids}')
-
+
println('User DB Test completed successfully')
}
-
diff --git a/lib/hero/db/models/base/base.v b/lib/hero/db/models/base/base.v
index 581017ec..ac7d24a3 100644
--- a/lib/hero/db/models/base/base.v
+++ b/lib/hero/db/models/base/base.v
@@ -5,9 +5,8 @@ import freeflowuniverse.herolib.data.ourtime
// our attempt to make a message object which can be used for email as well as chat
pub struct Base {
pub mut:
- id u32
- creation_time ourtime.OurTime
- mod_time ourtime.OurTime // Last modified time
- comments []u32
+ id u32
+ creation_time ourtime.OurTime
+ mod_time ourtime.OurTime // Last modified time
+ comments []u32
}
-
diff --git a/lib/hero/db/models/circle/domainnames.v b/lib/hero/db/models/circle/domainnames.v
index ba17b071..a17254e2 100644
--- a/lib/hero/db/models/circle/domainnames.v
+++ b/lib/hero/db/models/circle/domainnames.v
@@ -1,37 +1,38 @@
module circle
+
import freeflowuniverse.herolib.hero.db.models.base
// Define the RecordType enum
pub enum RecordType {
- a
- aaa
- cname
- mx
- ns
- ptr
- soa
- srv
- txt
+ a
+ aaa
+ cname
+ mx
+ ns
+ ptr
+ soa
+ srv
+ txt
}
// Define the DomainNamespace struct, represents a full domain with all its records
pub struct DomainNameSpace {
- base.Base
+ base.Base
pub mut:
- id u32
- domain string
- description string
- records []Record
- admins []u32 // IDs of the admins they need to exist as user in the circle
+ id u32
+ domain string
+ description string
+ records []Record
+ admins []u32 // IDs of the admins they need to exist as user in the circle
}
// Define the Record struct
pub struct Record {
pub mut:
- name string
- text string
- category RecordType
- addr []string
+ name string
+ text string
+ category RecordType
+ addr []string
}
pub fn (self DomainNameSpace) index_keys() map[string]string {
@@ -42,6 +43,6 @@ pub fn (self DomainNameSpace) index_keys() map[string]string {
pub fn (self DomainNameSpace) ftindex_keys() map[string]string {
return {
- 'description': self.description,
+ 'description': self.description
}
}
diff --git a/lib/hero/db/models/circle/group.v b/lib/hero/db/models/circle/group.v
index 58269892..b9ccd757 100644
--- a/lib/hero/db/models/circle/group.v
+++ b/lib/hero/db/models/circle/group.v
@@ -1,13 +1,14 @@
module circle
+
import freeflowuniverse.herolib.hero.db.models.base
-//there is one group called "everyone" which is the default group for all members and their roles
+// there is one group called "everyone" which is the default group for all members and their roles
pub struct Group {
- base.Base
+ base.Base
pub mut:
- name string // name of the group in a circle, the one "everyone" is the default group
- description string // optional description
- members []u32 // pointers to the members of this group
+ name string // name of the group in a circle, the one "everyone" is the default group
+ description string // optional description
+ members []u32 // pointers to the members of this group
}
pub fn (self Group) index_keys() map[string]string {
@@ -18,7 +19,7 @@ pub fn (self Group) index_keys() map[string]string {
pub fn (self Group) ftindex_keys() map[string]string {
return {
- 'description': self.description,
- 'members': self.members.map(it.str()).join(",")
+ 'description': self.description
+ 'members': self.members.map(it.str()).join(',')
}
}
diff --git a/lib/hero/db/models/circle/user.v b/lib/hero/db/models/circle/user.v
index aacbe577..5551a60f 100644
--- a/lib/hero/db/models/circle/user.v
+++ b/lib/hero/db/models/circle/user.v
@@ -10,19 +10,19 @@ pub enum Role {
member
contributor
guest
- external //means no right in this circle appart from we register this user
+ external // means no right in this circle appart from we register this user
}
// Member represents a member of a circle
pub struct User {
base.Base
pub mut:
- name string // name of the member as used in this circle
- description string // optional description which is relevant to this circle
- role Role // role of the member in the circle
- contact_ids []u32 // IDs of contacts linked to this member
- wallet_ids []u32 // IDs of wallets owned by this member which are relevant to this circle
- pubkey string // public key of the member as used in this circle
+ name string // name of the member as used in this circle
+ description string // optional description which is relevant to this circle
+ role Role // role of the member in the circle
+ contact_ids []u32 // IDs of contacts linked to this member
+ wallet_ids []u32 // IDs of wallets owned by this member which are relevant to this circle
+ pubkey string // public key of the member as used in this circle
}
pub fn (self User) index_keys() map[string]string {
@@ -33,6 +33,6 @@ pub fn (self User) index_keys() map[string]string {
pub fn (self User) ftindex_keys() map[string]string {
return {
- 'description': self.description,
+ 'description': self.description
}
}
diff --git a/lib/hero/db/models/finance/account.v b/lib/hero/db/models/finance/account.v
index 0dbbfb29..f43dda11 100644
--- a/lib/hero/db/models/finance/account.v
+++ b/lib/hero/db/models/finance/account.v
@@ -1,19 +1,19 @@
module finance
+
import freeflowuniverse.herolib.hero.db.models.base
pub struct Account {
- base.Base
-pub mut:
- name string //internal name of the account for the user
- user_id u32 //user id of the owner of the account
- description string //optional description of the account
- ledger string //describes the ledger/blockchain where the account is located e.g. "ethereum", "bitcoin" or other institutions
- address string //address of the account on the blockchain
- pubkey string
- assets []Asset
+ base.Base
+pub mut:
+ name string // internal name of the account for the user
+ user_id u32 // user id of the owner of the account
+ description string // optional description of the account
+ ledger string // describes the ledger/blockchain where the account is located e.g. "ethereum", "bitcoin" or other institutions
+ address string // address of the account on the blockchain
+ pubkey string
+ assets []Asset
}
-
pub fn (self Account) index_keys() map[string]string {
return {
'name': self.name
diff --git a/lib/hero/db/models/finance/asset.v b/lib/hero/db/models/finance/asset.v
index 3e797b34..4ced0085 100644
--- a/lib/hero/db/models/finance/asset.v
+++ b/lib/hero/db/models/finance/asset.v
@@ -1,27 +1,25 @@
module finance
+
import freeflowuniverse.herolib.hero.db.models.base
-
pub enum AssetType {
- erc20
- erc721
- erc1155
- native
-
+ erc20
+ erc721
+ erc1155
+ native
}
pub struct Asset {
- base.Base
+ base.Base
pub mut:
- name string
- description string
- amount f64
- address string //address of the asset on the blockchain or bank
- asset_type AssetType //type of the asset
- decimals u8 //number of decimals of the asset
+ name string
+ description string
+ amount f64
+ address string // address of the asset on the blockchain or bank
+ asset_type AssetType // type of the asset
+ decimals u8 // number of decimals of the asset
}
-
pub fn (self Asset) index_keys() map[string]string {
return {
'name': self.name
diff --git a/lib/hero/db/models/mcc/calendar.v b/lib/hero/db/models/mcc/calendar.v
index 470d0845..62d833b6 100644
--- a/lib/hero/db/models/mcc/calendar.v
+++ b/lib/hero/db/models/mcc/calendar.v
@@ -11,17 +11,16 @@ pub mut:
description string // Event details
location string // Event location
start_time ourtime.OurTime
- end_time ourtime.OurTime // End time
- all_day bool // True if it's an all-day event
- recurrence string // RFC 5545 Recurrence Rule (e.g., "FREQ=DAILY;COUNT=10")
- attendees []u32 // List of contact id's
- organizer u32 // The user (see circle) who created the event
- status string // "CONFIRMED", "CANCELLED", "TENTATIVE" //TODO: make enum
- color string // User-friendly color categorization, e.g., "red", "blue" //TODO: make enum
+ end_time ourtime.OurTime // End time
+ all_day bool // True if it's an all-day event
+ recurrence string // RFC 5545 Recurrence Rule (e.g., "FREQ=DAILY;COUNT=10")
+ attendees []u32 // List of contact id's
+ organizer u32 // The user (see circle) who created the event
+ status string // "CONFIRMED", "CANCELLED", "TENTATIVE" //TODO: make enum
+ color string // User-friendly color categorization, e.g., "red", "blue" //TODO: make enum
reminder []ourtime.OurTime // Reminder time before the event
}
-
pub fn (self Asset) index_keys() map[string]string {
return {
'name': self.name
diff --git a/lib/hero/db/models/mcc/contacts.v b/lib/hero/db/models/mcc/contacts.v
index a7e8e5e0..2c4cfd8f 100644
--- a/lib/hero/db/models/mcc/contacts.v
+++ b/lib/hero/db/models/mcc/contacts.v
@@ -6,24 +6,22 @@ import freeflowuniverse.herolib.hero.db.models.base
pub struct Contact {
base.Base
pub mut:
- name string //name of the contact as we use in this circle
- first_name string
- last_name string
- email []string
- tel []string
+ name string // name of the contact as we use in this circle
+ first_name string
+ last_name string
+ email []string
+ tel []string
}
-
-
pub fn (self Contact) index_keys() map[string]string {
- return map[string]string{} //TODO: name
+ return map[string]string{} // TODO: name
}
pub fn (self Contact) ftindex_keys() map[string]string {
return {
'first_name': self.first_name
- 'last_name': self.last_name
- 'email': self.email.join(', ')
- 'tel': self.tel.join(', ')
- }
+ 'last_name': self.last_name
+ 'email': self.email.join(', ')
+ 'tel': self.tel.join(', ')
+ }
}
diff --git a/lib/hero/db/models/mcc/message.v b/lib/hero/db/models/mcc/message.v
index 41c8d9ec..94c624a3 100644
--- a/lib/hero/db/models/mcc/message.v
+++ b/lib/hero/db/models/mcc/message.v
@@ -8,18 +8,18 @@ pub struct Message {
base.Base // Base struct for common fields
pub mut:
// Database ID
- id u32 // Database ID (assigned by DBHandler)
+ id u32 // Database ID (assigned by DBHandler)
message_id string // Unique identifier for the email
folder string // The folder this email belongs to (inbox, sent, drafts, etc.)
message string // The email body content
attachments []Attachment // Any file attachments
send_time ourtime.OurTime
-
- date i64 // Unix timestamp when the email was sent/received
- size u32 // Size of the message in bytes
- read bool // Whether the email has been read
- flagged bool // Whether the email has been flagged/starred
-
+
+ date i64 // Unix timestamp when the email was sent/received
+ size u32 // Size of the message in bytes
+ read bool // Whether the email has been read
+ flagged bool // Whether the email has been flagged/starred
+
// Header information
subject string
from []u32 // List of user IDs (or email addresses) who sent the email user needs to exist in circle where we use this
@@ -36,15 +36,13 @@ pub struct Attachment {
pub mut:
filename string
content_type string
- hash string // Hash of the attachment data
+ hash string // Hash of the attachment data
}
-
-
pub fn (self Message) index_keys() map[string]string {
- return map[string]string{}
+ return map[string]string{}
}
pub fn (self Message) ftindex_keys() map[string]string {
- return map[string]string{} //TODO: add subject and from to this and to and message
+ return map[string]string{} // TODO: add subject and from to this and to and message
}
diff --git a/lib/hero_old/zaz/models/company.v b/lib/hero_old/zaz/models/company.v
index a6161722..13e7e704 100644
--- a/lib/hero_old/zaz/models/company.v
+++ b/lib/hero_old/zaz/models/company.v
@@ -22,8 +22,8 @@ pub enum BusinessType {
// Company represents a company registered in the Freezone
pub struct Company {
pub mut:
- id u32
- name string
+ id u32
+ name string
registration_number string
incorporation_date ourtime.OurTime
fiscal_year_end string
@@ -68,9 +68,9 @@ pub fn (company Company) dumps() ![]u8 {
enc.add_u16(u16(company.shareholders.len))
for shareholder in company.shareholders {
// Encode each shareholder's fields
- enc.add_u32(shareholder.id)
- enc.add_u32(shareholder.company_id)
- enc.add_u32(shareholder.user_id)
+ enc.add_u32(shareholder.id)
+ enc.add_u32(shareholder.company_id)
+ enc.add_u32(shareholder.user_id)
enc.add_string(shareholder.name)
enc.add_string(shareholder.shares.str()) // Store shares as string to preserve precision
enc.add_string(shareholder.percentage.str()) // Store as string to preserve precision
@@ -98,10 +98,10 @@ pub fn company_loads(data []u8) !Company {
company.id = d.get_u32()!
company.name = d.get_string()!
company.registration_number = d.get_string()!
-
+
incorporation_date_str := d.get_string()!
company.incorporation_date = ourtime.new(incorporation_date_str)!
-
+
company.fiscal_year_end = d.get_string()!
company.email = d.get_string()!
company.phone = d.get_string()!
@@ -111,10 +111,10 @@ pub fn company_loads(data []u8) !Company {
company.industry = d.get_string()!
company.description = d.get_string()!
company.status = unsafe { CompanyStatus(d.get_u8()!) }
-
+
created_at_str := d.get_string()!
company.created_at = ourtime.new(created_at_str)!
-
+
updated_at_str := d.get_string()!
company.updated_at = ourtime.new(updated_at_str)!
@@ -132,18 +132,18 @@ pub fn company_loads(data []u8) !Company {
// Decode the percentage from string instead of f64
percentage_str := d.get_string()!
shareholder.percentage = percentage_str.f64()
-
+
shareholder.type_ = unsafe { ShareholderType(d.get_u8()!) }
-
+
since_str := d.get_string()!
shareholder.since = ourtime.new(since_str)!
-
+
shareholder_created_at_str := d.get_string()!
shareholder.created_at = ourtime.new(shareholder_created_at_str)!
-
+
shareholder_updated_at_str := d.get_string()!
shareholder.updated_at = ourtime.new(shareholder_updated_at_str)!
-
+
company.shareholders[i] = shareholder
}
diff --git a/lib/hero_old/zaz/models/meeting.v b/lib/hero_old/zaz/models/meeting.v
index 6851ba6d..a44df28a 100644
--- a/lib/hero_old/zaz/models/meeting.v
+++ b/lib/hero_old/zaz/models/meeting.v
@@ -46,16 +46,15 @@ pub mut:
// Attendee represents an attendee of a board meeting
pub struct Attendee {
pub mut:
- id u32
- meeting_id u32
- user_id u32
- name string
- role AttendeeRole
- status AttendeeStatus
- created_at ourtime.OurTime
+ id u32
+ meeting_id u32
+ user_id u32
+ name string
+ role AttendeeRole
+ status AttendeeStatus
+ created_at ourtime.OurTime
}
-
// dumps serializes the Meeting to a byte array
pub fn (meeting Meeting) dumps() ![]u8 {
mut enc := encoder.new()
@@ -105,18 +104,18 @@ pub fn meeting_loads(data []u8) !Meeting {
meeting.id = d.get_u32()!
meeting.company_id = d.get_u32()!
meeting.title = d.get_string()!
-
+
date_str := d.get_string()!
meeting.date = ourtime.new(date_str)!
-
+
meeting.location = d.get_string()!
meeting.description = d.get_string()!
meeting.status = unsafe { MeetingStatus(d.get_u8()!) }
meeting.minutes = d.get_string()!
-
+
created_at_str := d.get_string()!
meeting.created_at = ourtime.new(created_at_str)!
-
+
updated_at_str := d.get_string()!
meeting.updated_at = ourtime.new(updated_at_str)!
@@ -131,10 +130,10 @@ pub fn meeting_loads(data []u8) !Meeting {
attendee.name = d.get_string()!
attendee.role = unsafe { AttendeeRole(d.get_u8()!) }
attendee.status = unsafe { AttendeeStatus(d.get_u8()!) }
-
+
attendee_created_at_str := d.get_string()!
attendee.created_at = ourtime.new(attendee_created_at_str)!
-
+
meeting.attendees[i] = attendee
}
diff --git a/lib/hero_old/zaz/models/product.v b/lib/hero_old/zaz/models/product.v
index 7168e5df..fe4d299b 100644
--- a/lib/hero_old/zaz/models/product.v
+++ b/lib/hero_old/zaz/models/product.v
@@ -2,7 +2,6 @@ module models
import freeflowuniverse.herolib.data.ourtime
import freeflowuniverse.herolib.data.encoder
-
import freeflowuniverse.herolib.data.currency
import freeflowuniverse.herolib.core.texttools { name_fix }
@@ -32,19 +31,19 @@ pub mut:
// Product represents a product or service offered by the Freezone
pub struct Product {
pub mut:
- id u32
- name string
- description string
- price currency.Currency
- type_ ProductType
- category string
- status ProductStatus
- created_at ourtime.OurTime
- updated_at ourtime.OurTime
- max_amount u16 // means allows us to define how many max of this there are
+ id u32
+ name string
+ description string
+ price currency.Currency
+ type_ ProductType
+ category string
+ status ProductStatus
+ created_at ourtime.OurTime
+ updated_at ourtime.OurTime
+	max_amount    u16 // defines the maximum number of this product that can be sold
purchase_till ourtime.OurTime
- active_till ourtime.OurTime // after this product no longer active if e.g. a service
- components []ProductComponent
+ active_till ourtime.OurTime // after this product no longer active if e.g. a service
+ components []ProductComponent
}
// dumps serializes the Product to a byte array
@@ -58,11 +57,11 @@ pub fn (product Product) dumps() ![]u8 {
enc.add_u32(product.id)
enc.add_string(product.name)
enc.add_string(product.description)
-
+
// Store Currency as serialized data
currency_bytes := product.price.to_bytes()!
enc.add_bytes(currency_bytes.data)
-
+
enc.add_u8(u8(product.type_))
enc.add_string(name_fix(product.category))
enc.add_u8(u8(product.status))
@@ -71,7 +70,7 @@ pub fn (product Product) dumps() ![]u8 {
enc.add_u16(product.max_amount)
enc.add_string(product.purchase_till.str())
enc.add_string(product.active_till.str())
-
+
// Encode components array
enc.add_u16(u16(product.components.len))
for component in product.components {
@@ -101,30 +100,32 @@ pub fn product_loads(data []u8) !Product {
product.id = d.get_u32()!
product.name = d.get_string()!
product.description = d.get_string()!
-
+
// Decode Currency from bytes
price_bytes := d.get_bytes()!
- currency_bytes := currency.CurrencyBytes{data: price_bytes}
+ currency_bytes := currency.CurrencyBytes{
+ data: price_bytes
+ }
product.price = currency.from_bytes(currency_bytes)!
-
+
product.type_ = unsafe { ProductType(d.get_u8()!) }
product.category = d.get_string()!
product.status = unsafe { ProductStatus(d.get_u8()!) }
-
+
created_at_str := d.get_string()!
product.created_at = ourtime.new(created_at_str)!
-
+
updated_at_str := d.get_string()!
product.updated_at = ourtime.new(updated_at_str)!
-
+
product.max_amount = d.get_u16()!
-
+
purchase_till_str := d.get_string()!
product.purchase_till = ourtime.new(purchase_till_str)!
-
+
active_till_str := d.get_string()!
product.active_till = ourtime.new(active_till_str)!
-
+
// Decode components array
components_len := d.get_u16()!
product.components = []ProductComponent{len: int(components_len)}
@@ -134,13 +135,13 @@ pub fn product_loads(data []u8) !Product {
component.name = d.get_string()!
component.description = d.get_string()!
component.quantity = d.get_int()!
-
+
component_created_at_str := d.get_string()!
component.created_at = ourtime.new(component_created_at_str)!
-
+
component_updated_at_str := d.get_string()!
component.updated_at = ourtime.new(component_updated_at_str)!
-
+
product.components[i] = component
}
diff --git a/lib/hero_old/zaz/models/sale.v b/lib/hero_old/zaz/models/sale.v
index 9080d012..c9046315 100644
--- a/lib/hero_old/zaz/models/sale.v
+++ b/lib/hero_old/zaz/models/sale.v
@@ -2,7 +2,6 @@ module models
import freeflowuniverse.herolib.data.ourtime
import freeflowuniverse.herolib.data.encoder
-
import freeflowuniverse.herolib.data.currency
// SaleStatus represents the status of a sale
@@ -39,7 +38,6 @@ pub mut:
active_till ourtime.OurTime // after this product no longer active if e.g. a service
}
-
// dumps serializes the Sale to a byte array
pub fn (sale Sale) dumps() ![]u8 {
mut enc := encoder.new()
@@ -52,11 +50,11 @@ pub fn (sale Sale) dumps() ![]u8 {
enc.add_u32(sale.company_id)
enc.add_string(sale.buyer_name)
enc.add_string(sale.buyer_email)
-
+
// Store Currency as serialized data
total_amount_bytes := sale.total_amount.to_bytes()!
enc.add_bytes(total_amount_bytes.data)
-
+
enc.add_u8(u8(sale.status))
enc.add_string(sale.sale_date.str())
enc.add_string(sale.created_at.str())
@@ -70,14 +68,14 @@ pub fn (sale Sale) dumps() ![]u8 {
enc.add_u32(item.product_id)
enc.add_string(item.name)
enc.add_int(item.quantity)
-
+
// Store Currency as serialized data
unit_price_bytes := item.unit_price.to_bytes()!
enc.add_bytes(unit_price_bytes.data)
-
+
subtotal_bytes := item.subtotal.to_bytes()!
enc.add_bytes(subtotal_bytes.data)
-
+
enc.add_string(item.active_till.str())
}
@@ -100,20 +98,22 @@ pub fn sale_loads(data []u8) !Sale {
sale.company_id = d.get_u32()!
sale.buyer_name = d.get_string()!
sale.buyer_email = d.get_string()!
-
+
// Decode Currency from bytes
total_amount_bytes := d.get_bytes()!
- currency_bytes := currency.CurrencyBytes{data: total_amount_bytes}
+ currency_bytes := currency.CurrencyBytes{
+ data: total_amount_bytes
+ }
sale.total_amount = currency.from_bytes(currency_bytes)!
-
+
sale.status = unsafe { SaleStatus(d.get_u8()!) }
-
+
sale_date_str := d.get_string()!
sale.sale_date = ourtime.new(sale_date_str)!
-
+
created_at_str := d.get_string()!
sale.created_at = ourtime.new(created_at_str)!
-
+
updated_at_str := d.get_string()!
sale.updated_at = ourtime.new(updated_at_str)!
@@ -127,19 +127,23 @@ pub fn sale_loads(data []u8) !Sale {
item.product_id = d.get_u32()!
item.name = d.get_string()!
item.quantity = d.get_int()!
-
+
// Decode Currency from bytes
unit_price_bytes := d.get_bytes()!
- unit_price_currency_bytes := currency.CurrencyBytes{data: unit_price_bytes}
+ unit_price_currency_bytes := currency.CurrencyBytes{
+ data: unit_price_bytes
+ }
item.unit_price = currency.from_bytes(unit_price_currency_bytes)!
-
+
subtotal_bytes := d.get_bytes()!
- subtotal_currency_bytes := currency.CurrencyBytes{data: subtotal_bytes}
+ subtotal_currency_bytes := currency.CurrencyBytes{
+ data: subtotal_bytes
+ }
item.subtotal = currency.from_bytes(subtotal_currency_bytes)!
-
+
active_till_str := d.get_string()!
item.active_till = ourtime.new(active_till_str)!
-
+
sale.items[i] = item
}
diff --git a/lib/hero_old/zaz/models/shareholder.v b/lib/hero_old/zaz/models/shareholder.v
index e0c1c9f1..47067da0 100644
--- a/lib/hero_old/zaz/models/shareholder.v
+++ b/lib/hero_old/zaz/models/shareholder.v
@@ -64,18 +64,18 @@ pub fn shareholder_loads(data []u8) !Shareholder {
shareholder.name = d.get_string()!
shares_str := d.get_string()!
shareholder.shares = shares_str.f64()
-
+
percentage_str := d.get_string()!
shareholder.percentage = percentage_str.f64()
-
- shareholder.type_ = unsafe { ShareholderType(d.get_u8()!) }
-
+
+ shareholder.type_ = unsafe { ShareholderType(d.get_u8()!) }
+
since_str := d.get_string()!
shareholder.since = ourtime.new(since_str)!
-
+
created_at_str := d.get_string()!
shareholder.created_at = ourtime.new(created_at_str)!
-
+
updated_at_str := d.get_string()!
shareholder.updated_at = ourtime.new(updated_at_str)!
diff --git a/lib/hero_old/zaz/models/user.v b/lib/hero_old/zaz/models/user.v
index f0622be9..209936f5 100644
--- a/lib/hero_old/zaz/models/user.v
+++ b/lib/hero_old/zaz/models/user.v
@@ -6,12 +6,12 @@ import freeflowuniverse.herolib.data.encoder
// User represents a user in the Freezone Manager system
pub struct User {
pub mut:
- id u32
- name string
- email string
- password string
- company string //here its just a best effort
- role string
+ id u32
+ name string
+ email string
+ password string
+	company    string // here it's just a best effort
+ role string
created_at ourtime.OurTime
updated_at ourtime.OurTime
}
@@ -54,10 +54,10 @@ pub fn user_loads(data []u8) !User {
user.password = d.get_string()!
user.company = d.get_string()!
user.role = d.get_string()!
-
+
created_at_str := d.get_string()!
user.created_at = ourtime.new(created_at_str)!
-
+
updated_at_str := d.get_string()!
user.updated_at = ourtime.new(updated_at_str)!
diff --git a/lib/hero_old/zaz/models/vote.v b/lib/hero_old/zaz/models/vote.v
index 4c8c3bfc..85789fdd 100644
--- a/lib/hero_old/zaz/models/vote.v
+++ b/lib/hero_old/zaz/models/vote.v
@@ -30,11 +30,11 @@ pub mut:
// VoteOption represents an option in a vote
pub struct VoteOption {
pub mut:
- id u8
- vote_id u32
- text string
- count int
- min_valid int // min votes we need to make total vote count
+ id u8
+ vote_id u32
+ text string
+ count int
+	min_valid int // minimum number of votes needed for the total vote to count
}
// the vote as done by the user
@@ -112,18 +112,18 @@ pub fn vote_loads(data []u8) !Vote {
vote.company_id = d.get_u32()!
vote.title = d.get_string()!
vote.description = d.get_string()!
-
+
start_date_str := d.get_string()!
vote.start_date = ourtime.new(start_date_str)!
-
+
end_date_str := d.get_string()!
vote.end_date = ourtime.new(end_date_str)!
-
+
vote.status = unsafe { VoteStatus(d.get_u8()!) }
-
+
created_at_str := d.get_string()!
vote.created_at = ourtime.new(created_at_str)!
-
+
updated_at_str := d.get_string()!
vote.updated_at = ourtime.new(updated_at_str)!
@@ -150,10 +150,10 @@ pub fn vote_loads(data []u8) !Vote {
ballot.user_id = d.get_u32()!
ballot.vote_option_id = d.get_u8()!
ballot.shares_count = d.get_int()!
-
+
ballot_created_at_str := d.get_string()!
ballot.created_at = ourtime.new(ballot_created_at_str)!
-
+
vote.ballots[i] = ballot
}
diff --git a/lib/hero_old/zaz/models/vote_test.v b/lib/hero_old/zaz/models/vote_test.v
index dddf610a..1818484a 100644
--- a/lib/hero_old/zaz/models/vote_test.v
+++ b/lib/hero_old/zaz/models/vote_test.v
@@ -6,51 +6,51 @@ import freeflowuniverse.herolib.data.encoder
fn test_vote_serialization() {
// Create test data for a vote with options and ballots
mut vote := Vote{
- id: 1001
- company_id: 2001
- title: 'Annual Board Election'
+ id: 1001
+ company_id: 2001
+ title: 'Annual Board Election'
description: 'Vote for the new board members'
- start_date: ourtime.new('2025-01-01 00:00:00')!
- end_date: ourtime.new('2025-01-31 23:59:59')!
- status: VoteStatus.open
- created_at: ourtime.new('2024-12-15 10:00:00')!
- updated_at: ourtime.new('2024-12-15 10:00:00')!
- options: []
- ballots: []
+ start_date: ourtime.new('2025-01-01 00:00:00')!
+ end_date: ourtime.new('2025-01-31 23:59:59')!
+ status: VoteStatus.open
+ created_at: ourtime.new('2024-12-15 10:00:00')!
+ updated_at: ourtime.new('2024-12-15 10:00:00')!
+ options: []
+ ballots: []
}
// Add vote options
vote.options << VoteOption{
- id: 101
- vote_id: 1001
- text: 'Option A'
- count: 0
+ id: 101
+ vote_id: 1001
+ text: 'Option A'
+ count: 0
min_valid: 10
}
vote.options << VoteOption{
- id: 102
- vote_id: 1001
- text: 'Option B'
- count: 0
+ id: 102
+ vote_id: 1001
+ text: 'Option B'
+ count: 0
min_valid: 5
}
// Add ballots
vote.ballots << Ballot{
- id: 501
- vote_id: 1001
- user_id: 301
+ id: 501
+ vote_id: 1001
+ user_id: 301
vote_option_id: 101
- shares_count: 100
- created_at: ourtime.new('2025-01-05 14:30:00')!
+ shares_count: 100
+ created_at: ourtime.new('2025-01-05 14:30:00')!
}
vote.ballots << Ballot{
- id: 502
- vote_id: 1001
- user_id: 302
+ id: 502
+ vote_id: 1001
+ user_id: 302
vote_option_id: 102
- shares_count: 50
- created_at: ourtime.new('2025-01-06 09:15:00')!
+ shares_count: 50
+ created_at: ourtime.new('2025-01-06 09:15:00')!
}
// Test serialization
@@ -102,17 +102,17 @@ fn test_vote_serialization() {
fn test_vote_serialization_empty_collections() {
// Test with empty options and ballots
mut vote := Vote{
- id: 1002
- company_id: 2001
- title: 'Simple Vote'
+ id: 1002
+ company_id: 2001
+ title: 'Simple Vote'
description: 'Vote with no options or ballots yet'
- start_date: ourtime.new('2025-02-01 00:00:00')!
- end_date: ourtime.new('2025-02-28 23:59:59')!
- status: VoteStatus.open
- created_at: ourtime.new('2025-01-15 10:00:00')!
- updated_at: ourtime.new('2025-01-15 10:00:00')!
- options: []
- ballots: []
+ start_date: ourtime.new('2025-02-01 00:00:00')!
+ end_date: ourtime.new('2025-02-28 23:59:59')!
+ status: VoteStatus.open
+ created_at: ourtime.new('2025-01-15 10:00:00')!
+ updated_at: ourtime.new('2025-01-15 10:00:00')!
+ options: []
+ ballots: []
}
// Test serialization
@@ -133,13 +133,13 @@ fn test_vote_serialization_empty_collections() {
fn test_vote_index_keys() {
// Test the index_keys function
vote := Vote{
- id: 1003
+ id: 1003
company_id: 2002
- title: 'Test Vote'
+ title: 'Test Vote'
}
keys := vote.index_keys()
-
+
assert keys['id'] == '1003'
assert keys['company_id'] == '2002'
}
@@ -148,7 +148,7 @@ fn test_vote_serialization_invalid_id() {
// Create invalid encoded data with wrong encoding ID
mut enc := encoder.new()
enc.add_u16(999) // Wrong ID (should be 406)
-
+
// Should return an error when decoding
if res := vote_loads(enc.data) {
assert false, 'Expected error for wrong encoding ID, but got success'
@@ -160,44 +160,44 @@ fn test_vote_serialization_invalid_id() {
fn test_vote_serialization_byte_structure() {
// Create a simple vote with minimal data for predictable byte structure
mut vote := Vote{
- id: 5
- company_id: 10
- title: 'Test'
+ id: 5
+ company_id: 10
+ title: 'Test'
description: 'Desc'
- start_date: ourtime.new('2025-01-01 00:00:00')!
- end_date: ourtime.new('2025-01-02 00:00:00')!
- status: VoteStatus.open
- created_at: ourtime.new('2025-01-01 00:00:00')!
- updated_at: ourtime.new('2025-01-01 00:00:00')!
- options: []
- ballots: []
+ start_date: ourtime.new('2025-01-01 00:00:00')!
+ end_date: ourtime.new('2025-01-02 00:00:00')!
+ status: VoteStatus.open
+ created_at: ourtime.new('2025-01-01 00:00:00')!
+ updated_at: ourtime.new('2025-01-01 00:00:00')!
+ options: []
+ ballots: []
}
// Add one simple option
vote.options << VoteOption{
- id: 1
- vote_id: 5
- text: 'Yes'
- count: 0
+ id: 1
+ vote_id: 5
+ text: 'Yes'
+ count: 0
min_valid: 1
}
// Add one simple ballot
vote.ballots << Ballot{
- id: 1
- vote_id: 5
- user_id: 1
+ id: 1
+ vote_id: 5
+ user_id: 1
vote_option_id: 1
- shares_count: 10
- created_at: ourtime.new('2025-01-01 01:00:00')!
+ shares_count: 10
+ created_at: ourtime.new('2025-01-01 01:00:00')!
}
// Serialize the vote
serialized := vote.dumps()!
-
+
// Create a decoder to check the byte structure
mut d := encoder.decoder_new(serialized)
-
+
// Verify the encoding structure byte by byte
assert d.get_u16()! == 406 // Encoding ID
assert d.get_u32()! == 5 // vote.id
@@ -213,7 +213,7 @@ fn test_vote_serialization_byte_structure() {
assert created_at.starts_with('2025-01-01 00:00') // vote.created_at
updated_at := d.get_string()!
assert updated_at.starts_with('2025-01-01 00:00') // vote.updated_at
-
+
// Options array
assert d.get_u16()! == 1 // options.len
assert d.get_u8()! == 1 // option.id
@@ -221,7 +221,7 @@ fn test_vote_serialization_byte_structure() {
assert d.get_string()! == 'Yes' // option.text
assert d.get_int()! == 0 // option.count
assert d.get_int()! == 1 // option.min_valid
-
+
// Ballots array
assert d.get_u16()! == 1 // ballots.len
assert d.get_u32()! == 1 // ballot.id
@@ -231,7 +231,7 @@ fn test_vote_serialization_byte_structure() {
assert d.get_int()! == 10 // ballot.shares_count
ballot_created_at := d.get_string()!
assert ballot_created_at.starts_with('2025-01-01 01:00') // ballot.created_at
-
+
// Private group array
assert d.get_u16()! == 0 // private_group.len
}
diff --git a/lib/lang/rhai/generate_rhai_example.v b/lib/lang/rhai/generate_rhai_example.v
index f6a4aeb1..75e799e9 100644
--- a/lib/lang/rhai/generate_rhai_example.v
+++ b/lib/lang/rhai/generate_rhai_example.v
@@ -6,37 +6,36 @@ import freeflowuniverse.herolib.ai.escalayer
pub struct WrapperGenerator {
pub:
function string
- structs []string
+ structs []string
}
-
// given a list of rhai functions and structs, generate a Rhai example script
pub fn generate_rhai_example(functions []string, structs []string) !string {
mut task := escalayer.new_task(
- name: 'generate_rhai_function_wrapper'
- description: 'Create a single Rhai wrapper for a Rust function'
- )
+ name: 'generate_rhai_function_wrapper'
+ description: 'Create a single Rhai wrapper for a Rust function'
+ )
- mut gen := WrapperGenerator {
+ mut gen := WrapperGenerator{
function: functions
- structs: structs
+ structs: structs
}
- // Define a single unit task that handles everything
- task.new_unit_task(
- name: 'generate_rhai_function_wrapper'
- prompt_function: gen.generate_rhai_function_wrapper_prompt
- callback_function: gen.generate_rhai_function_wrapper_callback
- base_model: escalayer.claude_3_sonnet // Use actual model identifier
- retry_model: escalayer.claude_3_sonnet // Use actual model identifier
- retry_count: 2
- )
+ // Define a single unit task that handles everything
+ task.new_unit_task(
+ name: 'generate_rhai_function_wrapper'
+ prompt_function: gen.generate_rhai_function_wrapper_prompt
+ callback_function: gen.generate_rhai_function_wrapper_callback
+ base_model: escalayer.claude_3_sonnet // Use actual model identifier
+ retry_model: escalayer.claude_3_sonnet // Use actual model identifier
+ retry_count: 2
+ )
- return task.initiate('')
+ return task.initiate('')
}
pub fn (gen WrapperGenerator) generate_rhai_function_wrapper_prompt(input string) string {
- return $tmpl('./prompts/generate_rhai_function_wrapper.md')
+ return $tmpl('./prompts/generate_rhai_function_wrapper.md')
}
// generate_rhai_function_wrapper_callback validates the generated Rhai wrapper.
@@ -47,9 +46,9 @@ pub fn (gen WrapperGenerator) generate_rhai_function_wrapper_prompt(input string
// Returns:
// !string: The validated wrapper code or an error.
pub fn (gen WrapperGenerator) generate_rhai_function_wrapper_callback(output string) !string {
- verify_rhai_wrapper(gen.function, gen.structs, output) or {
+ verify_rhai_wrapper(gen.function, gen.structs, output) or {
log.error('Failed to verify, will retry ${err}')
return err
}
return output
-}
\ No newline at end of file
+}
diff --git a/lib/lang/rhai/generate_wrapper_module.v b/lib/lang/rhai/generate_wrapper_module.v
index 08b62800..db5e83c0 100644
--- a/lib/lang/rhai/generate_wrapper_module.v
+++ b/lib/lang/rhai/generate_wrapper_module.v
@@ -1,20 +1,21 @@
module rhai
import freeflowuniverse.herolib.lang.rust
+import os
// generates rhai wrapper for given source rust code
pub fn generate_wrapper_module(name string, source string, destination string) !string {
- source_pkg_info := rust.detect_source_package(source_path)!
- code := rust.read_source_code(source)!
+	source_pkg_info := rust.detect_source_package(source)!
+ code := rust.read_source_code(source)!
functions := get_functions(code)
structs := get_structs(code)
rhai_functions := generate_rhai_function_wrappers(functions, structs)
-
+
// engine registration functions templated in engine.rs
register_functions_rs := generate_rhai_register_functions(functions)
register_types_rs := generate_rhai_register_types(structs)
-
+
pathlib.get_file(os.join_path(destination, 'cargo.toml'))!
.write($tmpl('./templates/cargo.toml'))
@@ -32,4 +33,4 @@ pub fn generate_wrapper_module(name string, source string, destination string) !
pathlib.get_file(os.join_path(destination, 'examples/example.rhai'))!
.write(generate_example_rhai_script(code))
-}
\ No newline at end of file
+}
diff --git a/lib/lang/rhai/register_types.v b/lib/lang/rhai/register_types.v
index a064378f..a4dc707d 100644
--- a/lib/lang/rhai/register_types.v
+++ b/lib/lang/rhai/register_types.v
@@ -22,7 +22,7 @@ pub fn generate_rhai_registration(rust_struct_definition string) !string {
mut sb := strings.new_builder(1024)
struct_name_lower := struct_name.to_lower()
- //sb.writeln('/// Register ${struct_name} type with the Rhai engine')
+ // sb.writeln('/// Register ${struct_name} type with the Rhai engine')
sb.writeln('fn register_${struct_name_lower}_type(engine: &mut Engine) -> Result<(), Box> {')
// Register the type itself
sb.writeln('\t// Register ${struct_name} type')
diff --git a/lib/lang/rhai/register_types_test.v b/lib/lang/rhai/register_types_test.v
index de7349a0..8aa9a0b2 100644
--- a/lib/lang/rhai/register_types_test.v
+++ b/lib/lang/rhai/register_types_test.v
@@ -28,7 +28,7 @@ fn test_generate_container_registration() {
// Generate the code using the extracted struct definition
generated_code := generate_rhai_registration(rust_def) or {
assert false, 'generate_rhai_registration failed: ${err}'
- return // Need to return to satisfy compiler
+ return
}
// Compare the generated code with the expected output
@@ -36,7 +36,7 @@ fn test_generate_container_registration() {
mut generated_trimmed := generated_code // Create mutable copies
mut expected_trimmed := expected_output
generated_trimmed.trim_space() // Modify in place
- expected_trimmed.trim_space() // Modify in place
+	expected_trimmed.trim_space() // NOTE(review): trim_space returns a new string, it does not modify in place — result is discarded here (and on the line above)
assert generated_trimmed == expected_trimmed, 'Generated code does not match expected output.\n--- Generated:\n${generated_trimmed}\n--- Expected:\n${expected_trimmed}'
// Optional: print the results for verification
diff --git a/lib/lang/rhai/rhai.v b/lib/lang/rhai/rhai.v
index a16abbf8..8d83bf2c 100644
--- a/lib/lang/rhai/rhai.v
+++ b/lib/lang/rhai/rhai.v
@@ -6,7 +6,7 @@ import freeflowuniverse.herolib.ai.escalayer
pub struct WrapperGenerator {
pub:
function string
- structs []string
+ structs []string
}
// generate_rhai_function_wrapper generates a Rhai wrapper function for a given Rust function.
@@ -19,30 +19,30 @@ pub:
// !string: The generated Rhai wrapper function code or an error.
pub fn generate_rhai_function_wrapper(rust_function string, struct_declarations []string) !string {
mut task := escalayer.new_task(
- name: 'generate_rhai_function_wrapper'
- description: 'Create a single Rhai wrapper for a Rust function'
- )
+ name: 'generate_rhai_function_wrapper'
+ description: 'Create a single Rhai wrapper for a Rust function'
+ )
- mut gen := WrapperGenerator {
+ mut gen := WrapperGenerator{
function: rust_function
- structs: struct_declarations
+ structs: struct_declarations
}
- // Define a single unit task that handles everything
- task.new_unit_task(
- name: 'generate_rhai_function_wrapper'
- prompt_function: gen.generate_rhai_function_wrapper_prompt
- callback_function: gen.generate_rhai_function_wrapper_callback
- base_model: escalayer.claude_3_sonnet // Use actual model identifier
- retry_model: escalayer.claude_3_sonnet // Use actual model identifier
- retry_count: 2
- )
+ // Define a single unit task that handles everything
+ task.new_unit_task(
+ name: 'generate_rhai_function_wrapper'
+ prompt_function: gen.generate_rhai_function_wrapper_prompt
+ callback_function: gen.generate_rhai_function_wrapper_callback
+ base_model: escalayer.claude_3_sonnet // Use actual model identifier
+ retry_model: escalayer.claude_3_sonnet // Use actual model identifier
+ retry_count: 2
+ )
- return task.initiate('')
+ return task.initiate('')
}
pub fn (gen WrapperGenerator) generate_rhai_function_wrapper_prompt(input string) string {
- return $tmpl('./prompts/generate_rhai_function_wrapper.md')
+ return $tmpl('./prompts/generate_rhai_function_wrapper.md')
}
// generate_rhai_function_wrapper_callback validates the generated Rhai wrapper.
@@ -53,9 +53,9 @@ pub fn (gen WrapperGenerator) generate_rhai_function_wrapper_prompt(input string
// Returns:
// !string: The validated wrapper code or an error.
pub fn (gen WrapperGenerator) generate_rhai_function_wrapper_callback(output string) !string {
- verify_rhai_wrapper(gen.function, gen.structs, output) or {
+ verify_rhai_wrapper(gen.function, gen.structs, output) or {
log.error('Failed to verify, will retry ${err}')
return err
}
return output
-}
\ No newline at end of file
+}
diff --git a/lib/lang/rhai/rhai_test.v b/lib/lang/rhai/rhai_test.v
index 1be47f3c..7d3ef544 100644
--- a/lib/lang/rhai/rhai_test.v
+++ b/lib/lang/rhai/rhai_test.v
@@ -9,141 +9,141 @@ import os
const test_data_file = os.dir(@FILE) + '/testdata/functions.rs' // Use path relative to this test file
fn testsuite_begin() {
- // Optional: Setup code before tests run
- if !os.exists(test_data_file) {
- panic('Test data file not found: ${test_data_file}')
- }
+ // Optional: Setup code before tests run
+ if !os.exists(test_data_file) {
+ panic('Test data file not found: ${test_data_file}')
+ }
}
fn testsuite_end() {
- // Optional: Teardown code after tests run
+ // Optional: Teardown code after tests run
}
// --- Test Cases ---
fn test_generate_wrapper_simple_function() {
- rust_fn_name := 'add'
- // Call directly using suspected correct function name
- rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or {
- assert false, 'Failed to get function signature for ${rust_fn_name}: ${err}'
- return
- }
- struct_decls := []string{}
+ rust_fn_name := 'add'
+ // Call directly using suspected correct function name
+ rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or {
+ assert false, 'Failed to get function signature for ${rust_fn_name}: ${err}'
+ return
+ }
+ struct_decls := []string{}
- generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
- assert false, 'Wrapper generation failed for ${rust_fn_name}: ${err}'
- return
- }
- verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
- assert false, 'Verification failed for ${rust_fn_name}: ${err}'
- }
+ generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
+ assert false, 'Wrapper generation failed for ${rust_fn_name}: ${err}'
+ return
+ }
+ verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
+ assert false, 'Verification failed for ${rust_fn_name}: ${err}'
+ }
}
fn test_generate_wrapper_immutable_method() {
- rust_fn_name := 'get_name'
- struct_name := 'MyStruct'
- // Use get_function_from_file for methods too
- rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or { // Using get_function_from_file
- assert false, 'Failed to get method signature for ${struct_name}::${rust_fn_name}: ${err}'
- return
- }
- // Call directly using suspected correct function name
- struct_def := rust.get_struct_from_file(test_data_file, struct_name) or {
- assert false, 'Failed to get struct def for ${struct_name}: ${err}'
- return
- }
- struct_decls := [struct_def]
+ rust_fn_name := 'get_name'
+ struct_name := 'MyStruct'
+ // Use get_function_from_file for methods too
+ rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or { // Using get_function_from_file
+ assert false, 'Failed to get method signature for ${struct_name}::${rust_fn_name}: ${err}'
+ return
+ }
+ // Call directly using suspected correct function name
+ struct_def := rust.get_struct_from_file(test_data_file, struct_name) or {
+ assert false, 'Failed to get struct def for ${struct_name}: ${err}'
+ return
+ }
+ struct_decls := [struct_def]
- generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
- assert false, 'Wrapper generation failed for ${struct_name}::${rust_fn_name}: ${err}'
- return
- }
- verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
- assert false, 'Verification failed for ${struct_name}::${rust_fn_name}: ${err}'
- }
+ generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
+ assert false, 'Wrapper generation failed for ${struct_name}::${rust_fn_name}: ${err}'
+ return
+ }
+ verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
+ assert false, 'Verification failed for ${struct_name}::${rust_fn_name}: ${err}'
+ }
}
fn test_generate_wrapper_mutable_method() {
- rust_fn_name := 'set_name'
- struct_name := 'MyStruct'
- // Use get_function_from_file for methods too
- rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or { // Using get_function_from_file
- assert false, 'Failed to get method signature for ${struct_name}::${rust_fn_name}: ${err}'
- return
- }
- // Call directly using suspected correct function name
- struct_def := rust.get_struct_from_file(test_data_file, struct_name) or {
- assert false, 'Failed to get struct def for ${struct_name}: ${err}'
- return
- }
- struct_decls := [struct_def]
+ rust_fn_name := 'set_name'
+ struct_name := 'MyStruct'
+ // Use get_function_from_file for methods too
+ rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or { // Using get_function_from_file
+ assert false, 'Failed to get method signature for ${struct_name}::${rust_fn_name}: ${err}'
+ return
+ }
+ // Call directly using suspected correct function name
+ struct_def := rust.get_struct_from_file(test_data_file, struct_name) or {
+ assert false, 'Failed to get struct def for ${struct_name}: ${err}'
+ return
+ }
+ struct_decls := [struct_def]
- generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
- assert false, 'Wrapper generation failed for ${struct_name}::${rust_fn_name}: ${err}'
- return
- }
- verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
- assert false, 'Verification failed for ${struct_name}::${rust_fn_name}: ${err}'
- }
+ generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
+ assert false, 'Wrapper generation failed for ${struct_name}::${rust_fn_name}: ${err}'
+ return
+ }
+ verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
+ assert false, 'Verification failed for ${struct_name}::${rust_fn_name}: ${err}'
+ }
}
fn test_generate_wrapper_function_returning_result() {
- rust_fn_name := 'load_config'
- // Call directly using suspected correct function name
- rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or {
- assert false, 'Failed to get function signature for ${rust_fn_name}: ${err}'
- return
- }
- // Need struct def for Config if wrapper needs it (likely for return type)
- struct_name := 'Config'
- // Call directly using suspected correct function name
- struct_def := rust.get_struct_from_file(test_data_file, struct_name) or {
- assert false, 'Failed to get struct def for ${struct_name}: ${err}'
- return
- }
- struct_decls := [struct_def]
+ rust_fn_name := 'load_config'
+ // Call directly using suspected correct function name
+ rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or {
+ assert false, 'Failed to get function signature for ${rust_fn_name}: ${err}'
+ return
+ }
+ // Need struct def for Config if wrapper needs it (likely for return type)
+ struct_name := 'Config'
+ // Call directly using suspected correct function name
+ struct_def := rust.get_struct_from_file(test_data_file, struct_name) or {
+ assert false, 'Failed to get struct def for ${struct_name}: ${err}'
+ return
+ }
+ struct_decls := [struct_def]
- generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
- assert false, 'Wrapper generation failed for ${rust_fn_name}: ${err}'
- return
- }
- verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
- assert false, 'Verification failed for ${rust_fn_name}: ${err}'
- }
+ generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
+ assert false, 'Wrapper generation failed for ${rust_fn_name}: ${err}'
+ return
+ }
+ verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
+ assert false, 'Verification failed for ${rust_fn_name}: ${err}'
+ }
}
fn test_generate_wrapper_function_returning_pathbuf() {
- rust_fn_name := 'get_home_dir'
- // Call directly using suspected correct function name
- rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or {
- assert false, 'Failed to get function signature for ${rust_fn_name}: ${err}'
- return
- }
- struct_decls := []string{}
+ rust_fn_name := 'get_home_dir'
+ // Call directly using suspected correct function name
+ rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or {
+ assert false, 'Failed to get function signature for ${rust_fn_name}: ${err}'
+ return
+ }
+ struct_decls := []string{}
- generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
- assert false, 'Wrapper generation failed for ${rust_fn_name}: ${err}'
- return
- }
- verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
- assert false, 'Verification failed for ${rust_fn_name}: ${err}'
- }
+ generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
+ assert false, 'Wrapper generation failed for ${rust_fn_name}: ${err}'
+ return
+ }
+ verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
+ assert false, 'Verification failed for ${rust_fn_name}: ${err}'
+ }
}
fn test_generate_wrapper_function_with_vec() {
- rust_fn_name := 'list_files'
- // Call directly using suspected correct function name
- rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or {
- assert false, 'Failed to get function signature for ${rust_fn_name}: ${err}'
- return
- }
- struct_decls := []string{}
+ rust_fn_name := 'list_files'
+ // Call directly using suspected correct function name
+ rust_fn_sig := rust.get_function_from_file(test_data_file, rust_fn_name) or {
+ assert false, 'Failed to get function signature for ${rust_fn_name}: ${err}'
+ return
+ }
+ struct_decls := []string{}
- generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
- assert false, 'Wrapper generation failed for ${rust_fn_name}: ${err}'
- return
- }
- verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
- assert false, 'Verification failed for ${rust_fn_name}: ${err}'
- }
-}
\ No newline at end of file
+ generated_output := rhai.generate_rhai_function_wrapper(rust_fn_sig, struct_decls) or {
+ assert false, 'Wrapper generation failed for ${rust_fn_name}: ${err}'
+ return
+ }
+ verify_rhai_wrapper(rust_fn_sig, struct_decls, generated_output) or {
+ assert false, 'Verification failed for ${rust_fn_name}: ${err}'
+ }
+}
diff --git a/lib/lang/rhai/verify.v b/lib/lang/rhai/verify.v
index d7afb2c2..d7772f3d 100644
--- a/lib/lang/rhai/verify.v
+++ b/lib/lang/rhai/verify.v
@@ -25,60 +25,60 @@ fn get_primary_struct_name(struct_declarations []string) string {
// verify_rhai_wrapper checks if the AI-generated output contains a plausible Rhai wrapper.
// It now uses the struct declarations to check for correct method naming.
fn verify_rhai_wrapper(rust_fn_signature string, struct_declarations []string, generated_output string) ! {
- // 1. Extract Rust code block (same as before)
- mut code_block := ''
- if generated_output.contains('```rust') { // Use contains() for strings
- start_index := generated_output.index('```rust') or { -1 }
- end_index := generated_output.index_after('```', start_index + 7) or { -1 }
- if start_index != -1 && end_index != -1 {
- code_block = generated_output[start_index + 7..end_index].trim_space()
- } else {
- code_block = generated_output.trim_space()
- }
- } else {
- code_block = generated_output.trim_space()
- }
- assert code_block.len > 0, 'Could not extract code block from generated output: \n`${generated_output}`'
+ // 1. Extract Rust code block (same as before)
+ mut code_block := ''
+ if generated_output.contains('```rust') { // Use contains() for strings
+ start_index := generated_output.index('```rust') or { -1 }
+ end_index := generated_output.index_after('```', start_index + 7) or { -1 }
+ if start_index != -1 && end_index != -1 {
+ code_block = generated_output[start_index + 7..end_index].trim_space()
+ } else {
+ code_block = generated_output.trim_space()
+ }
+ } else {
+ code_block = generated_output.trim_space()
+ }
+ assert code_block.len > 0, 'Could not extract code block from generated output: \n`${generated_output}`'
- // 2. Determine Original Function Name and Expected Wrapper Name
- // Ideally, use rust module parsing here, but for now, simple string parsing:
- is_method := rust_fn_signature.contains('&self') || rust_fn_signature.contains('&mut self')
- mut original_fn_name := ''
- sig_parts := rust_fn_signature.split('(')
- if sig_parts.len > 0 {
- name_parts := sig_parts[0].split(' ')
- if name_parts.len > 0 {
- original_fn_name = name_parts.last()
- }
- }
- assert original_fn_name != '', 'Could not extract function name from signature: ${rust_fn_signature}'
+ // 2. Determine Original Function Name and Expected Wrapper Name
+ // Ideally, use rust module parsing here, but for now, simple string parsing:
+ is_method := rust_fn_signature.contains('&self') || rust_fn_signature.contains('&mut self')
+ mut original_fn_name := ''
+ sig_parts := rust_fn_signature.split('(')
+ if sig_parts.len > 0 {
+ name_parts := sig_parts[0].split(' ')
+ if name_parts.len > 0 {
+ original_fn_name = name_parts.last()
+ }
+ }
+ assert original_fn_name != '', 'Could not extract function name from signature: ${rust_fn_signature}'
- expected_wrapper_fn_name := if is_method {
- struct_name := get_primary_struct_name(struct_declarations)
- assert struct_name != '', 'Could not determine struct name for method: ${rust_fn_signature}'
- '${texttools.snake_case(struct_name)}_${original_fn_name}' // e.g., mystruct_get_name
- } else {
- original_fn_name // Standalone function uses the same name
- }
+ expected_wrapper_fn_name := if is_method {
+ struct_name := get_primary_struct_name(struct_declarations)
+ assert struct_name != '', 'Could not determine struct name for method: ${rust_fn_signature}'
+ '${texttools.snake_case(struct_name)}_${original_fn_name}' // e.g., mystruct_get_name
+ } else {
+ original_fn_name // Standalone function uses the same name
+ }
- // 3. Basic Signature Check (using expected_wrapper_fn_name)
- expected_sig_start := 'pub fn ${expected_wrapper_fn_name}'
- expected_sig_end := '-> Result<'
- expected_sig_very_end := 'Box>'
+ // 3. Basic Signature Check (using expected_wrapper_fn_name)
+ expected_sig_start := 'pub fn ${expected_wrapper_fn_name}'
+ expected_sig_end := '-> Result<'
+ expected_sig_very_end := 'Box>'
- assert code_block.contains(expected_sig_start), 'Wrapper missing signature start: `${expected_sig_start}` in\n${code_block}'
- assert code_block.contains(expected_sig_end), 'Wrapper missing signature end: `${expected_sig_end}` in\n${code_block}'
- assert code_block.contains(expected_sig_very_end), 'Wrapper missing signature very end: `${expected_sig_very_end}` in\n${code_block}'
+ assert code_block.contains(expected_sig_start), 'Wrapper missing signature start: `${expected_sig_start}` in\n${code_block}'
+ assert code_block.contains(expected_sig_end), 'Wrapper missing signature end: `${expected_sig_end}` in\n${code_block}'
+ assert code_block.contains(expected_sig_very_end), 'Wrapper missing signature very end: `${expected_sig_very_end}` in\n${code_block}'
- // 4. Basic Body Check (Check for call to the *original* function name)
- body_start := code_block.index('{') or { -1 }
- body_end := code_block.last_index('}') or { -1 }
- if body_start != -1 && body_end != -1 && body_start < body_end {
- body := code_block[body_start + 1..body_end]
- // Check for call like `original_fn_name(...)` or `receiver.original_fn_name(...)`
- assert body.contains(original_fn_name + '(') || body.contains('.' + original_fn_name + '('), 'Wrapper body does not appear to call original function `${original_fn_name}` in\n${body}'
- } else {
- assert false, 'Could not find function body `{...}` in wrapper:\n${code_block}'
- }
+ // 4. Basic Body Check (Check for call to the *original* function name)
+ body_start := code_block.index('{') or { -1 }
+ body_end := code_block.last_index('}') or { -1 }
+ if body_start != -1 && body_end != -1 && body_start < body_end {
+ body := code_block[body_start + 1..body_end]
+ // Check for call like `original_fn_name(...)` or `receiver.original_fn_name(...)`
+ assert body.contains(original_fn_name + '(') || body.contains('.' + original_fn_name + '('), 'Wrapper body does not appear to call original function `${original_fn_name}` in\n${body}'
+ } else {
+ assert false, 'Could not find function body `{...}` in wrapper:\n${code_block}'
+ }
// If all checks pass, do nothing (implicitly ok)
}
diff --git a/lib/lang/rust/rust.v b/lib/lang/rust/rust.v
index 80637640..d07e15ee 100644
--- a/lib/lang/rust/rust.v
+++ b/lib/lang/rust/rust.v
@@ -5,1002 +5,1028 @@ import freeflowuniverse.herolib.core.pathlib
// Reads and combines all Rust files in the given directory
pub fn read_source_code(source_code_path string) !string {
- // Get all files in the directory
- files := os.ls(source_code_path) or {
- return error('Failed to list files in directory: ${err}')
- }
-
- // Combine all Rust files into a single source code string
- mut source_code := ''
- for file in files {
- file_path := os.join_path(source_code_path, file)
-
- // Skip directories and non-Rust files
- if os.is_dir(file_path) || !file.ends_with('.rs') {
- continue
- }
-
- // Read the file content
- file_content := os.read_file(file_path) or {
- println('Failed to read file ${file_path}: ${err}')
- continue
- }
-
- // Add file content to the combined source code
- source_code += '// File: ${file}\n${file_content}\n\n'
- }
-
- if source_code == '' {
- return error('No Rust files found in directory: ${source_code_path}')
- }
-
- return source_code
+ // Get all files in the directory
+ files := os.ls(source_code_path) or {
+ return error('Failed to list files in directory: ${err}')
+ }
+
+ // Combine all Rust files into a single source code string
+ mut source_code := ''
+ for file in files {
+ file_path := os.join_path(source_code_path, file)
+
+ // Skip directories and non-Rust files
+ if os.is_dir(file_path) || !file.ends_with('.rs') {
+ continue
+ }
+
+ // Read the file content
+ file_content := os.read_file(file_path) or {
+ println('Failed to read file ${file_path}: ${err}')
+ continue
+ }
+
+ // Add file content to the combined source code
+ source_code += '// File: ${file}\n${file_content}\n\n'
+ }
+
+ if source_code == '' {
+ return error('No Rust files found in directory: ${source_code_path}')
+ }
+
+ return source_code
}
// Determines the crate path from the source code path
pub fn determine_crate_path(source_code_path string) !string {
- // Extract the path relative to the src directory
- src_index := source_code_path.index('src/') or {
- return error('Could not determine crate path: src/ not found in path')
- }
-
- mut path_parts := source_code_path[src_index+4..].split('/')
- // Remove the last part (the file name)
- if path_parts.len > 0 {
- path_parts.delete_last()
- }
- rel_path := path_parts.join('::')
- return 'sal::${rel_path}'
+ // Extract the path relative to the src directory
+ src_index := source_code_path.index('src/') or {
+ return error('Could not determine crate path: src/ not found in path')
+ }
+
+ mut path_parts := source_code_path[src_index + 4..].split('/')
+ // Remove the last part (the file name)
+ if path_parts.len > 0 {
+ path_parts.delete_last()
+ }
+ rel_path := path_parts.join('::')
+ return 'sal::${rel_path}'
}
// Extracts the module name from a directory path
pub fn extract_module_name_from_path(path string) string {
- dir_parts := path.split('/')
- return dir_parts[dir_parts.len - 1]
+ dir_parts := path.split('/')
+ return dir_parts[dir_parts.len - 1]
}
// Determines the source package information from a given source path
pub struct SourcePackageInfo {
pub:
- name string // Package name
- path string // Relative path to the package (for cargo.toml)
- module string // Full module path (e.g., herodb::logic)
+ name string // Package name
+ path string // Relative path to the package (for cargo.toml)
+ module string // Full module path (e.g., herodb::logic)
}
// Detect source package and module information from a path
pub fn detect_source_package(source_path string) !SourcePackageInfo {
- // Look for Cargo.toml in parent directories to find the crate root
- mut current_path := source_path
- mut package_name := ''
- mut rel_path := ''
- mut module_parts := []string{}
-
- // Extract module name from the directory path
- mod_name := extract_module_name_from_path(source_path)
- module_parts << mod_name
-
- // Look up parent directories until we find a Cargo.toml
- for i := 0; i < 10; i++ { // limit depth to avoid infinite loops
- parent_dir := os.dir(current_path)
- cargo_path := os.join_path(parent_dir, 'Cargo.toml')
-
- if os.exists(cargo_path) {
- // Found the root of the crate
- cargo_content := os.read_file(cargo_path) or {
- return error('Failed to read Cargo.toml at ${cargo_path}: ${err}')
- }
-
- // Extract package name
- for line in cargo_content.split('\n') {
- if line.contains('name') && line.contains('=') {
- parts := line.split('=')
- if parts.len > 1 {
- package_name = parts[1].trim_space().trim('"').trim("'")
- break
- }
- }
- }
-
- // Calculate relative path from current working directory to crate root
- current_dir := os.getwd()
- rel_path = pathlib.path_relative(parent_dir, current_dir) or {
- return error('Failed to get relative path: ${err}')
- }
- if rel_path == '.' {
- rel_path = './'
- }
-
- break
- }
-
- // Go up one directory
- if parent_dir == current_path {
- break // We've reached the root
- }
-
- // Add directory name to module path parts (in reverse order)
- parent_dir_name := os.base(parent_dir)
- if parent_dir_name != '' && parent_dir_name != '.' {
- module_parts.insert(0, parent_dir_name)
- }
-
- current_path = parent_dir
- }
-
- if package_name == '' {
- // If no Cargo.toml found, use the last directory name as package name
- package_name = os.base(os.dir(source_path))
- rel_path = '../' // default to parent directory
- }
-
- // Construct the full module path
- mut module_path := module_parts.join('::')
- if module_parts.len >= 2 {
- // Use only the last two components for the module path
- module_path = module_parts[module_parts.len-2..].join('::')
- }
-
- return SourcePackageInfo{
- name: package_name
- path: rel_path
- module: module_path
- }
+ // Look for Cargo.toml in parent directories to find the crate root
+ mut current_path := source_path
+ mut package_name := ''
+ mut rel_path := ''
+ mut module_parts := []string{}
+
+ // Extract module name from the directory path
+ mod_name := extract_module_name_from_path(source_path)
+ module_parts << mod_name
+
+ // Look up parent directories until we find a Cargo.toml
+ for i := 0; i < 10; i++ { // limit depth to avoid infinite loops
+ parent_dir := os.dir(current_path)
+ cargo_path := os.join_path(parent_dir, 'Cargo.toml')
+
+ if os.exists(cargo_path) {
+ // Found the root of the crate
+ cargo_content := os.read_file(cargo_path) or {
+ return error('Failed to read Cargo.toml at ${cargo_path}: ${err}')
+ }
+
+ // Extract package name
+ for line in cargo_content.split('\n') {
+ if line.contains('name') && line.contains('=') {
+ parts := line.split('=')
+ if parts.len > 1 {
+ package_name = parts[1].trim_space().trim('"').trim("'")
+ break
+ }
+ }
+ }
+
+ // Calculate relative path from current working directory to crate root
+ current_dir := os.getwd()
+ rel_path = pathlib.path_relative(parent_dir, current_dir) or {
+ return error('Failed to get relative path: ${err}')
+ }
+ if rel_path == '.' {
+ rel_path = './'
+ }
+
+ break
+ }
+
+ // Go up one directory
+ if parent_dir == current_path {
+ break // We've reached the root
+ }
+
+ // Add directory name to module path parts (in reverse order)
+ parent_dir_name := os.base(parent_dir)
+ if parent_dir_name != '' && parent_dir_name != '.' {
+ module_parts.insert(0, parent_dir_name)
+ }
+
+ current_path = parent_dir
+ }
+
+ if package_name == '' {
+ // If no Cargo.toml found, use the last directory name as package name
+ package_name = os.base(os.dir(source_path))
+ rel_path = '../' // default to parent directory
+ }
+
+ // Construct the full module path
+ mut module_path := module_parts.join('::')
+ if module_parts.len >= 2 {
+ // Use only the last two components for the module path
+ module_path = module_parts[module_parts.len - 2..].join('::')
+ }
+
+ return SourcePackageInfo{
+ name: package_name
+ path: rel_path
+ module: module_path
+ }
}
// Build and run a Rust project with an example
pub fn run_example(project_dir string, example_name string) !(string, string) {
- // Change to the project directory
- os.chdir(project_dir) or {
- return error('Failed to change directory to project: ${err}')
- }
-
- // Run cargo build first
- build_result := os.execute('cargo build')
- if build_result.exit_code != 0 {
- return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
- }
-
- // Run the example
- run_result := os.execute('cargo run --example ${example_name}')
-
- return build_result.output, run_result.output
+ // Change to the project directory
+ os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }
+
+ // Run cargo build first
+ build_result := os.execute('cargo build')
+ if build_result.exit_code != 0 {
+ return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
+ }
+
+ // Run the example
+ run_result := os.execute('cargo run --example ${example_name}')
+
+ return build_result.output, run_result.output
}
// Extract function names from wrapper code
fn extract_functions_from_code(code string) []string {
- mut functions := []string{}
- lines := code.split('\n')
-
- for line in lines {
- if line.contains('pub fn ') && !line.contains('//') {
- // Extract function name
- parts := line.split('pub fn ')
- if parts.len > 1 {
- name_parts := parts[1].split('(')
- if name_parts.len > 0 {
- fn_name := name_parts[0].trim_space()
- if fn_name != '' {
- functions << fn_name
- }
- }
- }
- }
- }
-
- return functions
+ mut functions := []string{}
+ lines := code.split('\n')
+
+ for line in lines {
+ if line.contains('pub fn ') && !line.contains('//') {
+ // Extract function name
+ parts := line.split('pub fn ')
+ if parts.len > 1 {
+ name_parts := parts[1].split('(')
+ if name_parts.len > 0 {
+ fn_name := name_parts[0].trim_space()
+ if fn_name != '' {
+ functions << fn_name
+ }
+ }
+ }
+ }
+ }
+
+ return functions
}
// Extract function names from Rust file
pub fn list_functions_in_file(file_path string) ![]string {
- // Check if file exists
- if !os.exists(file_path) {
- return error('File not found: ${file_path}')
- }
+ // Check if file exists
+ if !os.exists(file_path) {
+ return error('File not found: ${file_path}')
+ }
- // Read file content
- content := os.read_file(file_path) or {
- return error('Failed to read file: ${err}')
- }
+ // Read file content
+ content := os.read_file(file_path) or { return error('Failed to read file: ${err}') }
- return extract_functions_from_content(content)
+ return extract_functions_from_content(content)
}
// Extract function names from content string
pub fn extract_functions_from_content(content string) []string {
- mut functions := []string{}
- lines := content.split('\n')
-
- mut in_comment_block := false
- mut current_impl := '' // Track the current impl block
- mut impl_level := 0 // Track nesting level of braces within impl
-
- for line in lines {
- trimmed := line.trim_space()
-
- // Skip comment lines and empty lines
- if trimmed.starts_with('//') || trimmed == '' {
- continue
- }
-
- // Handle block comments
- if trimmed.starts_with('/*') {
- in_comment_block = true
- }
- if in_comment_block {
- if trimmed.contains('*/') {
- in_comment_block = false
- }
- continue
- }
-
- // Check for impl blocks
- if trimmed.starts_with('impl ') {
- // Extract the struct name from the impl declaration
- mut struct_name := ''
-
- // Handle generic impls like "impl StructName"
- if trimmed.contains('<') && trimmed.contains('>') {
- // Complex case with generics
- if trimmed.contains(' for ') {
- // Format: impl Trait for StructName
- parts := trimmed.split(' for ')
- if parts.len > 1 {
- struct_parts := parts[1].split('{')
- if struct_parts.len > 0 {
- struct_name = struct_parts[0].trim_space()
- // Remove any generic parameters
- if struct_name.contains('<') {
- struct_name = struct_name.all_before('<')
- }
- }
- }
- } else {
- // Format: impl StructName
- after_impl := trimmed.all_after('impl')
- after_generic := after_impl.all_after('>')
- struct_parts := after_generic.split('{')
- if struct_parts.len > 0 {
- struct_name = struct_parts[0].trim_space()
- // Remove any generic parameters
- if struct_name.contains('<') {
- struct_name = struct_name.all_before('<')
- }
- }
- }
- } else {
- // Simple case without generics
- if trimmed.contains(' for ') {
- // Format: impl Trait for StructName
- parts := trimmed.split(' for ')
- if parts.len > 1 {
- struct_parts := parts[1].split('{')
- if struct_parts.len > 0 {
- struct_name = struct_parts[0].trim_space()
- }
- }
- } else {
- // Format: impl StructName
- parts := trimmed.split('impl ')
- if parts.len > 1 {
- struct_parts := parts[1].split('{')
- if struct_parts.len > 0 {
- struct_name = struct_parts[0].trim_space()
- }
- }
- }
- }
-
- current_impl = struct_name
- if trimmed.contains('{') {
- impl_level = 1
- } else {
- impl_level = 0
- }
- continue
- }
-
- // Track brace levels to properly handle nested blocks
- if current_impl != '' {
- // Count opening braces
- for c in trimmed {
- if c == `{` {
- impl_level++
- } else if c == `}` {
- impl_level--
- // If we've closed the impl block, reset current_impl
- if impl_level == 0 {
- current_impl = ''
- break
- }
- }
- }
- }
-
- // Look for function declarations
- if (trimmed.starts_with('pub fn ') || trimmed.starts_with('fn ')) && !trimmed.contains(';') {
- mut fn_name := ''
-
- // Extract function name
- if trimmed.starts_with('pub fn ') {
- fn_parts := trimmed.split('pub fn ')
- if fn_parts.len > 1 {
- name_parts := fn_parts[1].split('(')
- if name_parts.len > 0 {
- fn_name = name_parts[0].trim_space()
- }
- }
- } else {
- fn_parts := trimmed.split('fn ')
- if fn_parts.len > 1 {
- name_parts := fn_parts[1].split('(')
- if name_parts.len > 0 {
- fn_name = name_parts[0].trim_space()
- }
- }
- }
-
- // Add function name to the list if it's not empty
- if fn_name != '' {
- if current_impl != '' {
- // All functions in an impl block use :: notation
- functions << '${current_impl}::${fn_name}'
- } else {
- // Regular function
- functions << fn_name
- }
- }
- }
- }
-
- return functions
+ mut functions := []string{}
+ lines := content.split('\n')
+
+ mut in_comment_block := false
+ mut current_impl := '' // Track the current impl block
+ mut impl_level := 0 // Track nesting level of braces within impl
+
+ for line in lines {
+ trimmed := line.trim_space()
+
+ // Skip comment lines and empty lines
+ if trimmed.starts_with('//') || trimmed == '' {
+ continue
+ }
+
+ // Handle block comments
+ if trimmed.starts_with('/*') {
+ in_comment_block = true
+ }
+ if in_comment_block {
+ if trimmed.contains('*/') {
+ in_comment_block = false
+ }
+ continue
+ }
+
+ // Check for impl blocks
+ if trimmed.starts_with('impl ') {
+ // Extract the struct name from the impl declaration
+ mut struct_name := ''
+
+			// Handle generic impls like "impl<T> StructName<T>"
+ if trimmed.contains('<') && trimmed.contains('>') {
+ // Complex case with generics
+ if trimmed.contains(' for ') {
+ // Format: impl Trait for StructName
+ parts := trimmed.split(' for ')
+ if parts.len > 1 {
+ struct_parts := parts[1].split('{')
+ if struct_parts.len > 0 {
+ struct_name = struct_parts[0].trim_space()
+ // Remove any generic parameters
+ if struct_name.contains('<') {
+ struct_name = struct_name.all_before('<')
+ }
+ }
+ }
+ } else {
+				// Format: impl<T> StructName<T>
+ after_impl := trimmed.all_after('impl')
+ after_generic := after_impl.all_after('>')
+ struct_parts := after_generic.split('{')
+ if struct_parts.len > 0 {
+ struct_name = struct_parts[0].trim_space()
+ // Remove any generic parameters
+ if struct_name.contains('<') {
+ struct_name = struct_name.all_before('<')
+ }
+ }
+ }
+ } else {
+ // Simple case without generics
+ if trimmed.contains(' for ') {
+ // Format: impl Trait for StructName
+ parts := trimmed.split(' for ')
+ if parts.len > 1 {
+ struct_parts := parts[1].split('{')
+ if struct_parts.len > 0 {
+ struct_name = struct_parts[0].trim_space()
+ }
+ }
+ } else {
+ // Format: impl StructName
+ parts := trimmed.split('impl ')
+ if parts.len > 1 {
+ struct_parts := parts[1].split('{')
+ if struct_parts.len > 0 {
+ struct_name = struct_parts[0].trim_space()
+ }
+ }
+ }
+ }
+
+ current_impl = struct_name
+ if trimmed.contains('{') {
+ impl_level = 1
+ } else {
+ impl_level = 0
+ }
+ continue
+ }
+
+ // Track brace levels to properly handle nested blocks
+ if current_impl != '' {
+ // Count opening braces
+ for c in trimmed {
+ if c == `{` {
+ impl_level++
+ } else if c == `}` {
+ impl_level--
+ // If we've closed the impl block, reset current_impl
+ if impl_level == 0 {
+ current_impl = ''
+ break
+ }
+ }
+ }
+ }
+
+ // Look for function declarations
+ if (trimmed.starts_with('pub fn ') || trimmed.starts_with('fn ')) && !trimmed.contains(';') {
+ mut fn_name := ''
+
+ // Extract function name
+ if trimmed.starts_with('pub fn ') {
+ fn_parts := trimmed.split('pub fn ')
+ if fn_parts.len > 1 {
+ name_parts := fn_parts[1].split('(')
+ if name_parts.len > 0 {
+ fn_name = name_parts[0].trim_space()
+ }
+ }
+ } else {
+ fn_parts := trimmed.split('fn ')
+ if fn_parts.len > 1 {
+ name_parts := fn_parts[1].split('(')
+ if name_parts.len > 0 {
+ fn_name = name_parts[0].trim_space()
+ }
+ }
+ }
+
+ // Add function name to the list if it's not empty
+ if fn_name != '' {
+ if current_impl != '' {
+ // All functions in an impl block use :: notation
+ functions << '${current_impl}::${fn_name}'
+ } else {
+ // Regular function
+ functions << fn_name
+ }
+ }
+ }
+ }
+
+ return functions
}
// Extract struct names from Rust file
pub fn list_structs_in_file(file_path string) ![]string {
- // Check if file exists
- if !os.exists(file_path) {
- return error('File not found: ${file_path}')
- }
-
- // Read file content
- content := os.read_file(file_path) or {
- return error('Failed to read file: ${err}')
- }
-
- return extract_structs_from_content(content)
+ // Check if file exists
+ if !os.exists(file_path) {
+ return error('File not found: ${file_path}')
+ }
+
+ // Read file content
+ content := os.read_file(file_path) or { return error('Failed to read file: ${err}') }
+
+ return extract_structs_from_content(content)
}
// Extract struct names from content string
pub fn extract_structs_from_content(content string) []string {
- mut structs := []string{}
- lines := content.split('\n')
-
- mut in_comment_block := false
-
- for line in lines {
- trimmed := line.trim_space()
-
- // Skip comment lines and empty lines
- if trimmed.starts_with('//') {
- continue
- }
-
- // Handle block comments
- if trimmed.starts_with('/*') {
- in_comment_block = true
- }
- if in_comment_block {
- if trimmed.contains('*/') {
- in_comment_block = false
- }
- continue
- }
-
- // Look for struct declarations
- if (trimmed.starts_with('pub struct ') || trimmed.starts_with('struct ')) && !trimmed.contains(';') {
- mut struct_name := ''
-
- // Extract struct name
- if trimmed.starts_with('pub struct ') {
- struct_parts := trimmed.split('pub struct ')
- if struct_parts.len > 1 {
- name_parts := struct_parts[1].split('{')
- if name_parts.len > 0 {
- parts := name_parts[0].split('<')
- struct_name = parts[0].trim_space()
- }
- }
- } else {
- struct_parts := trimmed.split('struct ')
- if struct_parts.len > 1 {
- name_parts := struct_parts[1].split('{')
- if name_parts.len > 0 {
- parts := name_parts[0].split('<')
- struct_name = parts[0].trim_space()
- }
- }
- }
-
- // Add struct name to the list if it's not empty
- if struct_name != '' {
- structs << struct_name
- }
- }
- }
-
- return structs
+ mut structs := []string{}
+ lines := content.split('\n')
+
+ mut in_comment_block := false
+
+ for line in lines {
+ trimmed := line.trim_space()
+
+ // Skip comment lines and empty lines
+ if trimmed.starts_with('//') {
+ continue
+ }
+
+ // Handle block comments
+ if trimmed.starts_with('/*') {
+ in_comment_block = true
+ }
+ if in_comment_block {
+ if trimmed.contains('*/') {
+ in_comment_block = false
+ }
+ continue
+ }
+
+ // Look for struct declarations
+ if (trimmed.starts_with('pub struct ') || trimmed.starts_with('struct '))
+ && !trimmed.contains(';') {
+ mut struct_name := ''
+
+ // Extract struct name
+ if trimmed.starts_with('pub struct ') {
+ struct_parts := trimmed.split('pub struct ')
+ if struct_parts.len > 1 {
+ name_parts := struct_parts[1].split('{')
+ if name_parts.len > 0 {
+ parts := name_parts[0].split('<')
+ struct_name = parts[0].trim_space()
+ }
+ }
+ } else {
+ struct_parts := trimmed.split('struct ')
+ if struct_parts.len > 1 {
+ name_parts := struct_parts[1].split('{')
+ if name_parts.len > 0 {
+ parts := name_parts[0].split('<')
+ struct_name = parts[0].trim_space()
+ }
+ }
+ }
+
+ // Add struct name to the list if it's not empty
+ if struct_name != '' {
+ structs << struct_name
+ }
+ }
+ }
+
+ return structs
}
// Extract imports from a Rust file
pub fn extract_imports(file_path string) ![]string {
- // Check if file exists
- if !os.exists(file_path) {
- return error('File not found: ${file_path}')
- }
-
- // Read file content
- content := os.read_file(file_path) or {
- return error('Failed to read file: ${err}')
- }
-
- return extract_imports_from_content(content)
+ // Check if file exists
+ if !os.exists(file_path) {
+ return error('File not found: ${file_path}')
+ }
+
+ // Read file content
+ content := os.read_file(file_path) or { return error('Failed to read file: ${err}') }
+
+ return extract_imports_from_content(content)
}
// Extract imports from content string
pub fn extract_imports_from_content(content string) []string {
- mut imports := []string{}
- lines := content.split('\n')
-
- mut in_comment_block := false
-
- for line in lines {
- trimmed := line.trim_space()
-
- // Skip comment lines and empty lines
- if trimmed.starts_with('//') {
- continue
- }
-
- // Handle block comments
- if trimmed.starts_with('/*') {
- in_comment_block = true
- }
- if in_comment_block {
- if trimmed.contains('*/') {
- in_comment_block = false
- }
- continue
- }
-
- // Extract use statements
- if trimmed.starts_with('use ') && trimmed.ends_with(';') {
- import_part := trimmed[4..trimmed.len-1].trim_space() // Skip 'use ', remove trailing ';', trim spaces
- imports << import_part
- }
- }
-
- return imports
+ mut imports := []string{}
+ lines := content.split('\n')
+
+ mut in_comment_block := false
+
+ for line in lines {
+ trimmed := line.trim_space()
+
+ // Skip comment lines and empty lines
+ if trimmed.starts_with('//') {
+ continue
+ }
+
+ // Handle block comments
+ if trimmed.starts_with('/*') {
+ in_comment_block = true
+ }
+ if in_comment_block {
+ if trimmed.contains('*/') {
+ in_comment_block = false
+ }
+ continue
+ }
+
+ // Extract use statements
+ if trimmed.starts_with('use ') && trimmed.ends_with(';') {
+ import_part := trimmed[4..trimmed.len - 1].trim_space() // Skip 'use ', remove trailing ';', trim spaces
+ imports << import_part
+ }
+ }
+
+ return imports
}
// Get module name from file path
pub fn get_module_name(file_path string) string {
- // Extract filename from path
- filename := os.base(file_path)
-
- // If it's mod.rs, use parent directory name
- if filename == 'mod.rs' {
- dir := os.dir(file_path)
- return os.base(dir)
- }
-
- // Otherwise use filename without extension
- return filename.all_before('.rs')
+ // Extract filename from path
+ filename := os.base(file_path)
+
+ // If it's mod.rs, use parent directory name
+ if filename == 'mod.rs' {
+ dir := os.dir(file_path)
+ return os.base(dir)
+ }
+
+ // Otherwise use filename without extension
+ return filename.all_before('.rs')
}
// List all modules in a directory
pub fn list_modules_in_directory(dir_path string) ![]string {
- // Check if directory exists
- if !os.exists(dir_path) || !os.is_dir(dir_path) {
- return error('Directory not found: ${dir_path}')
- }
-
- // Get all files in the directory
- files := os.ls(dir_path) or {
- return error('Failed to list files in directory: ${err}')
- }
-
- mut modules := []string{}
-
- // Check for mod.rs
- if files.contains('mod.rs') {
- modules << os.base(dir_path)
- }
-
- // Check for Rust files
- for file in files {
- if file.ends_with('.rs') && file != 'mod.rs' {
- modules << file.all_before('.rs')
- }
- }
-
- // Check for directories that contain mod.rs
- for file in files {
- file_path := os.join_path(dir_path, file)
- if os.is_dir(file_path) {
- subfiles := os.ls(file_path) or { continue }
- if subfiles.contains('mod.rs') {
- modules << file
- }
- }
- }
-
- return modules
+ // Check if directory exists
+ if !os.exists(dir_path) || !os.is_dir(dir_path) {
+ return error('Directory not found: ${dir_path}')
+ }
+
+ // Get all files in the directory
+ files := os.ls(dir_path) or { return error('Failed to list files in directory: ${err}') }
+
+ mut modules := []string{}
+
+ // Check for mod.rs
+ if files.contains('mod.rs') {
+ modules << os.base(dir_path)
+ }
+
+ // Check for Rust files
+ for file in files {
+ if file.ends_with('.rs') && file != 'mod.rs' {
+ modules << file.all_before('.rs')
+ }
+ }
+
+ // Check for directories that contain mod.rs
+ for file in files {
+ file_path := os.join_path(dir_path, file)
+ if os.is_dir(file_path) {
+ subfiles := os.ls(file_path) or { continue }
+ if subfiles.contains('mod.rs') {
+ modules << file
+ }
+ }
+ }
+
+ return modules
}
// Generate an import statement for a module based on current file and target module path
pub fn generate_import_statement(current_file_path string, target_module_path string) !string {
- // Attempt to find the project root (directory containing Cargo.toml)
- mut project_root := ''
- mut current_path := os.dir(current_file_path)
-
- // Find the project root
- for i := 0; i < 10; i++ { // Limit depth to avoid infinite loops
- cargo_path := os.join_path(current_path, 'Cargo.toml')
- if os.exists(cargo_path) {
- project_root = current_path
- break
- }
-
- parent_dir := os.dir(current_path)
- if parent_dir == current_path {
- break // We've reached the root
- }
- current_path = parent_dir
- }
-
- if project_root == '' {
- return error('Could not find project root (Cargo.toml)')
- }
-
- // Get package info
- pkg_info := detect_source_package(current_file_path) or {
- return error('Failed to detect package info: ${err}')
- }
-
- // Check if target module is part of the same package
- target_pkg_info := detect_source_package(target_module_path) or {
- return error('Failed to detect target package info: ${err}')
- }
-
- // If same package, generate a relative import
- if pkg_info.name == target_pkg_info.name {
- // Convert file paths to module paths
- current_file_dir := os.dir(current_file_path)
- target_file_dir := os.dir(target_module_path)
-
- // Get paths relative to src
- current_rel_path := current_file_dir.replace('${project_root}/src/', '')
- target_rel_path := target_file_dir.replace('${project_root}/src/', '')
-
- // Convert paths to module format
- current_module := current_rel_path.replace('/', '::')
- target_module := target_rel_path.replace('/', '::')
-
- // Generate import based on path relationship
- if current_module == target_module {
- // Same module, import target directly
- target_name := get_module_name(target_module_path)
- return 'use crate::${target_module}::${target_name};'
- } else if current_module.contains(target_module) {
- // Target is parent module
- target_name := get_module_name(target_module_path)
- return 'use super::${target_name};'
- } else if target_module.contains(current_module) {
- // Target is child module
- target_name := get_module_name(target_module_path)
- child_path := target_module.replace('${current_module}::', '')
- return 'use self::${child_path}::${target_name};'
- } else {
- // Target is sibling or other module
- target_name := get_module_name(target_module_path)
- return 'use crate::${target_module}::${target_name};'
- }
- } else {
- // External package
- return 'use ${target_pkg_info.name}::${target_pkg_info.module};'
- }
+ // Attempt to find the project root (directory containing Cargo.toml)
+ mut project_root := ''
+ mut current_path := os.dir(current_file_path)
+
+ // Find the project root
+ for i := 0; i < 10; i++ { // Limit depth to avoid infinite loops
+ cargo_path := os.join_path(current_path, 'Cargo.toml')
+ if os.exists(cargo_path) {
+ project_root = current_path
+ break
+ }
+
+ parent_dir := os.dir(current_path)
+ if parent_dir == current_path {
+ break // We've reached the root
+ }
+ current_path = parent_dir
+ }
+
+ if project_root == '' {
+ return error('Could not find project root (Cargo.toml)')
+ }
+
+ // Get package info
+ pkg_info := detect_source_package(current_file_path) or {
+ return error('Failed to detect package info: ${err}')
+ }
+
+ // Check if target module is part of the same package
+ target_pkg_info := detect_source_package(target_module_path) or {
+ return error('Failed to detect target package info: ${err}')
+ }
+
+ // If same package, generate a relative import
+ if pkg_info.name == target_pkg_info.name {
+ // Convert file paths to module paths
+ current_file_dir := os.dir(current_file_path)
+ target_file_dir := os.dir(target_module_path)
+
+ // Get paths relative to src
+ current_rel_path := current_file_dir.replace('${project_root}/src/', '')
+ target_rel_path := target_file_dir.replace('${project_root}/src/', '')
+
+ // Convert paths to module format
+ current_module := current_rel_path.replace('/', '::')
+ target_module := target_rel_path.replace('/', '::')
+
+ // Generate import based on path relationship
+ if current_module == target_module {
+ // Same module, import target directly
+ target_name := get_module_name(target_module_path)
+ return 'use crate::${target_module}::${target_name};'
+ } else if current_module.contains(target_module) {
+ // Target is parent module
+ target_name := get_module_name(target_module_path)
+ return 'use super::${target_name};'
+ } else if target_module.contains(current_module) {
+ // Target is child module
+ target_name := get_module_name(target_module_path)
+ child_path := target_module.replace('${current_module}::', '')
+ return 'use self::${child_path}::${target_name};'
+ } else {
+ // Target is sibling or other module
+ target_name := get_module_name(target_module_path)
+ return 'use crate::${target_module}::${target_name};'
+ }
+ } else {
+ // External package
+ return 'use ${target_pkg_info.name}::${target_pkg_info.module};'
+ }
}
// Extract dependencies from Cargo.toml
pub fn extract_dependencies(cargo_path string) !map[string]string {
- // Check if file exists
- if !os.exists(cargo_path) {
- return error('Cargo.toml not found: ${cargo_path}')
- }
-
- // Read file content
- content := os.read_file(cargo_path) or {
- return error('Failed to read Cargo.toml: ${err}')
- }
-
- mut dependencies := map[string]string{}
- mut in_dependencies_section := false
-
- lines := content.split('\n')
- for line in lines {
- trimmed := line.trim_space()
-
- // Check for dependencies section
- if trimmed == '[dependencies]' {
- in_dependencies_section = true
- continue
- } else if trimmed.starts_with('[') && in_dependencies_section {
- // Left dependencies section
- in_dependencies_section = false
- continue
- }
-
- // Extract dependency info
- if in_dependencies_section && trimmed != '' {
- if trimmed.contains('=') {
- eq_pos := trimmed.index('=') or { continue } // Find the first '='
- name := trimmed[..eq_pos].trim_space()
- mut value := trimmed[eq_pos+1..].trim_space()
+ // Check if file exists
+ if !os.exists(cargo_path) {
+ return error('Cargo.toml not found: ${cargo_path}')
+ }
- // Remove surrounding quotes if they exist (optional, but good practice for simple strings)
- // Note: This won't remove braces for tables, which is desired.
- if value.starts_with('"') && value.ends_with('"') {
- value = value[1..value.len-1]
- } else if value.starts_with("'") && value.ends_with("'") {
- value = value[1..value.len-1]
- }
- dependencies[name] = value // Store the potentially complex value string
- }
- }
- }
-
- return dependencies
+ // Read file content
+ content := os.read_file(cargo_path) or { return error('Failed to read Cargo.toml: ${err}') }
+
+ mut dependencies := map[string]string{}
+ mut in_dependencies_section := false
+
+ lines := content.split('\n')
+ for line in lines {
+ trimmed := line.trim_space()
+
+ // Check for dependencies section
+ if trimmed == '[dependencies]' {
+ in_dependencies_section = true
+ continue
+ } else if trimmed.starts_with('[') && in_dependencies_section {
+			// Exited the dependencies section
+ in_dependencies_section = false
+ continue
+ }
+
+ // Extract dependency info
+ if in_dependencies_section && trimmed != '' {
+ if trimmed.contains('=') {
+ eq_pos := trimmed.index('=') or { continue } // Find the first '='
+ name := trimmed[..eq_pos].trim_space()
+ mut value := trimmed[eq_pos + 1..].trim_space()
+
+ // Remove surrounding quotes if they exist (optional, but good practice for simple strings)
+ // Note: This won't remove braces for tables, which is desired.
+ if value.starts_with('"') && value.ends_with('"') {
+ value = value[1..value.len - 1]
+ } else if value.starts_with("'") && value.ends_with("'") {
+ value = value[1..value.len - 1]
+ }
+ dependencies[name] = value // Store the potentially complex value string
+ }
+ }
+ }
+
+ return dependencies
}
// Get a function declaration from a file by its name
pub fn get_function_from_file(file_path string, function_name string) !string {
- // Check if file exists
- if !os.exists(file_path) {
- return error('File not found: ${file_path}')
- }
-
- // Read file content
- content := os.read_file(file_path) or {
- return error('Failed to read file: ${err}')
- }
-
- return get_function_from_content(content, function_name)
+ // Check if file exists
+ if !os.exists(file_path) {
+ return error('File not found: ${file_path}')
+ }
+
+ // Read file content
+ content := os.read_file(file_path) or { return error('Failed to read file: ${err}') }
+
+ return get_function_from_content(content, function_name)
}
// Get a function declaration from a module by its name
pub fn get_function_from_module(module_path string, function_name string) !string {
- // Check if directory exists
- if !os.exists(module_path) {
- return error('Module path not found: ${module_path}')
- }
-
- // If it's a directory, look for mod.rs or lib.rs
- if os.is_dir(module_path) {
- mod_rs_path := os.join_path(module_path, 'mod.rs')
- lib_rs_path := os.join_path(module_path, 'lib.rs')
-
- if os.exists(mod_rs_path) {
- result := get_function_from_file(mod_rs_path, function_name) or {
- if err.msg().contains('Function ${function_name} not found') {
- '' // Not found error, resolve or block to empty string
- } else {
- return err // Propagate other errors
- }
- }
- if result != '' { return result }
- }
-
- if os.exists(lib_rs_path) { // Changed else if to if
- result := get_function_from_file(lib_rs_path, function_name) or {
- if err.msg().contains('Function ${function_name} not found') {
- '' // Not found error, resolve or block to empty string
- } else {
- return err // Propagate other errors
- }
- }
- if result != '' { return result }
- }
-
- // Try to find the function in any Rust file in the directory
- files := os.ls(module_path) or {
- return error('Failed to list files in module directory: ${err}')
- }
-
- for file in files {
- if file.ends_with('.rs') {
- file_path := os.join_path(module_path, file)
- result := get_function_from_file(file_path, function_name) or {
- if err.msg().contains('Function ${function_name} not found') {
- '' // Not found error, resolve or block to empty string
- } else {
- return err // Propagate other errors
- }
- }
- if result != '' { return result } // Found it
- }
- }
-
- return error('Function ${function_name} not found in module ${module_path}')
- } else {
- // It's a file path, treat it as a direct file
- return get_function_from_file(module_path, function_name)
- }
+	// Check that the module path exists (may be a file or a directory)
+ if !os.exists(module_path) {
+ return error('Module path not found: ${module_path}')
+ }
+
+ // If it's a directory, look for mod.rs or lib.rs
+ if os.is_dir(module_path) {
+ mod_rs_path := os.join_path(module_path, 'mod.rs')
+ lib_rs_path := os.join_path(module_path, 'lib.rs')
+
+ if os.exists(mod_rs_path) {
+ result := get_function_from_file(mod_rs_path, function_name) or {
+ if err.msg().contains('Function ${function_name} not found') {
+					'' // Function not found in this file; the or-block resolves to an empty string
+ } else {
+ return err // Propagate other errors
+ }
+ }
+ if result != '' {
+ return result
+ }
+ }
+
+ if os.exists(lib_rs_path) { // Changed else if to if
+ result := get_function_from_file(lib_rs_path, function_name) or {
+ if err.msg().contains('Function ${function_name} not found') {
+					'' // Function not found in this file; the or-block resolves to an empty string
+ } else {
+ return err // Propagate other errors
+ }
+ }
+ if result != '' {
+ return result
+ }
+ }
+
+ // Try to find the function in any Rust file in the directory
+ files := os.ls(module_path) or {
+ return error('Failed to list files in module directory: ${err}')
+ }
+
+ for file in files {
+ if file.ends_with('.rs') {
+ file_path := os.join_path(module_path, file)
+ result := get_function_from_file(file_path, function_name) or {
+ if err.msg().contains('Function ${function_name} not found') {
+						'' // Function not found in this file; the or-block resolves to an empty string
+ } else {
+ return err // Propagate other errors
+ }
+ }
+ if result != '' {
+ return result
+ }
+				// Not found in this file; keep scanning the remaining files
+ }
+ }
+
+ return error('Function ${function_name} not found in module ${module_path}')
+ } else {
+ // It's a file path, treat it as a direct file
+ return get_function_from_file(module_path, function_name)
+ }
}
// Get a function declaration from content by its name
pub fn get_function_from_content(content string, function_name string) !string {
- is_method := function_name.contains('::')
- mut struct_name := ''
- mut method_name := function_name
- if is_method {
- parts := function_name.split('::')
- if parts.len == 2 {
- struct_name = parts[0]
- method_name = parts[1]
- } else {
- return error('Invalid method format: ${function_name}')
- }
- }
-
- lines := content.split('\n')
- mut function_declaration := ''
- mut brace_level := 0
- mut function_start_line_found := false
- mut in_impl_block := false // Flag to track if we are inside the correct impl block
- mut impl_brace_level := 0 // To know when the impl block ends
+ is_method := function_name.contains('::')
+ mut struct_name := ''
+ mut method_name := function_name
+ if is_method {
+ parts := function_name.split('::')
+ if parts.len == 2 {
+ struct_name = parts[0]
+ method_name = parts[1]
+ } else {
+ return error('Invalid method format: ${function_name}')
+ }
+ }
- for line in lines {
- trimmed := line.trim_space()
- if trimmed.starts_with('//') { continue } // Skip single-line comments
+ lines := content.split('\n')
+ mut function_declaration := ''
+ mut brace_level := 0
+ mut function_start_line_found := false
+ mut in_impl_block := false // Flag to track if we are inside the correct impl block
+ mut impl_brace_level := 0 // To know when the impl block ends
- // Handle finding the correct impl block if it's a method
- if is_method && !in_impl_block {
- if trimmed.contains('impl') && trimmed.contains(struct_name) {
- in_impl_block = true
- // Calculate the brace level *before* this impl line
- // This is tricky, maybe just track entry/exit
- for c in line { if c == `{` { impl_brace_level += 1 } }
- continue // Don't process the impl line itself as the start
- }
- continue // Skip lines until the correct impl block is found
- }
+ for line in lines {
+ trimmed := line.trim_space()
+ if trimmed.starts_with('//') {
+ continue
+ }
+ // Skip single-line comments
- // Handle exiting the impl block
- if is_method && in_impl_block {
- current_line_brace_change := line.count('{') - line.count('}')
- if impl_brace_level + current_line_brace_change <= 0 { // Assuming impl starts at level 0 relative to its scope
- in_impl_block = false // Exited the impl block
- impl_brace_level = 0
- }
- impl_brace_level += current_line_brace_change
- }
+ // Handle finding the correct impl block if it's a method
+ if is_method && !in_impl_block {
+ if trimmed.contains('impl') && trimmed.contains(struct_name) {
+ in_impl_block = true
+ // Calculate the brace level *before* this impl line
+ // This is tricky, maybe just track entry/exit
+ for c in line {
+ if c == `{` {
+ impl_brace_level += 1
+ }
+ }
+ continue // Don't process the impl line itself as the start
+ }
+ continue // Skip lines until the correct impl block is found
+ }
- // Find the function/method start line
- if !function_start_line_found {
- mut is_target_line := false
- if is_method && in_impl_block {
- // Inside the correct impl, look for method
- is_target_line = trimmed.contains('fn ${method_name}') || trimmed.contains('fn ${method_name}<') // Handle generics
- } else if !is_method {
- // Look for standalone function
- is_target_line = trimmed.contains('fn ${function_name}') || trimmed.contains('fn ${function_name}<')
- }
+ // Handle exiting the impl block
+ if is_method && in_impl_block {
+ current_line_brace_change := line.count('{') - line.count('}')
+ if impl_brace_level + current_line_brace_change <= 0 { // Assuming impl starts at level 0 relative to its scope
+ in_impl_block = false // Exited the impl block
+ impl_brace_level = 0
+ }
+ impl_brace_level += current_line_brace_change
+ }
- if is_target_line {
- function_start_line_found = true
- function_declaration += line + '\n'
-
- // Count initial braces on the declaration line
- for c in line {
- if c == `{` {
- brace_level++
- } else if c == `}` {
- brace_level-- // Should ideally not happen on decl line
- }
- }
-
- // Handle single-line functions like `fn simple() -> i32 { 42 }` or trait methods ending with `;`
- if brace_level == 0 && (line.contains('}') || line.contains(';')) {
- break // Function definition is complete on this line
- }
- continue // Move to next line after finding the start
- }
- }
+ // Find the function/method start line
+ if !function_start_line_found {
+ mut is_target_line := false
+ if is_method && in_impl_block {
+ // Inside the correct impl, look for method
+ is_target_line = trimmed.contains('fn ${method_name}')
+ || trimmed.contains('fn ${method_name}<') // Handle generics
+ } else if !is_method {
+ // Look for standalone function
+ is_target_line = trimmed.contains('fn ${function_name}')
+ || trimmed.contains('fn ${function_name}<')
+ }
- // If function start found, append lines and track braces
- if function_start_line_found {
- function_declaration += line + '\n'
-
- // Count braces to determine when the function ends
- for c in line {
- if c == `{` {
- brace_level++
- } else if c == `}` {
- brace_level--
- }
- }
+ if is_target_line {
+ function_start_line_found = true
+ function_declaration += line + '\n'
- // Check if function ended
- if brace_level <= 0 { // <= 0 to handle potential formatting issues
- break
- }
- }
- }
+ // Count initial braces on the declaration line
+ for c in line {
+ if c == `{` {
+ brace_level++
+ } else if c == `}` {
+ brace_level-- // Should ideally not happen on decl line
+ }
+ }
- if function_declaration == '' {
- return error('Function ${function_name} not found in content')
- }
+ // Handle single-line functions like `fn simple() -> i32 { 42 }` or trait methods ending with `;`
+ if brace_level == 0 && (line.contains('}') || line.contains(';')) {
+ break // Function definition is complete on this line
+ }
+ continue // Move to next line after finding the start
+ }
+ }
- return function_declaration.trim_space()
+ // If function start found, append lines and track braces
+ if function_start_line_found {
+ function_declaration += line + '\n'
+
+ // Count braces to determine when the function ends
+ for c in line {
+ if c == `{` {
+ brace_level++
+ } else if c == `}` {
+ brace_level--
+ }
+ }
+
+ // Check if function ended
+ if brace_level <= 0 { // <= 0 to handle potential formatting issues
+ break
+ }
+ }
+ }
+
+ if function_declaration == '' {
+ return error('Function ${function_name} not found in content')
+ }
+
+ return function_declaration.trim_space()
}
// Get a struct declaration from a file by its name
pub fn get_struct_from_file(file_path string, struct_name string) !string {
- // Check if file exists
- if !os.exists(file_path) {
- return error('File not found: ${file_path}')
- }
-
- // Read file content
- content := os.read_file(file_path) or {
- return error('Failed to read file: ${err}')
- }
-
- return get_struct_from_content(content, struct_name)
+ // Check if file exists
+ if !os.exists(file_path) {
+ return error('File not found: ${file_path}')
+ }
+
+ // Read file content
+ content := os.read_file(file_path) or { return error('Failed to read file: ${err}') }
+
+ return get_struct_from_content(content, struct_name)
}
// Get a struct declaration from a module by its name
pub fn get_struct_from_module(module_path string, struct_name string) !string {
- // Check if directory exists
- if !os.exists(module_path) {
- return error('Module path not found: ${module_path}')
- }
-
- // If it's a directory, look for mod.rs or lib.rs
- if os.is_dir(module_path) {
- mod_rs_path := os.join_path(module_path, 'mod.rs')
- lib_rs_path := os.join_path(module_path, 'lib.rs')
-
- if os.exists(mod_rs_path) {
- result := get_struct_from_file(mod_rs_path, struct_name) or {
- if err.msg().contains('Struct ${struct_name} not found') {
- '' // Not found error, resolve or block to empty string
- } else {
- return err // Propagate other errors
- }
- }
- if result != '' { return result }
- }
-
- if os.exists(lib_rs_path) { // Changed else if to if
- result := get_struct_from_file(lib_rs_path, struct_name) or {
- if err.msg().contains('Struct ${struct_name} not found') {
- '' // Not found error, resolve or block to empty string
- } else {
- return err // Propagate other errors
- }
- }
- if result != '' { return result }
- }
-
- // Try to find the struct in any Rust file in the directory
- files := os.ls(module_path) or {
- return error('Failed to list files in module directory: ${err}')
- }
-
- for file in files {
- if file.ends_with('.rs') {
- file_path := os.join_path(module_path, file)
- result := get_struct_from_file(file_path, struct_name) or {
- if err.msg().contains('Struct ${struct_name} not found') {
- '' // Not found error, resolve or block to empty string
- } else {
- return err // Propagate other errors
- }
- }
- if result != '' { return result } // Found it
- }
- }
-
- return error('Struct ${struct_name} not found in module ${module_path}')
- } else {
- // It's a file path, treat it as a direct file
- return get_struct_from_file(module_path, struct_name)
- }
+	// Check that the module path exists (may be a file or a directory)
+ if !os.exists(module_path) {
+ return error('Module path not found: ${module_path}')
+ }
+
+ // If it's a directory, look for mod.rs or lib.rs
+ if os.is_dir(module_path) {
+ mod_rs_path := os.join_path(module_path, 'mod.rs')
+ lib_rs_path := os.join_path(module_path, 'lib.rs')
+
+ if os.exists(mod_rs_path) {
+ result := get_struct_from_file(mod_rs_path, struct_name) or {
+ if err.msg().contains('Struct ${struct_name} not found') {
+					'' // Struct not found in this file; the or-block resolves to an empty string
+ } else {
+ return err // Propagate other errors
+ }
+ }
+ if result != '' {
+ return result
+ }
+ }
+
+ if os.exists(lib_rs_path) { // Changed else if to if
+ result := get_struct_from_file(lib_rs_path, struct_name) or {
+ if err.msg().contains('Struct ${struct_name} not found') {
+					'' // Struct not found in this file; the or-block resolves to an empty string
+ } else {
+ return err // Propagate other errors
+ }
+ }
+ if result != '' {
+ return result
+ }
+ }
+
+ // Try to find the struct in any Rust file in the directory
+ files := os.ls(module_path) or {
+ return error('Failed to list files in module directory: ${err}')
+ }
+
+ for file in files {
+ if file.ends_with('.rs') {
+ file_path := os.join_path(module_path, file)
+ result := get_struct_from_file(file_path, struct_name) or {
+ if err.msg().contains('Struct ${struct_name} not found') {
+						'' // Struct not found in this file; the or-block resolves to an empty string
+ } else {
+ return err // Propagate other errors
+ }
+ }
+ if result != '' {
+ return result
+ }
+				// Not found in this file; keep scanning the remaining files
+ }
+ }
+
+ return error('Struct ${struct_name} not found in module ${module_path}')
+ } else {
+ // It's a file path, treat it as a direct file
+ return get_struct_from_file(module_path, struct_name)
+ }
}
// Get a struct declaration from content by its name
pub fn get_struct_from_content(content string, struct_name string) !string {
- lines := content.split('\n')
-
- mut in_comment_block := false
- mut brace_level := 0 // Tracks brace level *within* the target struct
- mut struct_declaration := ''
- mut struct_start_line_found := false
+ lines := content.split('\n')
- for line in lines {
- trimmed := line.trim_space()
- if trimmed.starts_with('//') { continue } // Skip single-line comments
+ mut in_comment_block := false
+ mut brace_level := 0 // Tracks brace level *within* the target struct
+ mut struct_declaration := ''
+ mut struct_start_line_found := false
- // Handle block comments
- if trimmed.starts_with('/*') {
- in_comment_block = true
- }
- if in_comment_block {
- if trimmed.contains('*/') { in_comment_block = false }
- continue
- }
+ for line in lines {
+ trimmed := line.trim_space()
+ if trimmed.starts_with('//') {
+ continue
+ }
+ // Skip single-line comments
- // Find the struct start line
- if !struct_start_line_found {
- // Check for `pub struct Name` or `struct Name` followed by { or ;
- if (trimmed.starts_with('pub struct ${struct_name}') || trimmed.starts_with('struct ${struct_name}')) &&
- (trimmed.contains('{') || trimmed.ends_with(';') || trimmed.contains(' where ') || trimmed.contains('<')) {
-
- // Basic check to avoid matching struct names that are substrings of others
- // Example: Don't match `MyStructExtended` when looking for `MyStruct`
- // This is a simplified check, regex might be more robust
- name_part := trimmed.all_after('struct ').trim_space()
- if name_part.starts_with(struct_name) {
- // Check if the character after the name is one that indicates end of name ('{', ';', '<', '(' or whitespace)
- char_after := if name_part.len > struct_name.len { name_part[struct_name.len] } else { u8(` `) }
- if char_after == u8(`{`) || char_after == u8(`;`) || char_after == u8(`<`) || char_after == u8(`(`) || char_after.is_space() {
- struct_start_line_found = true
- struct_declaration += line + '\n'
-
- // Count initial braces/check for semicolon on the declaration line
- for c in line {
- if c == `{` { brace_level++ }
- else if c == `}` { brace_level-- } // Should not happen on decl line
- }
-
- // Handle unit structs ending with semicolon
- if trimmed.ends_with(';') {
- break // Struct definition is complete on this line
- }
-
- // Handle single-line structs like `struct Simple { field: i32 }`
- if brace_level == 0 && line.contains('{') && line.contains('}') {
- break // Struct definition is complete on this line
- }
- continue // Move to next line after finding the start
- }
- }
- }
- }
+ // Handle block comments
+ if trimmed.starts_with('/*') {
+ in_comment_block = true
+ }
+ if in_comment_block {
+ if trimmed.contains('*/') {
+ in_comment_block = false
+ }
+ continue
+ }
- // If struct start found, append lines and track braces
- if struct_start_line_found {
- struct_declaration += line + '\n'
-
- // Count braces to determine when the struct ends
- for c in line {
- if c == `{` { brace_level++ }
- else if c == `}` { brace_level-- }
- }
+ // Find the struct start line
+ if !struct_start_line_found {
+ // Check for `pub struct Name` or `struct Name` followed by { or ;
+ if (trimmed.starts_with('pub struct ${struct_name}')
+ || trimmed.starts_with('struct ${struct_name}'))
+ && (trimmed.contains('{') || trimmed.ends_with(';')
+ || trimmed.contains(' where ') || trimmed.contains('<')) {
+ // Basic check to avoid matching struct names that are substrings of others
+ // Example: Don't match `MyStructExtended` when looking for `MyStruct`
+ // This is a simplified check, regex might be more robust
+ name_part := trimmed.all_after('struct ').trim_space()
+ if name_part.starts_with(struct_name) {
+ // Check if the character after the name is one that indicates end of name ('{', ';', '<', '(' or whitespace)
+ char_after := if name_part.len > struct_name.len {
+ name_part[struct_name.len]
+ } else {
+ u8(` `)
+ }
+ if char_after == u8(`{`) || char_after == u8(`;`) || char_after == u8(`<`)
+ || char_after == u8(`(`) || char_after.is_space() {
+ struct_start_line_found = true
+ struct_declaration += line + '\n'
- // Check if struct ended
- if brace_level <= 0 { // <= 0 handles potential formatting issues or initial non-zero level
- break
- }
- }
- }
+ // Count initial braces/check for semicolon on the declaration line
+ for c in line {
+ if c == `{` {
+ brace_level++
+ } else if c == `}` {
+ brace_level--
+ }
+ // Should not happen on decl line
+ }
- if struct_declaration == '' {
- return error('Struct ${struct_name} not found in content')
- }
+ // Handle unit structs ending with semicolon
+ if trimmed.ends_with(';') {
+ break // Struct definition is complete on this line
+ }
- return struct_declaration.trim_space()
+ // Handle single-line structs like `struct Simple { field: i32 }`
+ if brace_level == 0 && line.contains('{') && line.contains('}') {
+ break // Struct definition is complete on this line
+ }
+ continue // Move to next line after finding the start
+ }
+ }
+ }
+ }
+
+ // If struct start found, append lines and track braces
+ if struct_start_line_found {
+ struct_declaration += line + '\n'
+
+ // Count braces to determine when the struct ends
+ for c in line {
+ if c == `{` {
+ brace_level++
+ } else if c == `}` {
+ brace_level--
+ }
+ }
+
+ // Check if struct ended
+ if brace_level <= 0 { // <= 0 handles potential formatting issues or initial non-zero level
+ break
+ }
+ }
+ }
+
+ if struct_declaration == '' {
+ return error('Struct ${struct_name} not found in content')
+ }
+
+ return struct_declaration.trim_space()
}
// Struct to hold parsed struct information
@@ -1049,7 +1075,8 @@ pub fn parse_rust_struct(definition string) !StructInfo {
}
// Inside the struct definition, parse fields (skip comments and attributes)
- if brace_level > 0 && !trimmed_line.starts_with('//') && !trimmed_line.starts_with('#[') && trimmed_line.contains(':') {
+ if brace_level > 0 && !trimmed_line.starts_with('//') && !trimmed_line.starts_with('#[')
+ && trimmed_line.contains(':') {
parts := trimmed_line.split(':')
if parts.len >= 2 {
// Extract field name (handle potential 'pub ')
@@ -1068,7 +1095,8 @@ pub fn parse_rust_struct(definition string) !StructInfo {
}
// Skip attributes or comments if they somehow got here (e.g. line ending comments)
- if field_name.starts_with('[') || field_name.starts_with('/') || field_name == '' {
+ if field_name.starts_with('[') || field_name.starts_with('/')
+ || field_name == '' {
continue
}
@@ -1091,26 +1119,26 @@ pub fn parse_rust_struct(definition string) !StructInfo {
// Find the project root directory (the one containing Cargo.toml)
fn find_project_root(path string) string {
mut current_path := path
-
+
// If path is a file, get its directory
if !os.is_dir(current_path) {
current_path = os.dir(current_path)
}
-
+
// Look up parent directories until we find a Cargo.toml
for i := 0; i < 10; i++ { // Limit depth to avoid infinite loops
cargo_path := os.join_path(current_path, 'Cargo.toml')
if os.exists(cargo_path) {
return current_path
}
-
+
parent_dir := os.dir(current_path)
if parent_dir == current_path {
break // We've reached the filesystem root
}
current_path = parent_dir
}
-
+
return '' // No project root found
}
@@ -1120,23 +1148,23 @@ pub fn get_module_dependency(importer_path string, module_path string) !ModuleDe
if !os.exists(importer_path) {
return error('Importer path does not exist: ${importer_path}')
}
-
+
if !os.exists(module_path) {
return error('Module path does not exist: ${module_path}')
}
-
+
// Get import statement
import_statement := generate_import_statement(importer_path, module_path)! // Use local function
-
+
// Try to find the project roots for both paths
importer_project_root := find_project_root(importer_path)
module_project_root := find_project_root(module_path)
-
+
mut dependency := ModuleDependency{
import_statement: import_statement
- module_path: module_path
+ module_path: module_path
}
-
+
// If they're in different projects, we need to extract dependency information
if importer_project_root != module_project_root && module_project_root != '' {
cargo_path := os.join_path(module_project_root, 'Cargo.toml')
@@ -1146,26 +1174,26 @@ pub fn get_module_dependency(importer_path string, module_path string) !ModuleDe
return dependency // Return what we have if we can't get package info
}
dependency.package_name = pkg_info.name
-
+
// Extract version from Cargo.toml if possible
dependencies := extract_dependencies(cargo_path) or {
return dependency // Return what we have if we can't extract dependencies
}
-
+
// Check if the package is already a dependency
importer_cargo_path := os.join_path(importer_project_root, 'Cargo.toml')
if os.exists(importer_cargo_path) {
importer_dependencies := extract_dependencies(importer_cargo_path) or {
map[string]string{} // Empty map if we can't extract dependencies
}
-
+
// Check if package is already a dependency
if pkg_info.name in importer_dependencies {
dependency.is_already_dependency = true
dependency.current_version = importer_dependencies[pkg_info.name]
}
}
-
+
// Add cargo dependency line
dependency.cargo_dependency = '${pkg_info.name} = ""' // Placeholder for version
}
@@ -1173,7 +1201,7 @@ pub fn get_module_dependency(importer_path string, module_path string) !ModuleDe
// Same project, no need for external dependency
dependency.is_in_same_project = true
}
-
+
return dependency
}
@@ -1187,4 +1215,4 @@ pub mut:
current_version string // Current version if already a dependency
is_already_dependency bool // Whether the package is already a dependency
is_in_same_project bool // Whether the module is in the same project
-}
\ No newline at end of file
+}
diff --git a/lib/lang/rust/rust_test.v b/lib/lang/rust/rust_test.v
index 29da1562..c65d4ae6 100644
--- a/lib/lang/rust/rust_test.v
+++ b/lib/lang/rust/rust_test.v
@@ -22,7 +22,7 @@ pub fn another_function() -> i32 {
}
'
functions := rust.extract_functions_from_content(content)
-
+
assert functions.len == 3
assert functions[0] == 'public_function'
assert functions[1] == 'private_function'
@@ -47,7 +47,7 @@ pub struct GenericStruct {
}
'
structs := rust.extract_structs_from_content(content)
-
+
assert structs.len == 3
assert structs[0] == 'PublicStruct'
assert structs[1] == 'PrivateStruct'
@@ -69,7 +69,7 @@ fn main() {
}
'
imports := rust.extract_imports_from_content(content)
-
+
assert imports.len == 3
assert imports[0] == 'std::io'
assert imports[1] == 'std::fs::File'
@@ -79,7 +79,7 @@ fn main() {
fn test_get_module_name() {
// Test regular file
assert rust.get_module_name('/path/to/file.rs') == 'file'
-
+
// Test mod.rs file
assert rust.get_module_name('/path/to/module/mod.rs') == 'module'
}
@@ -88,10 +88,8 @@ fn test_get_module_name() {
fn setup_test_files() !string {
// Create temporary directory
tmp_dir := os.join_path(os.temp_dir(), 'rust_test_${os.getpid()}')
- os.mkdir_all(tmp_dir) or {
- return error('Failed to create temporary directory: ${err}')
- }
-
+ os.mkdir_all(tmp_dir) or { return error('Failed to create temporary directory: ${err}') }
+
// Create test file
test_file_content := '
// This is a test file
@@ -110,13 +108,13 @@ fn private_function() {
println!("Private function");
}
'
-
+
test_file_path := os.join_path(tmp_dir, 'test_file.rs')
os.write_file(test_file_path, test_file_content) or {
os.rmdir_all(tmp_dir) or {}
return error('Failed to write test file: ${err}')
}
-
+
// Create mod.rs file
mod_file_content := '
// This is a mod file
@@ -126,33 +124,33 @@ pub fn mod_function() {
println!("Mod function");
}
'
-
+
mod_file_path := os.join_path(tmp_dir, 'mod.rs')
os.write_file(mod_file_path, mod_file_content) or {
os.rmdir_all(tmp_dir) or {}
return error('Failed to write mod file: ${err}')
}
-
+
// Create submodule directory with mod.rs
submod_dir := os.join_path(tmp_dir, 'submodule')
os.mkdir_all(submod_dir) or {
os.rmdir_all(tmp_dir) or {}
return error('Failed to create submodule directory: ${err}')
}
-
+
submod_file_content := '
// This is a submodule mod file
pub fn submod_function() {
println!("Submodule function");
}
'
-
+
submod_file_path := os.join_path(submod_dir, 'mod.rs')
os.write_file(submod_file_path, submod_file_content) or {
os.rmdir_all(tmp_dir) or {}
return error('Failed to write submodule mod file: ${err}')
}
-
+
// Create Cargo.toml
cargo_content := '
[package]
@@ -164,13 +162,13 @@ edition = "2021"
serde = "1.0"
tokio = { version = "1.25", features = ["full"] }
'
-
+
cargo_path := os.join_path(tmp_dir, 'Cargo.toml')
os.write_file(cargo_path, cargo_content) or {
os.rmdir_all(tmp_dir) or {}
return error('Failed to write Cargo.toml: ${err}')
}
-
+
return tmp_dir
}
@@ -181,10 +179,10 @@ fn teardown_test_files(tmp_dir string) {
fn test_list_functions_in_file() ! {
tmp_dir := setup_test_files()!
defer { teardown_test_files(tmp_dir) }
-
+
test_file_path := os.join_path(tmp_dir, 'test_file.rs')
functions := rust.list_functions_in_file(test_file_path)!
-
+
assert functions.len == 2
assert functions.contains('test_function')
assert functions.contains('private_function')
@@ -193,10 +191,10 @@ fn test_list_functions_in_file() ! {
fn test_list_structs_in_file() ! {
tmp_dir := setup_test_files()!
defer { teardown_test_files(tmp_dir) }
-
+
test_file_path := os.join_path(tmp_dir, 'test_file.rs')
structs := rust.list_structs_in_file(test_file_path)!
-
+
assert structs.len == 1
assert structs[0] == 'TestStruct'
}
@@ -204,10 +202,10 @@ fn test_list_structs_in_file() ! {
fn test_extract_imports() ! {
tmp_dir := setup_test_files()!
defer { teardown_test_files(tmp_dir) }
-
+
test_file_path := os.join_path(tmp_dir, 'test_file.rs')
imports := rust.extract_imports(test_file_path)!
-
+
assert imports.len == 2
assert imports[0] == 'std::io'
assert imports[1] == 'std::fs::File'
@@ -216,9 +214,9 @@ fn test_extract_imports() ! {
fn test_list_modules_in_directory() ! {
tmp_dir := setup_test_files()!
defer { teardown_test_files(tmp_dir) }
-
+
modules := rust.list_modules_in_directory(tmp_dir)!
-
+
// Should contain the module itself (mod.rs), test_file.rs and submodule directory
assert modules.len == 3
assert modules.contains(os.base(tmp_dir)) // Directory name (mod.rs)
@@ -229,10 +227,10 @@ fn test_list_modules_in_directory() ! {
fn test_extract_dependencies() ! {
tmp_dir := setup_test_files()!
defer { teardown_test_files(tmp_dir) }
-
+
cargo_path := os.join_path(tmp_dir, 'Cargo.toml')
dependencies := rust.extract_dependencies(cargo_path)!
-
+
assert dependencies.len == 2
assert dependencies['serde'] == '1.0'
assert dependencies['tokio'] == '{ version = "1.25", features = ["full"] }'
@@ -243,14 +241,14 @@ fn test_extract_impl_methods() {
assert false, 'Failed to read test_impl.rs: ${err}'
return
}
-
+
functions := rust.extract_functions_from_content(test_impl_content)
-
+
assert functions.len == 3
assert functions[0] == 'Currency::new'
assert functions[1] == 'Currency::to_usd'
assert functions[2] == 'Currency::to_currency'
-
+
println('Extracted functions:')
for f in functions {
println(' "${f}"')
@@ -305,9 +303,9 @@ fn test_get_function_from_content() {
}
expected3 := 'fn internal_method(&self) {\n println!("Internal");\n }'
assert decl3.trim_space() == expected3
-
+
// Test function not found
- _ := rust.get_function_from_content(content, 'non_existent_function') or {
+ _ := rust.get_function_from_content(content, 'non_existent_function') or {
assert err.msg() == 'Function non_existent_function not found in content'
return
}
@@ -384,7 +382,7 @@ fn test_get_struct_from_content() {
assert decl5.trim_space() == expected5
// Test struct not found
- _ := rust.get_struct_from_content(content, 'non_existent_struct') or {
+ _ := rust.get_struct_from_content(content, 'non_existent_struct') or {
assert err.msg() == 'Struct non_existent_struct not found in content'
return
}
@@ -394,10 +392,10 @@ fn test_get_struct_from_content() {
fn test_get_struct_from_file() ! {
tmp_dir := setup_test_files()!
defer { teardown_test_files(tmp_dir) }
-
+
test_file_path := os.join_path(tmp_dir, 'test_file.rs')
structs := rust.list_structs_in_file(test_file_path)!
-
+
assert structs.len == 1
assert structs[0] == 'TestStruct'
}
@@ -405,9 +403,9 @@ fn test_get_struct_from_file() ! {
fn test_get_struct_from_module() ! {
tmp_dir := setup_test_files()!
defer { teardown_test_files(tmp_dir) }
-
+
modules := rust.list_modules_in_directory(tmp_dir)!
-
+
// Should contain the module itself (mod.rs), test_file.rs and submodule directory
assert modules.len == 3
assert modules.contains(os.base(tmp_dir)) // Directory name (mod.rs)
diff --git a/lib/mcp/backend_memory.v b/lib/mcp/backend_memory.v
index 349caad6..862cc323 100644
--- a/lib/mcp/backend_memory.v
+++ b/lib/mcp/backend_memory.v
@@ -109,16 +109,14 @@ fn (b &MemoryBackend) prompt_messages_get(name string, arguments map[string]stri
return messages
}
-
fn (b &MemoryBackend) prompt_call(name string, arguments []string) ![]PromptMessage {
// Get the tool handler
handler := b.prompt_handlers[name] or { return error('tool handler not found') }
// Call the handler with the provided arguments
- return handler(arguments) or {panic(err)}
+ return handler(arguments) or { panic(err) }
}
-
// Tool related methods
fn (b &MemoryBackend) tool_exists(name string) !bool {
diff --git a/lib/mcp/baobab/baobab_tools.v b/lib/mcp/baobab/baobab_tools.v
index cfa09afa..9c69f5fc 100644
--- a/lib/mcp/baobab/baobab_tools.v
+++ b/lib/mcp/baobab/baobab_tools.v
@@ -8,160 +8,165 @@ import freeflowuniverse.herolib.baobab.generator
import freeflowuniverse.herolib.baobab.specification
// generate_methods_file MCP Tool
-//
+//
const generate_methods_file_tool = mcp.Tool{
- name: 'generate_methods_file'
- description: 'Generates a methods file with methods for a backend corresponding to thos specified in an OpenAPI or OpenRPC specification'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- })}
- required: ['source']
- }
+ name: 'generate_methods_file'
+ description: 'Generates a methods file with methods for a backend corresponding to thos specified in an OpenAPI or OpenRPC specification'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'source': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ })
+ }
+ required: ['source']
+ }
}
pub fn (d &Baobab) generate_methods_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- source := json.decode[generator.Source](arguments["source"].str())!
- result := generator.generate_methods_file_str(source)
- or {
+ source := json.decode[generator.Source](arguments['source'].str())!
+ result := generator.generate_methods_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_module_from_openapi MCP Tool
const generate_module_from_openapi_tool = mcp.Tool{
- name: 'generate_module_from_openapi'
- description: ''
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })}
- required: ['openapi_path']
- }
+ name: 'generate_module_from_openapi'
+ description: ''
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ required: ['openapi_path']
+ }
}
pub fn (d &Baobab) generate_module_from_openapi_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- openapi_path := arguments["openapi_path"].str()
- result := generator.generate_module_from_openapi(openapi_path)
- or {
+ openapi_path := arguments['openapi_path'].str()
+ result := generator.generate_module_from_openapi(openapi_path) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_methods_interface_file MCP Tool
const generate_methods_interface_file_tool = mcp.Tool{
- name: 'generate_methods_interface_file'
- description: 'Generates a methods interface file with method interfaces for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- })}
- required: ['source']
- }
+ name: 'generate_methods_interface_file'
+ description: 'Generates a methods interface file with method interfaces for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'source': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ })
+ }
+ required: ['source']
+ }
}
pub fn (d &Baobab) generate_methods_interface_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- source := json.decode[generator.Source](arguments["source"].str())!
- result := generator.generate_methods_interface_file_str(source)
- or {
+ source := json.decode[generator.Source](arguments['source'].str())!
+ result := generator.generate_methods_interface_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_model_file MCP Tool
const generate_model_file_tool = mcp.Tool{
- name: 'generate_model_file'
- description: 'Generates a model file with data structures for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- })}
- required: ['source']
- }
+ name: 'generate_model_file'
+ description: 'Generates a model file with data structures for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'source': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ })
+ }
+ required: ['source']
+ }
}
pub fn (d &Baobab) generate_model_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- source := json.decode[generator.Source](arguments["source"].str())!
- result := generator.generate_model_file_str(source)
- or {
+ source := json.decode[generator.Source](arguments['source'].str())!
+ result := generator.generate_model_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_methods_example_file MCP Tool
const generate_methods_example_file_tool = mcp.Tool{
- name: 'generate_methods_example_file'
- description: 'Generates a methods example file with example implementations for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- })}
- required: ['source']
- }
+ name: 'generate_methods_example_file'
+ description: 'Generates a methods example file with example implementations for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'source': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ })
+ }
+ required: ['source']
+ }
}
pub fn (d &Baobab) generate_methods_example_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- source := json.decode[generator.Source](arguments["source"].str())!
- result := generator.generate_methods_example_file_str(source)
- or {
+ source := json.decode[generator.Source](arguments['source'].str())!
+ result := generator.generate_methods_example_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/baobab/baobab_tools_test.v b/lib/mcp/baobab/baobab_tools_test.v
index 3101e129..9c6c51a3 100644
--- a/lib/mcp/baobab/baobab_tools_test.v
+++ b/lib/mcp/baobab/baobab_tools_test.v
@@ -13,7 +13,7 @@ import os
fn test_generate_module_from_openapi_tool() {
// Verify the tool definition
assert generate_module_from_openapi_tool.name == 'generate_module_from_openapi', 'Tool name should be "generate_module_from_openapi"'
-
+
// Verify the input schema
assert generate_module_from_openapi_tool.input_schema.typ == 'object', 'Input schema type should be "object"'
assert 'openapi_path' in generate_module_from_openapi_tool.input_schema.properties, 'Input schema should have "openapi_path" property'
@@ -26,14 +26,14 @@ fn test_generate_module_from_openapi_tool_handler_error() {
// Create arguments with a non-existent file path
mut arguments := map[string]json2.Any{}
arguments['openapi_path'] = json2.Any('non_existent_file.yaml')
-
+
// Call the handler
result := generate_module_from_openapi_tool_handler(arguments) or {
// If the handler returns an error, that's expected
assert err.msg().contains(''), 'Error message should not be empty'
return
}
-
+
// If we get here, the handler should have returned an error result
assert result.is_error, 'Result should indicate an error'
assert result.content.len > 0, 'Error content should not be empty'
@@ -48,7 +48,7 @@ fn test_mcp_tool_call_integration() {
assert false, 'Failed to create MCP server: ${err}'
return
}
-
+
// Create a temporary OpenAPI file for testing
temp_dir := os.temp_dir()
temp_file := os.join_path(temp_dir, 'test_openapi.yaml')
@@ -56,30 +56,30 @@ fn test_mcp_tool_call_integration() {
assert false, 'Failed to create temporary file: ${err}'
return
}
-
+
// Sample tool call request
tool_call_request := '{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"generate_module_from_openapi","arguments":{"openapi_path":"${temp_file}"}}}'
-
+
// Process the request through the handler
response := server.handler.handle(tool_call_request) or {
// Clean up the temporary file
os.rm(temp_file) or {}
-
+
// If the handler returns an error, that's expected in this test environment
// since we might not have all dependencies set up
return
}
-
+
// Clean up the temporary file
os.rm(temp_file) or {}
-
+
// Decode the response to verify its structure
decoded_response := jsonrpc.decode_response(response) or {
// In a test environment, we might get an error due to missing dependencies
// This is acceptable for this test
return
}
-
+
// If we got a successful response, verify it
if !decoded_response.is_error() {
// Parse the result to verify its contents
@@ -87,15 +87,15 @@ fn test_mcp_tool_call_integration() {
assert false, 'Failed to get result: ${err}'
return
}
-
+
// Decode the result to check the content
result_map := json2.raw_decode(result_json) or {
assert false, 'Failed to decode result: ${err}'
return
}.as_map()
-
+
// Verify the result structure
assert 'isError' in result_map, 'Result should have isError field'
assert 'content' in result_map, 'Result should have content field'
}
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/baobab/command.v b/lib/mcp/baobab/command.v
index 7fe612ba..d75a67c8 100644
--- a/lib/mcp/baobab/command.v
+++ b/lib/mcp/baobab/command.v
@@ -2,22 +2,21 @@ module baobab
import cli
-pub const command := cli.Command{
- sort_flags: true
- name: 'baobab'
+pub const command = cli.Command{
+ sort_flags: true
+ name: 'baobab'
// execute: cmd_mcpgen
description: 'baobab command'
- commands: [
+ commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the Baobab server'
- }
+ },
]
-
}
fn cmd_start(cmd cli.Command) ! {
mut server := new_mcp_server(&Baobab{})!
server.start()!
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/baobab/mcp_test.v b/lib/mcp/baobab/mcp_test.v
index 0d33df6a..2ffa7898 100644
--- a/lib/mcp/baobab/mcp_test.v
+++ b/lib/mcp/baobab/mcp_test.v
@@ -67,7 +67,7 @@ fn test_mcp_server_initialize() {
// Verify the protocol version matches what was requested
assert result.protocol_version == '2024-11-05', 'Protocol version should match the request'
-
+
// Verify server info
assert result.server_info.name == 'developer', 'Server name should be "developer"'
}
@@ -113,7 +113,7 @@ fn test_tools_list() {
// Verify that the tools array exists and contains the expected tool
tools := result_map['tools'].arr()
assert tools.len > 0, 'Tools list should not be empty'
-
+
// Find the generate_module_from_openapi tool
mut found_tool := false
for tool in tools {
@@ -123,6 +123,6 @@ fn test_tools_list() {
break
}
}
-
+
assert found_tool, 'generate_module_from_openapi tool should be registered'
}
diff --git a/lib/mcp/baobab/server.v b/lib/mcp/baobab/server.v
index cee8253a..d0d4942a 100644
--- a/lib/mcp/baobab/server.v
+++ b/lib/mcp/baobab/server.v
@@ -13,18 +13,18 @@ pub fn new_mcp_server(v &Baobab) !&mcp.Server {
// Initialize the server with the empty handlers map
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
- 'generate_module_from_openapi': generate_module_from_openapi_tool
- 'generate_methods_file': generate_methods_file_tool
+ 'generate_module_from_openapi': generate_module_from_openapi_tool
+ 'generate_methods_file': generate_methods_file_tool
'generate_methods_interface_file': generate_methods_interface_file_tool
- 'generate_model_file': generate_model_file_tool
- 'generate_methods_example_file': generate_methods_example_file_tool
+ 'generate_model_file': generate_model_file_tool
+ 'generate_methods_example_file': generate_methods_example_file_tool
}
tool_handlers: {
- 'generate_module_from_openapi': v.generate_module_from_openapi_tool_handler
- 'generate_methods_file': v.generate_methods_file_tool_handler
+ 'generate_module_from_openapi': v.generate_module_from_openapi_tool_handler
+ 'generate_methods_file': v.generate_methods_file_tool_handler
'generate_methods_interface_file': v.generate_methods_interface_file_tool_handler
- 'generate_model_file': v.generate_model_file_tool_handler
- 'generate_methods_example_file': v.generate_methods_example_file_tool_handler
+ 'generate_model_file': v.generate_model_file_tool_handler
+ 'generate_methods_example_file': v.generate_methods_example_file_tool_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
@@ -35,4 +35,4 @@ pub fn new_mcp_server(v &Baobab) !&mcp.Server {
}
})!
return server
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/cmd/compile.vsh b/lib/mcp/cmd/compile.vsh
index 0f994400..687d701f 100755
--- a/lib/mcp/cmd/compile.vsh
+++ b/lib/mcp/cmd/compile.vsh
@@ -13,20 +13,20 @@ prod_mode := fp.bool('prod', `p`, false, 'Build production version (optimized)')
help_requested := fp.bool('help', `h`, false, 'Show help message')
if help_requested {
- println(fp.usage())
- exit(0)
+ println(fp.usage())
+ exit(0)
}
additional_args := fp.finalize() or {
- eprintln(err)
- println(fp.usage())
- exit(1)
+ eprintln(err)
+ println(fp.usage())
+ exit(1)
}
if additional_args.len > 0 {
- eprintln('Unexpected arguments: ${additional_args.join(' ')}')
- println(fp.usage())
- exit(1)
+ eprintln('Unexpected arguments: ${additional_args.join(' ')}')
+ println(fp.usage())
+ exit(1)
}
// Change to the mcp directory
@@ -36,20 +36,20 @@ os.chdir(mcp_dir) or { panic('Failed to change directory to ${mcp_dir}: ${err}')
// Set MCPPATH based on OS
mut mcppath := '/usr/local/bin/mcp'
if os.user_os() == 'macos' {
- mcppath = os.join_path(os.home_dir(), 'hero/bin/mcp')
+ mcppath = os.join_path(os.home_dir(), 'hero/bin/mcp')
}
// Set compilation command based on OS and mode
compile_cmd := if prod_mode {
- 'v -enable-globals -w -n -prod mcp.v'
+ 'v -enable-globals -w -n -prod mcp.v'
} else {
- 'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals mcp.v'
+ 'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals mcp.v'
}
println('Building MCP in ${if prod_mode { 'production' } else { 'debug' }} mode...')
if os.system(compile_cmd) != 0 {
- panic('Failed to compile mcp.v with command: ${compile_cmd}')
+ panic('Failed to compile mcp.v with command: ${compile_cmd}')
}
// Make executable
diff --git a/lib/mcp/cmd/mcp.v b/lib/mcp/cmd/mcp.v
index ca64f92f..c15e7845 100644
--- a/lib/mcp/cmd/mcp.v
+++ b/lib/mcp/cmd/mcp.v
@@ -44,11 +44,11 @@ mcp
description: 'show verbose output'
})
- mut cmd_inspector := cli.Command{
+ mut cmd_inspector := Command{
sort_flags: true
name: 'inspector'
execute: cmd_inspector_execute
- description: 'will list existing mdbooks'
+ description: 'will list existing mdbooks'
}
cmd_inspector.add_flag(Flag{
@@ -67,7 +67,6 @@ mcp
description: 'open inspector'
})
-
cmd_mcp.add_command(rhai_mcp.command)
// cmd_mcp.add_command(baobab.command)
// cmd_mcp.add_command(vcode.command)
@@ -77,7 +76,7 @@ mcp
cmd_mcp.parse(os.args)
}
-fn cmd_inspector_execute(cmd cli.Command) ! {
+fn cmd_inspector_execute(cmd Command) ! {
open := cmd.flags.get_bool('open') or { false }
if open {
osal.exec(cmd: 'open http://localhost:5173')!
diff --git a/lib/mcp/factory.v b/lib/mcp/factory.v
index ae538a1d..918b624f 100644
--- a/lib/mcp/factory.v
+++ b/lib/mcp/factory.v
@@ -16,32 +16,29 @@ pub:
// new_server creates a new MCP server
pub fn new_server(backend Backend, params ServerParams) !&Server {
mut server := &Server{
- ServerConfiguration: params.config,
- backend: backend,
+ ServerConfiguration: params.config
+ backend: backend
}
// Create a handler with the core MCP procedures registered
handler := jsonrpc.new_handler(jsonrpc.Handler{
procedures: {
- ...params.handlers,
+ // ...params.handlers,
// Core handlers
- 'initialize': server.initialize_handler,
- 'notifications/initialized': initialized_notification_handler,
-
+ 'initialize': server.initialize_handler
+ 'notifications/initialized': initialized_notification_handler
// Resource handlers
- 'resources/list': server.resources_list_handler,
- 'resources/read': server.resources_read_handler,
- 'resources/templates/list': server.resources_templates_list_handler,
- 'resources/subscribe': server.resources_subscribe_handler,
-
+ 'resources/list': server.resources_list_handler
+ 'resources/read': server.resources_read_handler
+ 'resources/templates/list': server.resources_templates_list_handler
+ 'resources/subscribe': server.resources_subscribe_handler
// Prompt handlers
- 'prompts/list': server.prompts_list_handler,
- 'prompts/get': server.prompts_get_handler,
- 'completion/complete': server.prompts_get_handler,
-
+ 'prompts/list': server.prompts_list_handler
+ 'prompts/get': server.prompts_get_handler
+ 'completion/complete': server.prompts_get_handler
// Tool handlers
- 'tools/list': server.tools_list_handler,
- 'tools/call': server.tools_call_handler
+ 'tools/list': server.tools_list_handler
+ 'tools/call': server.tools_call_handler
}
})!
diff --git a/lib/mcp/generics.v b/lib/mcp/generics.v
index 4dde2f1b..a8411f1c 100644
--- a/lib/mcp/generics.v
+++ b/lib/mcp/generics.v
@@ -1,6 +1,5 @@
module mcp
-
pub fn result_to_mcp_tool_contents[T](result T) []ToolContent {
return [result_to_mcp_tool_content(result)]
}
@@ -50,4 +49,4 @@ pub fn array_to_mcp_tool_contents[U](array []U) []ToolContent {
contents << result_to_mcp_tool_content(item)
}
return contents
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/handler_prompts.v b/lib/mcp/handler_prompts.v
index d35ba6aa..bfb26fe0 100644
--- a/lib/mcp/handler_prompts.v
+++ b/lib/mcp/handler_prompts.v
@@ -110,7 +110,8 @@ fn (mut s Server) prompts_get_handler(data string) !string {
// messages := s.backend.prompt_messages_get(request.params.name, request.params.arguments)!
// Create a success response with the result
- response := jsonrpc.new_response_generic[PromptGetResult](request_map['id'].int(), PromptGetResult{
+ response := jsonrpc.new_response_generic[PromptGetResult](request_map['id'].int(),
+ PromptGetResult{
description: prompt.description
messages: messages
})
diff --git a/lib/mcp/handler_tools.v b/lib/mcp/handler_tools.v
index fdeefbc8..d4410a59 100644
--- a/lib/mcp/handler_tools.v
+++ b/lib/mcp/handler_tools.v
@@ -26,8 +26,8 @@ pub:
pub struct ToolItems {
pub:
- typ string @[json: 'type']
- enum []string
+ typ string @[json: 'type']
+ enum []string
properties map[string]ToolProperty
}
@@ -63,7 +63,7 @@ fn (mut s Server) tools_list_handler(data string) !string {
// TODO: Implement pagination logic using the cursor
// For now, return all tools
-encoded := json.encode(ToolListResult{
+ encoded := json.encode(ToolListResult{
tools: s.backend.tool_list()!
next_cursor: '' // Empty if no more pages
})
@@ -148,4 +148,4 @@ pub fn error_tool_call_result(err IError) ToolCallResult {
text: err.msg()
}]
}
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/mcpgen/command.v b/lib/mcp/mcpgen/command.v
index 37735734..c46d43ec 100644
--- a/lib/mcp/mcpgen/command.v
+++ b/lib/mcp/mcpgen/command.v
@@ -2,22 +2,21 @@ module mcpgen
import cli
-pub const command := cli.Command{
- sort_flags: true
- name: 'mcpgen'
+pub const command = cli.Command{
+ sort_flags: true
+ name: 'mcpgen'
// execute: cmd_mcpgen
description: 'will list existing mdbooks'
- commands: [
+ commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the MCP server'
- }
+ },
]
-
}
fn cmd_start(cmd cli.Command) ! {
mut server := new_mcp_server(&MCPGen{})!
server.start()!
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/mcpgen/mcpgen.v b/lib/mcp/mcpgen/mcpgen.v
index 53539ba8..db98bd01 100644
--- a/lib/mcp/mcpgen/mcpgen.v
+++ b/lib/mcp/mcpgen/mcpgen.v
@@ -7,7 +7,7 @@ import freeflowuniverse.herolib.schemas.jsonschema.codegen
import os
pub struct FunctionPointer {
- name string // name of function
+ name string // name of function
module_path string // path to module
}
@@ -15,14 +15,14 @@ pub struct FunctionPointer {
// returns an MCP Tool code in v for attaching the function to the mcp server
// function_pointers: A list of function pointers to generate tools for
pub fn (d &MCPGen) create_mcp_tools_code(function_pointers []FunctionPointer) !string {
- mut str := ""
+ mut str := ''
for function_pointer in function_pointers {
str += d.create_mcp_tool_code(function_pointer.name, function_pointer.module_path)!
}
-
+
return str
-}
+}
// create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
// returns an MCP Tool code in v for attaching the function to the mcp server
@@ -30,11 +30,10 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
if !os.exists(module_path) {
return error('Module path does not exist: ${module_path}')
}
-
+
function := code.get_function_from_module(module_path, function_name) or {
return error('Failed to get function ${function_name} from module ${module_path}\n${err}')
}
-
mut types := map[string]string{}
for param in function.params {
@@ -43,9 +42,9 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
types[param.typ.symbol()] = code.get_type_from_module(module_path, param.typ.symbol())!
}
}
-
+
// Get the result type if it's a struct
- mut result_ := ""
+ mut result_ := ''
if function.result.typ is code.Result {
result_type := (function.result.typ as code.Result).typ
if result_type is code.Object {
@@ -60,7 +59,7 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
handler := d.create_mcp_tool_handler(function, types, result_)!
str := $tmpl('./templates/tool_code.v.template')
return str
-}
+}
// create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments
@@ -68,7 +67,7 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
// result: The type of result of the create_mcp_tool function. Could be simply string, or struct {...}
pub fn (d &MCPGen) create_mcp_tool_handler(function code.Function, types map[string]string, result_ string) !string {
decode_stmts := function.params.map(argument_decode_stmt(it)).join_lines()
-
+
function_call := 'd.${function.name}(${function.params.map(it.name).join(',')})'
result := code.parse_type(result_)
str := $tmpl('./templates/tool_handler.v.template')
@@ -92,6 +91,7 @@ pub fn argument_decode_stmt(param code.Param) string {
panic('Unsupported type: ${param.typ}')
}
}
+
/*
in @generate_mcp.v , implement a create_mpc_tool_handler function that given a vlang function string and the types that map to their corresponding type definitions (for instance struct some_type: SomeType{...}), generates a vlang function such as the following:
@@ -103,7 +103,6 @@ pub fn (d &MCPGen) create_mcp_tool_tool_handler(arguments map[string]Any) !mcp.T
}
*/
-
// create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments
// types: A map of struct names to their definitions for complex parameter types
@@ -111,14 +110,14 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// Create input schema for parameters
mut properties := map[string]jsonschema.SchemaRef{}
mut required := []string{}
-
+
for param in function.params {
// Add to required parameters
required << param.name
-
+
// Create property for this parameter
mut property := jsonschema.SchemaRef{}
-
+
// Check if this is a complex type defined in the types map
if param.typ.symbol() in types {
// Parse the struct definition to create a nested schema
@@ -133,21 +132,21 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// Handle primitive types
property = codegen.typesymbol_to_schema(param.typ.symbol())
}
-
+
properties[param.name] = property
}
-
+
// Create the input schema
input_schema := jsonschema.Schema{
- typ: 'object',
- properties: properties,
- required: required
+ typ: 'object'
+ properties: properties
+ required: required
}
-
+
// Create and return the Tool
return mcp.Tool{
- name: function.name,
- description: function.description,
+ name: function.name
+ description: function.description
input_schema: input_schema
}
}
@@ -157,7 +156,7 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// // returns: A jsonschema.Schema for the given input type
// // errors: Returns an error if the input type is not supported
// pub fn (d MCPGen) create_mcp_tool_input_schema(input string) !jsonschema.Schema {
-
+
// // if input is a primitive type, return a mcp jsonschema.Schema with that type
// if input == 'string' {
// return jsonschema.Schema{
@@ -176,30 +175,30 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// typ: 'boolean'
// }
// }
-
+
// // if input is a struct, return a mcp jsonschema.Schema with typ 'object' and properties for each field in the struct
// if input.starts_with('pub struct ') {
// struct_name := input[11..].split(' ')[0]
// fields := parse_struct_fields(input)
// mut properties := map[string]jsonschema.Schema{}
-
+
// for field_name, field_type in fields {
// property := jsonschema.Schema{
// typ: d.create_mcp_tool_input_schema(field_type)!.typ
// }
// properties[field_name] = property
// }
-
+
// return jsonschema.Schema{
// typ: 'object',
// properties: properties
// }
// }
-
+
// // if input is an array, return a mcp jsonschema.Schema with typ 'array' and items of the item type
// if input.starts_with('[]') {
// item_type := input[2..]
-
+
// // For array types, we create a schema with type 'array'
// // The actual item type is determined by the primitive type
// mut item_type_str := 'string' // default
@@ -210,74 +209,73 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// } else if item_type == 'bool' {
// item_type_str = 'boolean'
// }
-
+
// // Create a property for the array items
// mut property := jsonschema.Schema{
// typ: 'array'
// }
-
+
// // Add the property to the schema
// mut properties := map[string]jsonschema.Schema{}
// properties['items'] = property
-
+
// return jsonschema.Schema{
// typ: 'array',
// properties: properties
// }
// }
-
+
// // Default to string type for unknown types
// return jsonschema.Schema{
// typ: 'string'
// }
// }
-
// parse_struct_fields parses a V language struct definition string and returns a map of field names to their types
fn parse_struct_fields(struct_def string) map[string]string {
mut fields := map[string]string{}
-
+
// Find the opening and closing braces of the struct definition
start_idx := struct_def.index('{') or { return fields }
end_idx := struct_def.last_index('}') or { return fields }
-
+
// Extract the content between the braces
struct_content := struct_def[start_idx + 1..end_idx].trim_space()
-
+
// Split the content by newlines to get individual field definitions
field_lines := struct_content.split('
')
-
+
for line in field_lines {
trimmed_line := line.trim_space()
-
+
// Skip empty lines and comments
if trimmed_line == '' || trimmed_line.starts_with('//') {
continue
}
-
+
// Handle pub: or mut: prefixes
mut field_def := trimmed_line
if field_def.starts_with('pub:') || field_def.starts_with('mut:') {
field_def = field_def.all_after(':').trim_space()
}
-
+
// Split by whitespace to separate field name and type
parts := field_def.split_any(' ')
if parts.len < 2 {
continue
}
-
+
field_name := parts[0]
field_type := parts[1..].join(' ')
-
+
// Handle attributes like @[json: 'name']
if field_name.contains('@[') {
continue
}
-
+
fields[field_name] = field_type
}
-
+
return fields
}
diff --git a/lib/mcp/mcpgen/mcpgen_tools.v b/lib/mcp/mcpgen/mcpgen_tools.v
index f583502c..4bea8e4a 100644
--- a/lib/mcp/mcpgen/mcpgen_tools.v
+++ b/lib/mcp/mcpgen/mcpgen_tools.v
@@ -12,42 +12,41 @@ import x.json2 as json { Any }
// function_pointers: A list of function pointers to generate tools for
const create_mcp_tools_code_tool = mcp.Tool{
- name: 'create_mcp_tools_code'
- description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
+ name: 'create_mcp_tools_code'
+ description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
returns an MCP Tool code in v for attaching the function to the mcp server
function_pointers: A list of function pointers to generate tools for'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {
- 'function_pointers': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'array'
- items: jsonschema.Items(jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'object'
- properties: {
- 'name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- 'module_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- required: ['name', 'module_path']
- }))
- })
- }
- required: ['function_pointers']
- }
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'function_pointers': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'array'
+ items: jsonschema.Items(jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'name': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'module_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ required: ['name', 'module_path']
+ }))
+ })
+ }
+ required: ['function_pointers']
+ }
}
pub fn (d &MCPGen) create_mcp_tools_code_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
- function_pointers := json.decode[[]FunctionPointer](arguments["function_pointers"].str())!
- result := d.create_mcp_tools_code(function_pointers)
- or {
+ function_pointers := json.decode[[]FunctionPointer](arguments['function_pointers'].str())!
+ result := d.create_mcp_tools_code(function_pointers) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
@@ -59,10 +58,10 @@ returns an MCP Tool code in v for attaching the function to the mcp server'
typ: 'object'
properties: {
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
})
'module_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
})
}
required: ['function_name', 'module_path']
diff --git a/lib/mcp/mcpgen/server.v b/lib/mcp/mcpgen/server.v
index 21bd0d1f..4196a7ce 100644
--- a/lib/mcp/mcpgen/server.v
+++ b/lib/mcp/mcpgen/server.v
@@ -12,16 +12,16 @@ pub fn new_mcp_server(v &MCPGen) !&mcp.Server {
// Initialize the server with the empty handlers map
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
- 'create_mcp_tool_code': create_mcp_tool_code_tool
- 'create_mcp_tool_const': create_mcp_tool_const_tool
+ 'create_mcp_tool_code': create_mcp_tool_code_tool
+ 'create_mcp_tool_const': create_mcp_tool_const_tool
'create_mcp_tool_handler': create_mcp_tool_handler_tool
- 'create_mcp_tools_code': create_mcp_tools_code_tool
+ 'create_mcp_tools_code': create_mcp_tools_code_tool
}
tool_handlers: {
- 'create_mcp_tool_code': v.create_mcp_tool_code_tool_handler
- 'create_mcp_tool_const': v.create_mcp_tool_const_tool_handler
+ 'create_mcp_tool_code': v.create_mcp_tool_code_tool_handler
+ 'create_mcp_tool_const': v.create_mcp_tool_const_tool_handler
'create_mcp_tool_handler': v.create_mcp_tool_handler_tool_handler
- 'create_mcp_tools_code': v.create_mcp_tools_code_tool_handler
+ 'create_mcp_tools_code': v.create_mcp_tools_code_tool_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
@@ -32,4 +32,4 @@ pub fn new_mcp_server(v &MCPGen) !&mcp.Server {
}
})!
return server
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/pugconvert/cmd/main.v b/lib/mcp/pugconvert/cmd/main.v
index d3642264..0d4c483c 100644
--- a/lib/mcp/pugconvert/cmd/main.v
+++ b/lib/mcp/pugconvert/cmd/main.v
@@ -8,7 +8,7 @@ fn main() {
eprintln('Failed to create MCP server: ${err}')
return
}
-
+
// Start the server
server.start() or {
eprintln('Failed to start MCP server: ${err}')
diff --git a/lib/mcp/pugconvert/logic/convertpug.v b/lib/mcp/pugconvert/logic/convertpug.v
index 33206bb7..dfb6cca9 100644
--- a/lib/mcp/pugconvert/logic/convertpug.v
+++ b/lib/mcp/pugconvert/logic/convertpug.v
@@ -5,8 +5,7 @@ import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import json
-pub fn convert_pug(mydir string)! {
-
+pub fn convert_pug(mydir string) ! {
mut d := pathlib.get_dir(path: mydir, create: false)!
list := d.list(regex: [r'.*\.pug$'], include_links: false, files_only: true)!
for item in list.paths {
@@ -17,12 +16,12 @@ pub fn convert_pug(mydir string)! {
// extract_template parses AI response content to extract just the template
fn extract_template(raw_content string) string {
mut content := raw_content
-
+
// First check for tag
if content.contains('') {
content = content.split('')[1].trim_space()
}
-
+
// Look for ```jet code block
if content.contains('```jet') {
parts := content.split('```jet')
@@ -39,7 +38,7 @@ fn extract_template(raw_content string) string {
// Take the content between the first set of ```
// This handles both ```content``` and cases where there's only an opening ```
content = parts[1].trim_space()
-
+
// If we only see an opening ``` but no closing, cleanup any remaining backticks
// to avoid incomplete formatting markers
if !content.contains('```') {
@@ -47,16 +46,16 @@ fn extract_template(raw_content string) string {
}
}
}
-
+
return content
}
-pub fn convert_pug_file(myfile string)! {
+pub fn convert_pug_file(myfile string) ! {
println(myfile)
// Create new file path by replacing .pug extension with .jet
jet_file := myfile.replace('.pug', '.jet')
-
+
// Check if jet file already exists, if so skip processing
mut jet_path_exist := pathlib.get_file(path: jet_file, create: false)!
if jet_path_exist.exists() {
@@ -69,7 +68,7 @@ pub fn convert_pug_file(myfile string)! {
mut l := loader()
mut client := openai.get()!
-
+
base_instruction := '
You are a template language converter. You convert Pug templates to Jet templates.
@@ -82,25 +81,24 @@ pub fn convert_pug_file(myfile string)! {
only output the resulting template, no explanation, no steps, just the jet template
'
-
// We'll retry up to 5 times if validation fails
max_attempts := 5
mut attempts := 0
mut is_valid := false
mut error_message := ''
mut template := ''
-
+
for attempts < max_attempts && !is_valid {
attempts++
-
- mut system_content := texttools.dedent(base_instruction) + "\n" + l.jet()
+
+ mut system_content := texttools.dedent(base_instruction) + '\n' + l.jet()
mut user_prompt := ''
-
+
// Create different prompts for first attempt vs retries
if attempts == 1 {
// First attempt - convert from PUG
- user_prompt = texttools.dedent(base_user_prompt) + "\n" + content
-
+ user_prompt = texttools.dedent(base_user_prompt) + '\n' + content
+
// Print what we're sending to the AI service
println('Sending to OpenAI for conversion:')
println('--------------------------------')
@@ -127,53 +125,57 @@ Please fix the template and try again. Learn from feedback and check which jet t
Return only the corrected Jet template.
Dont send back more information than the fixed template, make sure its in jet format.
- '
-
- // Print what we're sending for the retry
+ ' // Print what we're sending for the retry
+
println('Sending to OpenAI for correction:')
println('--------------------------------')
println(user_prompt)
println('--------------------------------')
}
-
+
mut m := openai.Messages{
messages: [
openai.Message{
role: .system
content: system_content
- },
+ },
openai.Message{
role: .user
content: user_prompt
},
- ]}
-
+ ]
+ }
+
// Create a chat completion request
- res := client.chat_completion(msgs: m, model: "deepseek-r1-distill-llama-70b", max_completion_tokens: 64000)!
-
- println("-----")
-
+ res := client.chat_completion(
+ msgs: m
+ model: 'deepseek-r1-distill-llama-70b'
+ max_completion_tokens: 64000
+ )!
+
+ println('-----')
+
// Print AI response before extraction
println('Response received from AI:')
println('--------------------------------')
println(res.choices[0].message.content)
println('--------------------------------')
-
+
// Extract the template from the AI response
template = extract_template(res.choices[0].message.content)
-
+
println('Extracted template for ${myfile}:')
println('--------------------------------')
println(template)
println('--------------------------------')
-
+
// Validate the template
validation_result := jetvaliditycheck(template) or {
// If validation service is unavailable, we'll just proceed with the template
println('Warning: Template validation service unavailable: ${err}')
break
}
-
+
// Check if template is valid
if validation_result.is_valid {
is_valid = true
@@ -183,19 +185,19 @@ Dont send back more information than the fixed template, make sure its in jet fo
println('Template validation failed: ${error_message}')
}
}
-
+
// Report the validation outcome
if is_valid {
println('Successfully converted template after ${attempts} attempt(s)')
// Create the file and write the processed content
- println("Converted to: ${jet_file}")
+ println('Converted to: ${jet_file}')
mut jet_path := pathlib.get_file(path: jet_file, create: true)!
- jet_path.write(template)!
+ jet_path.write(template)!
} else if attempts >= max_attempts {
println('Warning: Could not validate template after ${max_attempts} attempts')
println('Using best attempt despite validation errors: ${error_message}')
- jet_file2:=jet_file.replace(".jet","_error.jet")
+ jet_file2 := jet_file.replace('.jet', '_error.jet')
mut jet_path2 := pathlib.get_file(path: jet_file2, create: true)!
- jet_path2.write(template)!
+ jet_path2.write(template)!
}
}
diff --git a/lib/mcp/pugconvert/logic/jetvalidation.v b/lib/mcp/pugconvert/logic/jetvalidation.v
index 3daca315..bad12bf3 100644
--- a/lib/mcp/pugconvert/logic/jetvalidation.v
+++ b/lib/mcp/pugconvert/logic/jetvalidation.v
@@ -5,9 +5,9 @@ import json
// JetTemplateResponse is the expected response structure from the validation service
struct JetTemplateResponse {
- valid bool
- message string
- error string
+ valid bool
+ message string
+ error string
}
// ValidationResult represents the result of a template validation
@@ -30,7 +30,7 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
template_data := json.encode({
'template': jetcontent
})
-
+
// Print what we're sending to the AI service
// println('Sending to JET validation service:')
// println('--------------------------------')
@@ -39,8 +39,8 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
// Send the POST request to the validation endpoint
req := httpconnection.Request{
- prefix: 'checkjet',
- data: template_data,
+ prefix: 'checkjet'
+ data: template_data
dataformat: .json
}
@@ -49,7 +49,7 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
// Handle connection errors
return ValidationResult{
is_valid: false
- error: 'Connection error: ${err}'
+ error: 'Connection error: ${err}'
}
}
@@ -58,12 +58,12 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
// If we can't parse JSON using our struct, the server didn't return the expected format
return ValidationResult{
is_valid: false
- error: 'Server returned unexpected format: ${err.msg()}'
+ error: 'Server returned unexpected format: ${err.msg()}'
}
}
// Use the structured response data
- if response.valid == false{
+ if response.valid == false {
error_msg := if response.error != '' {
response.error
} else if response.message != '' {
@@ -74,12 +74,12 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
return ValidationResult{
is_valid: false
- error: error_msg
+ error: error_msg
}
}
return ValidationResult{
is_valid: true
- error: ''
+ error: ''
}
}
diff --git a/lib/mcp/pugconvert/logic/loader.v b/lib/mcp/pugconvert/logic/loader.v
index 9853cac9..2a35d454 100644
--- a/lib/mcp/pugconvert/logic/loader.v
+++ b/lib/mcp/pugconvert/logic/loader.v
@@ -10,12 +10,11 @@ pub mut:
}
fn (mut loader FileLoader) load() {
- loader.embedded_files["jet"]=$embed_file('templates/jet_instructions.md')
+ loader.embedded_files['jet'] = $embed_file('templates/jet_instructions.md')
}
-
fn (mut loader FileLoader) jet() string {
- c:=loader.embedded_files["jet"] or { panic("bug embed") }
+ c := loader.embedded_files['jet'] or { panic('bug embed') }
return c.to_string()
}
@@ -23,4 +22,4 @@ fn loader() FileLoader {
mut loader := FileLoader{}
loader.load()
return loader
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/pugconvert/mcp/handlers.v b/lib/mcp/pugconvert/mcp/handlers.v
index ee129aa8..00deb811 100644
--- a/lib/mcp/pugconvert/mcp/handlers.v
+++ b/lib/mcp/pugconvert/mcp/handlers.v
@@ -8,47 +8,47 @@ import os
pub fn handler(arguments map[string]Any) !mcp.ToolCallResult {
path := arguments['path'].str()
-
+
// Check if path exists
if !os.exists(path) {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
+ content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
}
}
-
+
// Determine if path is a file or directory
is_directory := os.is_dir(path)
-
- mut message := ""
-
+
+ mut message := ''
+
if is_directory {
// Convert all pug files in the directory
pugconvert.convert_pug(path) or {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error converting pug files in directory: ${err}")
+ content: mcp.result_to_mcp_tool_contents[string]('Error converting pug files in directory: ${err}')
}
}
message = "Successfully converted all pug files in directory '${path}'"
- } else if path.ends_with(".pug") {
+ } else if path.ends_with('.pug') {
// Convert a single pug file
pugconvert.convert_pug_file(path) or {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error converting pug file: ${err}")
+ content: mcp.result_to_mcp_tool_contents[string]('Error converting pug file: ${err}')
}
}
message = "Successfully converted pug file '${path}'"
} else {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
+ content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
}
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](message)
+ content: mcp.result_to_mcp_tool_contents[string](message)
}
}
diff --git a/lib/mcp/pugconvert/mcp/specifications.v b/lib/mcp/pugconvert/mcp/specifications.v
index 0716f0a7..d01ff2d2 100644
--- a/lib/mcp/pugconvert/mcp/specifications.v
+++ b/lib/mcp/pugconvert/mcp/specifications.v
@@ -1,18 +1,18 @@
module mcp
import freeflowuniverse.herolib.mcp
-import x.json2 as json { Any }
+import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.mcp.logger
const specs = mcp.Tool{
name: 'pugconvert'
description: 'Convert Pug template files to Jet template files'
- input_schema: jsonschema.Schema{
+ input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string',
+ typ: 'string'
description: 'Path to a .pug file or directory containing .pug files to convert'
})
}
diff --git a/lib/mcp/rhai/cmd/main.v b/lib/mcp/rhai/cmd/main.v
index 827a3f90..13704783 100644
--- a/lib/mcp/rhai/cmd/main.v
+++ b/lib/mcp/rhai/cmd/main.v
@@ -9,7 +9,7 @@ fn main() {
log.error('Failed to create MCP server: ${err}')
return
}
-
+
// Start the server
server.start() or {
log.error('Failed to start MCP server: ${err}')
diff --git a/lib/mcp/rhai/example/example copy.vsh b/lib/mcp/rhai/example/example copy.vsh
index ae79694d..52d9f678 100644
--- a/lib/mcp/rhai/example/example copy.vsh
+++ b/lib/mcp/rhai/example/example copy.vsh
@@ -4,163 +4,175 @@ import freeflowuniverse.herolib.mcp.aitools.escalayer
import os
fn main() {
- // Get the current directory
- current_dir := os.dir(@FILE)
-
- // Check if a source code path was provided as an argument
- if os.args.len < 2 {
- println('Please provide the path to the source code directory as an argument')
- println('Example: ./example.vsh /path/to/source/code/directory')
- return
- }
-
- // Get the source code path from the command line arguments
- source_code_path := os.args[1]
-
- // Check if the path exists and is a directory
- if !os.exists(source_code_path) {
- println('Source code path does not exist: ${source_code_path}')
- return
- }
-
- if !os.is_dir(source_code_path) {
- println('Source code path is not a directory: ${source_code_path}')
- return
- }
-
- // Get all Rust files in the directory
- files := os.ls(source_code_path) or {
- println('Failed to list files in directory: ${err}')
- return
- }
-
- // Combine all Rust files into a single source code string
- mut source_code := ''
- for file in files {
- file_path := os.join_path(source_code_path, file)
-
- // Skip directories and non-Rust files
- if os.is_dir(file_path) || !file.ends_with('.rs') {
- continue
- }
-
- // Read the file content
- file_content := os.read_file(file_path) or {
- println('Failed to read file ${file_path}: ${err}')
- continue
- }
-
- // Add file content to the combined source code
- source_code += '// File: ${file}\n${file_content}\n\n'
- }
-
- if source_code == '' {
- println('No Rust files found in directory: ${source_code_path}')
- return
- }
-
- // Read the rhaiwrapping.md file
- rhai_wrapping_md := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping.md') or {
- println('Failed to read rhaiwrapping.md: ${err}')
- return
- }
-
- // Determine the crate path from the source code path
- // Extract the path relative to the src directory
- src_index := source_code_path.index('src/') or {
- println('Could not determine crate path: src/ not found in path')
- return
- }
-
- mut path_parts := source_code_path[src_index+4..].split('/')
- // Remove the last part (the file name)
- if path_parts.len > 0 {
- path_parts.delete_last()
- }
- rel_path := path_parts.join('::')
- crate_path := 'sal::${rel_path}'
-
- // Create a new task
- mut task := escalayer.new_task(
- name: 'rhai_wrapper_creator.escalayer'
- description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
- )
-
- // Create model configs
- sonnet_model := escalayer.ModelConfig{
- name: 'anthropic/claude-3.7-sonnet'
- provider: 'anthropic'
- temperature: 0.7
- max_tokens: 25000
- }
-
- gpt4_model := escalayer.ModelConfig{
- name: 'gpt-4'
- provider: 'openai'
- temperature: 0.7
- max_tokens: 25000
- }
-
- // Extract the module name from the directory path (last component)
- dir_parts := source_code_path.split('/')
- name := dir_parts[dir_parts.len - 1]
-
- // Create the prompt with source code, wrapper example, and rhai_wrapping_md
- prompt_content := create_rhai_wrappers(name, source_code, os.read_file('${current_dir}/prompts/example_script.md') or { '' }, os.read_file('${current_dir}/prompts/wrapper.md') or { '' }, os.read_file('${current_dir}/prompts/errors.md') or { '' }, crate_path)
-
- // Create a prompt function that returns the prepared content
- prompt_function := fn [prompt_content] (input string) string {
- return prompt_content
- }
+ // Get the current directory
+ current_dir := os.dir(@FILE)
- gen := RhaiGen{
- name: name
- dir: source_code_path
- }
-
- // Define a single unit task that handles everything
- task.new_unit_task(
- name: 'create_rhai_wrappers'
- prompt_function: prompt_function
- callback_function: gen.process_rhai_wrappers
- base_model: sonnet_model
- retry_model: gpt4_model
- retry_count: 1
- )
-
- // Initiate the task
- result := task.initiate('') or {
- println('Task failed: ${err}')
- return
- }
-
- println('Task completed successfully')
- println('The wrapper files have been generated and compiled in the target directory.')
- println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
+ // Check if a source code path was provided as an argument
+ if os.args.len < 2 {
+ println('Please provide the path to the source code directory as an argument')
+ println('Example: ./example.vsh /path/to/source/code/directory')
+ return
+ }
+
+ // Get the source code path from the command line arguments
+ source_code_path := os.args[1]
+
+ // Check if the path exists and is a directory
+ if !os.exists(source_code_path) {
+ println('Source code path does not exist: ${source_code_path}')
+ return
+ }
+
+ if !os.is_dir(source_code_path) {
+ println('Source code path is not a directory: ${source_code_path}')
+ return
+ }
+
+ // Get all Rust files in the directory
+ files := os.ls(source_code_path) or {
+ println('Failed to list files in directory: ${err}')
+ return
+ }
+
+ // Combine all Rust files into a single source code string
+ mut source_code := ''
+ for file in files {
+ file_path := os.join_path(source_code_path, file)
+
+ // Skip directories and non-Rust files
+ if os.is_dir(file_path) || !file.ends_with('.rs') {
+ continue
+ }
+
+ // Read the file content
+ file_content := os.read_file(file_path) or {
+ println('Failed to read file ${file_path}: ${err}')
+ continue
+ }
+
+ // Add file content to the combined source code
+ source_code += '// File: ${file}\n${file_content}\n\n'
+ }
+
+ if source_code == '' {
+ println('No Rust files found in directory: ${source_code_path}')
+ return
+ }
+
+ // Read the rhaiwrapping.md file
+ rhai_wrapping_md := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping.md') or {
+ println('Failed to read rhaiwrapping.md: ${err}')
+ return
+ }
+
+ // Determine the crate path from the source code path
+ // Extract the path relative to the src directory
+ src_index := source_code_path.index('src/') or {
+ println('Could not determine crate path: src/ not found in path')
+ return
+ }
+
+ mut path_parts := source_code_path[src_index + 4..].split('/')
+ // Remove the last part (the file name)
+ if path_parts.len > 0 {
+ path_parts.delete_last()
+ }
+ rel_path := path_parts.join('::')
+ crate_path := 'sal::${rel_path}'
+
+ // Create a new task
+ mut task := escalayer.new_task(
+ name: 'rhai_wrapper_creator.escalayer'
+ description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
+ )
+
+ // Create model configs
+ sonnet_model := escalayer.ModelConfig{
+ name: 'anthropic/claude-3.7-sonnet'
+ provider: 'anthropic'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ gpt4_model := escalayer.ModelConfig{
+ name: 'gpt-4'
+ provider: 'openai'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ // Extract the module name from the directory path (last component)
+ dir_parts := source_code_path.split('/')
+ name := dir_parts[dir_parts.len - 1]
+
+ // Create the prompt with source code, wrapper example, and rhai_wrapping_md
+ prompt_content := create_rhai_wrappers(name, source_code, os.read_file('${current_dir}/prompts/example_script.md') or {
+ ''
+ }, os.read_file('${current_dir}/prompts/wrapper.md') or { '' }, os.read_file('${current_dir}/prompts/errors.md') or {
+ ''
+ }, crate_path)
+
+ // Create a prompt function that returns the prepared content
+ prompt_function := fn [prompt_content] (input string) string {
+ return prompt_content
+ }
+
+ gen := RhaiGen{
+ name: name
+ dir: source_code_path
+ }
+
+ // Define a single unit task that handles everything
+ task.new_unit_task(
+ name: 'create_rhai_wrappers'
+ prompt_function: prompt_function
+ callback_function: gen.process_rhai_wrappers
+ base_model: sonnet_model
+ retry_model: gpt4_model
+ retry_count: 1
+ )
+
+ // Initiate the task
+ result := task.initiate('') or {
+ println('Task failed: ${err}')
+ return
+ }
+
+ println('Task completed successfully')
+ println('The wrapper files have been generated and compiled in the target directory.')
+ println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
}
// Define the prompt functions
fn separate_functions(input string) string {
- return 'Read the following Rust code and separate it into functions. Identify all the methods in the Container implementation and their purposes.\n\n${input}'
+ return 'Read the following Rust code and separate it into functions. Identify all the methods in the Container implementation and their purposes.\n\n${input}'
}
fn create_wrappers(input string) string {
- return 'Create Rhai wrappers for the Rust functions identified in the previous step. The wrappers should follow the builder pattern and provide a clean API for use in Rhai scripts. Include error handling and type conversion.\n\n${input}'
+ return 'Create Rhai wrappers for the Rust functions identified in the previous step. The wrappers should follow the builder pattern and provide a clean API for use in Rhai scripts. Include error handling and type conversion.\n\n${input}'
}
fn create_example(input string) string {
- return 'Create a Rhai example script that demonstrates how to use the wrapper functions. The example should be based on the provided example.rs file but adapted for Rhai syntax. Create a web server example that uses the container functions.\n\n${input}'
+ return 'Create a Rhai example script that demonstrates how to use the wrapper functions. The example should be based on the provided example.rs file but adapted for Rhai syntax. Create a web server example that uses the container functions.\n\n${input}'
}
// Define a Rhai wrapper generator function for Container functions
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
- guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md') or { panic('Failed to read guides') }
- engine := $tmpl('./prompts/engine.md')
- vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md') or { panic('Failed to read guides') }
- rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md') or { panic('Failed to read guides') }
- rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md') or { panic('Failed to read guides') }
- generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
- return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
+ guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md') or {
+ panic('Failed to read guides')
+ }
+ engine := $tmpl('./prompts/engine.md')
+ vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md') or {
+ panic('Failed to read guides')
+ }
+ rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md') or {
+ panic('Failed to read guides')
+ }
+ rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md') or {
+ panic('Failed to read guides')
+ }
+ generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
+ return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
${guides}
${vector_vs_array}
${example_rhai}
@@ -267,263 +279,254 @@ your engine create function is called `create_rhai_engine`
@[params]
pub struct WrapperModule {
pub:
- lib_rs string
- example_rs string
- engine_rs string
- cargo_toml string
- example_rhai string
- generic_wrapper_rs string
- wrapper_rs string
+ lib_rs string
+ example_rs string
+ engine_rs string
+ cargo_toml string
+ example_rhai string
+ generic_wrapper_rs string
+ wrapper_rs string
}
// functions is a list of function names that AI should extract and pass in
-fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string)! string {
- // Define project directory paths
- name := name_
- project_dir := '${base_dir}/rhai'
-
- // Create the project using cargo new --lib
- if os.exists(project_dir) {
- os.rmdir_all(project_dir) or {
- return error('Failed to clean existing project directory: ${err}')
- }
- }
-
- // Run cargo new --lib to create the project
- os.chdir(base_dir) or {
- return error('Failed to change directory to base directory: ${err}')
- }
-
- cargo_new_result := os.execute('cargo new --lib rhai')
- if cargo_new_result.exit_code != 0 {
- return error('Failed to create new library project: ${cargo_new_result.output}')
- }
-
- // Create examples directory
- examples_dir := '${project_dir}/examples'
- os.mkdir_all(examples_dir) or {
- return error('Failed to create examples directory: ${err}')
- }
-
- // Write the lib.rs file
- if wrapper.lib_rs != '' {
- os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
- return error('Failed to write lib.rs: ${err}')
- }
- }
+fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
+ // Define project directory paths
+ name := name_
+ project_dir := '${base_dir}/rhai'
- // Write the wrapper.rs file
- if wrapper.wrapper_rs != '' {
- os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
- return error('Failed to write wrapper.rs: ${err}')
- }
- }
-
- // Write the generic wrapper.rs file
- if wrapper.generic_wrapper_rs != '' {
- os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
- return error('Failed to write generic wrapper.rs: ${err}')
- }
- }
-
- // Write the example.rs file
- if wrapper.example_rs != '' {
- os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
- return error('Failed to write example.rs: ${err}')
- }
- }
-
- // Write the engine.rs file if provided
- if wrapper.engine_rs != '' {
- os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
- return error('Failed to write engine.rs: ${err}')
- }
- }
-
- // Write the Cargo.toml file
- if wrapper.cargo_toml != '' {
- os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
- return error('Failed to write Cargo.toml: ${err}')
- }
- }
-
- // Write the example.rhai file if provided
- if wrapper.example_rhai != '' {
- os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
- return error('Failed to write example.rhai: ${err}')
- }
- }
-
- return project_dir
+ // Create the project using cargo new --lib
+ if os.exists(project_dir) {
+ os.rmdir_all(project_dir) or {
+ return error('Failed to clean existing project directory: ${err}')
+ }
+ }
+
+ // Run cargo new --lib to create the project
+ os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }
+
+ cargo_new_result := os.execute('cargo new --lib rhai')
+ if cargo_new_result.exit_code != 0 {
+ return error('Failed to create new library project: ${cargo_new_result.output}')
+ }
+
+ // Create examples directory
+ examples_dir := '${project_dir}/examples'
+ os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
+
+ // Write the lib.rs file
+ if wrapper.lib_rs != '' {
+ os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
+ return error('Failed to write lib.rs: ${err}')
+ }
+ }
+
+ // Write the wrapper.rs file
+ if wrapper.wrapper_rs != '' {
+ os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
+ return error('Failed to write wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the generic wrapper.rs file
+ if wrapper.generic_wrapper_rs != '' {
+ os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
+ return error('Failed to write generic wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the example.rs file
+ if wrapper.example_rs != '' {
+ os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
+ return error('Failed to write example.rs: ${err}')
+ }
+ }
+
+ // Write the engine.rs file if provided
+ if wrapper.engine_rs != '' {
+ os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
+ return error('Failed to write engine.rs: ${err}')
+ }
+ }
+
+ // Write the Cargo.toml file
+ if wrapper.cargo_toml != '' {
+ os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
+ return error('Failed to write Cargo.toml: ${err}')
+ }
+ }
+
+ // Write the example.rhai file if provided
+ if wrapper.example_rhai != '' {
+ os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
+ return error('Failed to write example.rhai: ${err}')
+ }
+ }
+
+ return project_dir
}
// Helper function to extract code blocks from the response
fn extract_code_block(response string, identifier string, language string) string {
- // Find the start marker for the code block
- mut start_marker := '```${language}\n// ${identifier}'
- if language == '' {
- start_marker = '```\n// ${identifier}'
- }
-
- start_index := response.index(start_marker) or {
- // Try alternative format
- mut alt_marker := '```${language}\n${identifier}'
- if language == '' {
- alt_marker = '```\n${identifier}'
- }
-
- response.index(alt_marker) or {
- return ''
- }
- }
-
- // Find the end marker
- end_marker := '```'
- end_index := response.index_after(end_marker, start_index + start_marker.len) or {
- return ''
- }
-
- // Extract the content between the markers
- content_start := start_index + start_marker.len
- content := response[content_start..end_index].trim_space()
-
- return content
+ // Find the start marker for the code block
+ mut start_marker := '```${language}\n// ${identifier}'
+ if language == '' {
+ start_marker = '```\n// ${identifier}'
+ }
+
+ start_index := response.index(start_marker) or {
+ // Try alternative format
+ mut alt_marker := '```${language}\n${identifier}'
+ if language == '' {
+ alt_marker = '```\n${identifier}'
+ }
+
+ response.index(alt_marker) or { return '' }
+ }
+
+ // Find the end marker
+ end_marker := '```'
+ end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
+
+ // Extract the content between the markers
+ content_start := start_index + start_marker.len
+ content := response[content_start..end_index].trim_space()
+
+ return content
}
// Extract module name from wrapper code
fn extract_module_name(code string) string {
- lines := code.split('\n')
-
- for line in lines {
- // Look for pub mod or mod declarations
- if line.contains('pub mod ') || line.contains('mod ') {
- // Extract module name
- mut parts := []string{}
- if line.contains('pub mod ') {
- parts = line.split('pub mod ')
- } else {
- parts = line.split('mod ')
- }
-
- if parts.len > 1 {
- // Extract the module name and remove any trailing characters
- mut name := parts[1].trim_space()
- // Remove any trailing { or ; or whitespace
- name = name.trim_right('{').trim_right(';').trim_space()
- if name != '' {
- return name
- }
- }
- }
- }
-
- return ''
+ lines := code.split('\n')
+
+ for line in lines {
+ // Look for pub mod or mod declarations
+ if line.contains('pub mod ') || line.contains('mod ') {
+ // Extract module name
+ mut parts := []string{}
+ if line.contains('pub mod ') {
+ parts = line.split('pub mod ')
+ } else {
+ parts = line.split('mod ')
+ }
+
+ if parts.len > 1 {
+ // Extract the module name and remove any trailing characters
+ mut name := parts[1].trim_space()
+ // Remove any trailing { or ; or whitespace
+ name = name.trim_right('{').trim_right(';').trim_space()
+ if name != '' {
+ return name
+ }
+ }
+ }
+ }
+
+ return ''
}
struct RhaiGen {
- name string
- dir string
+ name string
+ dir string
}
// Define the callback function that processes the response and compiles the code
-fn (gen RhaiGen)process_rhai_wrappers(response string)! string {
- // Extract wrapper.rs content
- wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
- if wrapper_rs_content == '' {
- return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
- }
-
- // Extract engine.rs content
- mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
- if engine_rs_content == '' {
- // Try to extract from the response without explicit language marker
- engine_rs_content = extract_code_block(response, 'engine.rs', '')
- // if engine_rs_content == '' {
- // // Use the template engine.rs
- // engine_rs_content = $tmpl('./templates/engine.rs')
- // }
- }
-
- // Extract example.rhai content
- mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- // Try to extract from the response without explicit language marker
- example_rhai_content = extract_code_block(response, 'example.rhai', '')
- if example_rhai_content == '' {
- // Use the example from the template
- example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
- return error('Failed to read example.rhai template: ${err}')
- }
-
- // Extract the code block from the markdown file
- example_rhai_content = extract_code_block(example_script_md, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- return error('Failed to extract example.rhai from template file')
- }
- }
- }
-
- // Extract function names from the wrapper.rs content
- functions := extract_functions_from_code(wrapper_rs_content)
-
- println('Using module name: ${gen.name}_rhai')
- println('Extracted functions: ${functions.join(", ")}')
-
- name := gen.name
- // Create a WrapperModule struct with the extracted content
- wrapper := WrapperModule{
- lib_rs: $tmpl('./templates/lib.rs')
- wrapper_rs: wrapper_rs_content
- example_rs: $tmpl('./templates/example.rs')
- engine_rs: engine_rs_content
- generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
- cargo_toml: $tmpl('./templates/cargo.toml')
- example_rhai: example_rhai_content
- }
-
- // Create the wrapper module
- base_target_dir := gen.dir
- project_dir := create_wrapper_module(wrapper, functions, gen.name, base_target_dir) or {
- return error('Failed to create wrapper module: ${err}')
- }
-
- // Run the example
- os.chdir(project_dir) or {
- return error('Failed to change directory to project: ${err}')
- }
-
- // Run cargo build first
- build_result := os.execute('cargo build')
- if build_result.exit_code != 0 {
- return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
- }
-
- // Run the example
- run_result := os.execute('cargo run --example example')
-
- return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_result.output}\n\nRun output:\n${run_result.output}'
+fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
+ // Extract wrapper.rs content
+ wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
+ if wrapper_rs_content == '' {
+ return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
+ }
+
+ // Extract engine.rs content
+ mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
+ if engine_rs_content == '' {
+ // Try to extract from the response without explicit language marker
+ engine_rs_content = extract_code_block(response, 'engine.rs', '')
+ // if engine_rs_content == '' {
+ // // Use the template engine.rs
+ // engine_rs_content = $tmpl('./templates/engine.rs')
+ // }
+ }
+
+ // Extract example.rhai content
+ mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
+ if example_rhai_content == '' {
+ // Try to extract from the response without explicit language marker
+ example_rhai_content = extract_code_block(response, 'example.rhai', '')
+ if example_rhai_content == '' {
+ // Use the example from the template
+ example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
+ return error('Failed to read example.rhai template: ${err}')
+ }
+
+ // Extract the code block from the markdown file
+ example_rhai_content = extract_code_block(example_script_md, 'example.rhai',
+ 'rhai')
+ if example_rhai_content == '' {
+ return error('Failed to extract example.rhai from template file')
+ }
+ }
+ }
+
+ // Extract function names from the wrapper.rs content
+ functions := extract_functions_from_code(wrapper_rs_content)
+
+ println('Using module name: ${gen.name}_rhai')
+ println('Extracted functions: ${functions.join(', ')}')
+
+ name := gen.name
+ // Create a WrapperModule struct with the extracted content
+ wrapper := WrapperModule{
+ lib_rs: $tmpl('./templates/lib.rs')
+ wrapper_rs: wrapper_rs_content
+ example_rs: $tmpl('./templates/example.rs')
+ engine_rs: engine_rs_content
+ generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
+ cargo_toml: $tmpl('./templates/cargo.toml')
+ example_rhai: example_rhai_content
+ }
+
+ // Create the wrapper module
+ base_target_dir := gen.dir
+ project_dir := create_wrapper_module(wrapper, functions, gen.name, base_target_dir) or {
+ return error('Failed to create wrapper module: ${err}')
+ }
+
+ // Run the example
+ os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }
+
+ // Run cargo build first
+ build_result := os.execute('cargo build')
+ if build_result.exit_code != 0 {
+ return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
+ }
+
+ // Run the example
+ run_result := os.execute('cargo run --example example')
+
+ return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_result.output}\n\nRun output:\n${run_result.output}'
}
// Extract function names from wrapper code
fn extract_functions_from_code(code string) []string {
- mut functions := []string{}
- lines := code.split('\n')
-
- for line in lines {
- if line.contains('pub fn ') && !line.contains('//') {
- // Extract function name
- parts := line.split('pub fn ')
- if parts.len > 1 {
- name_parts := parts[1].split('(')
- if name_parts.len > 0 {
- fn_name := name_parts[0].trim_space()
- if fn_name != '' {
- functions << fn_name
- }
- }
- }
- }
- }
-
- return functions
-}
\ No newline at end of file
+ mut functions := []string{}
+ lines := code.split('\n')
+
+ for line in lines {
+ if line.contains('pub fn ') && !line.contains('//') {
+ // Extract function name
+ parts := line.split('pub fn ')
+ if parts.len > 1 {
+ name_parts := parts[1].split('(')
+ if name_parts.len > 0 {
+ fn_name := name_parts[0].trim_space()
+ if fn_name != '' {
+ functions << fn_name
+ }
+ }
+ }
+ }
+ }
+
+ return functions
+}
diff --git a/lib/mcp/rhai/example/example.vsh b/lib/mcp/rhai/example/example.vsh
index cfcb8574..23e268b7 100755
--- a/lib/mcp/rhai/example/example.vsh
+++ b/lib/mcp/rhai/example/example.vsh
@@ -4,209 +4,204 @@ import freeflowuniverse.herolib.mcp.aitools.escalayer
import os
fn main() {
- // Get the current directory where this script is located
- current_dir := os.dir(@FILE)
-
- // Validate command line arguments
- source_code_path := validate_command_args() or {
- println(err)
- return
- }
-
- // Read and combine all Rust files in the source directory
- source_code := read_source_code(source_code_path) or {
- println(err)
- return
- }
-
- // Determine the crate path from the source code path
- crate_path := determine_crate_path(source_code_path) or {
- println(err)
- return
- }
-
- // Extract the module name from the directory path (last component)
- name := extract_module_name_from_path(source_code_path)
-
- // Create the prompt content for the AI
- prompt_content := create_rhai_wrappers(
- name,
- source_code,
- read_file_safely('${current_dir}/prompts/example_script.md'),
- read_file_safely('${current_dir}/prompts/wrapper.md'),
- read_file_safely('${current_dir}/prompts/errors.md'),
- crate_path
- )
-
- // Create the generator instance
- gen := RhaiGen{
- name: name
- dir: source_code_path
- }
-
- // Run the task to generate Rhai wrappers
- run_wrapper_generation_task(prompt_content, gen) or {
- println('Task failed: ${err}')
- return
- }
-
- println('Task completed successfully')
- println('The wrapper files have been generated and compiled in the target directory.')
- println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
+ // Get the current directory where this script is located
+ current_dir := os.dir(@FILE)
+
+ // Validate command line arguments
+ source_code_path := validate_command_args() or {
+ println(err)
+ return
+ }
+
+ // Read and combine all Rust files in the source directory
+ source_code := read_source_code(source_code_path) or {
+ println(err)
+ return
+ }
+
+ // Determine the crate path from the source code path
+ crate_path := determine_crate_path(source_code_path) or {
+ println(err)
+ return
+ }
+
+ // Extract the module name from the directory path (last component)
+ name := extract_module_name_from_path(source_code_path)
+
+ // Create the prompt content for the AI
+ prompt_content := create_rhai_wrappers(name, source_code, read_file_safely('${current_dir}/prompts/example_script.md'),
+ read_file_safely('${current_dir}/prompts/wrapper.md'), read_file_safely('${current_dir}/prompts/errors.md'),
+ crate_path)
+
+ // Create the generator instance
+ gen := RhaiGen{
+ name: name
+ dir: source_code_path
+ }
+
+ // Run the task to generate Rhai wrappers
+ run_wrapper_generation_task(prompt_content, gen) or {
+ println('Task failed: ${err}')
+ return
+ }
+
+ println('Task completed successfully')
+ println('The wrapper files have been generated and compiled in the target directory.')
+ println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
}
// Validates command line arguments and returns the source code path
fn validate_command_args() !string {
- if os.args.len < 2 {
- return error('Please provide the path to the source code directory as an argument\nExample: ./example.vsh /path/to/source/code/directory')
- }
-
- source_code_path := os.args[1]
-
- if !os.exists(source_code_path) {
- return error('Source code path does not exist: ${source_code_path}')
- }
-
- if !os.is_dir(source_code_path) {
- return error('Source code path is not a directory: ${source_code_path}')
- }
-
- return source_code_path
+ if os.args.len < 2 {
+ return error('Please provide the path to the source code directory as an argument\nExample: ./example.vsh /path/to/source/code/directory')
+ }
+
+ source_code_path := os.args[1]
+
+ if !os.exists(source_code_path) {
+ return error('Source code path does not exist: ${source_code_path}')
+ }
+
+ if !os.is_dir(source_code_path) {
+ return error('Source code path is not a directory: ${source_code_path}')
+ }
+
+ return source_code_path
}
// Reads and combines all Rust files in the given directory
fn read_source_code(source_code_path string) !string {
- // Get all files in the directory
- files := os.ls(source_code_path) or {
- return error('Failed to list files in directory: ${err}')
- }
-
- // Combine all Rust files into a single source code string
- mut source_code := ''
- for file in files {
- file_path := os.join_path(source_code_path, file)
-
- // Skip directories and non-Rust files
- if os.is_dir(file_path) || !file.ends_with('.rs') {
- continue
- }
-
- // Read the file content
- file_content := os.read_file(file_path) or {
- println('Failed to read file ${file_path}: ${err}')
- continue
- }
-
- // Add file content to the combined source code
- source_code += '// File: ${file}\n${file_content}\n\n'
- }
-
- if source_code == '' {
- return error('No Rust files found in directory: ${source_code_path}')
- }
-
- return source_code
+ // Get all files in the directory
+ files := os.ls(source_code_path) or {
+ return error('Failed to list files in directory: ${err}')
+ }
+
+ // Combine all Rust files into a single source code string
+ mut source_code := ''
+ for file in files {
+ file_path := os.join_path(source_code_path, file)
+
+ // Skip directories and non-Rust files
+ if os.is_dir(file_path) || !file.ends_with('.rs') {
+ continue
+ }
+
+ // Read the file content
+ file_content := os.read_file(file_path) or {
+ println('Failed to read file ${file_path}: ${err}')
+ continue
+ }
+
+ // Add file content to the combined source code
+ source_code += '// File: ${file}\n${file_content}\n\n'
+ }
+
+ if source_code == '' {
+ return error('No Rust files found in directory: ${source_code_path}')
+ }
+
+ return source_code
}
// Determines the crate path from the source code path
fn determine_crate_path(source_code_path string) !string {
- // Extract the path relative to the src directory
- src_index := source_code_path.index('src/') or {
- return error('Could not determine crate path: src/ not found in path')
- }
-
- mut path_parts := source_code_path[src_index+4..].split('/')
- // Remove the last part (the file name)
- if path_parts.len > 0 {
- path_parts.delete_last()
- }
- rel_path := path_parts.join('::')
- return 'sal::${rel_path}'
+ // Extract the path relative to the src directory
+ src_index := source_code_path.index('src/') or {
+ return error('Could not determine crate path: src/ not found in path')
+ }
+
+ mut path_parts := source_code_path[src_index + 4..].split('/')
+ // Remove the last part (the file name)
+ if path_parts.len > 0 {
+ path_parts.delete_last()
+ }
+ rel_path := path_parts.join('::')
+ return 'sal::${rel_path}'
}
// Extracts the module name from a directory path
fn extract_module_name_from_path(path string) string {
- dir_parts := path.split('/')
- return dir_parts[dir_parts.len - 1]
+ dir_parts := path.split('/')
+ return dir_parts[dir_parts.len - 1]
}
// Helper function to read a file or return empty string if file doesn't exist
fn read_file_safely(file_path string) string {
- return os.read_file(file_path) or { '' }
+ return os.read_file(file_path) or { '' }
}
// Runs the task to generate Rhai wrappers
fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
- // Create a new task
- mut task := escalayer.new_task(
- name: 'rhai_wrapper_creator.escalayer'
- description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
- )
-
- // Create model configs
- sonnet_model := escalayer.ModelConfig{
- name: 'anthropic/claude-3.7-sonnet'
- provider: 'anthropic'
- temperature: 0.7
- max_tokens: 25000
- }
-
- gpt4_model := escalayer.ModelConfig{
- name: 'gpt-4'
- provider: 'openai'
- temperature: 0.7
- max_tokens: 25000
- }
-
- // Create a prompt function that returns the prepared content
- prompt_function := fn [prompt_content] (input string) string {
- return prompt_content
- }
-
- // Define a single unit task that handles everything
- task.new_unit_task(
- name: 'create_rhai_wrappers'
- prompt_function: prompt_function
- callback_function: gen.process_rhai_wrappers
- base_model: sonnet_model
- retry_model: gpt4_model
- retry_count: 1
- )
-
- // Initiate the task
- return task.initiate('')
+ // Create a new task
+ mut task := escalayer.new_task(
+ name: 'rhai_wrapper_creator.escalayer'
+ description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
+ )
+
+ // Create model configs
+ sonnet_model := escalayer.ModelConfig{
+ name: 'anthropic/claude-3.7-sonnet'
+ provider: 'anthropic'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ gpt4_model := escalayer.ModelConfig{
+ name: 'gpt-4'
+ provider: 'openai'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ // Create a prompt function that returns the prepared content
+ prompt_function := fn [prompt_content] (input string) string {
+ return prompt_content
+ }
+
+ // Define a single unit task that handles everything
+ task.new_unit_task(
+ name: 'create_rhai_wrappers'
+ prompt_function: prompt_function
+ callback_function: gen.process_rhai_wrappers
+ base_model: sonnet_model
+ retry_model: gpt4_model
+ retry_count: 1
+ )
+
+ // Initiate the task
+ return task.initiate('')
}
// Define a Rhai wrapper generator function for Container functions
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
- // Load all required template and guide files
- guides := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')
- engine := $tmpl('./prompts/engine.md')
- vector_vs_array := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')
- rhai_integration_fixes := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')
- rhai_syntax_guide := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')
- generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
-
- // Build the prompt content
- return build_prompt_content(name, source_code, example_rhai, wrapper_md, errors_md,
- guides, vector_vs_array, rhai_integration_fixes, rhai_syntax_guide,
- generic_wrapper_rs, engine)
+ // Load all required template and guide files
+ guides := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')
+ engine := $tmpl('./prompts/engine.md')
+ vector_vs_array := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')
+ rhai_integration_fixes := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')
+ rhai_syntax_guide := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')
+ generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
+
+ // Build the prompt content
+ return build_prompt_content(name, source_code, example_rhai, wrapper_md, errors_md,
+ guides, vector_vs_array, rhai_integration_fixes, rhai_syntax_guide, generic_wrapper_rs,
+ engine)
}
// Helper function to load guide files with error handling
fn load_guide_file(path string) string {
- return os.read_file(path) or {
- eprintln('Warning: Failed to read guide file: ${path}')
- return ''
- }
+ return os.read_file(path) or {
+ eprintln('Warning: Failed to read guide file: ${path}')
+ return ''
+ }
}
// Builds the prompt content for the AI
-fn build_prompt_content(name string, source_code string, example_rhai string, wrapper_md string,
- errors_md string, guides string, vector_vs_array string,
- rhai_integration_fixes string, rhai_syntax_guide string,
- generic_wrapper_rs string, engine string) string {
- return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
+fn build_prompt_content(name string, source_code string, example_rhai string, wrapper_md string,
+ errors_md string, guides string, vector_vs_array string,
+ rhai_integration_fixes string, rhai_syntax_guide string,
+ generic_wrapper_rs string, engine string) string {
+ return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
${guides}
${vector_vs_array}
${example_rhai}
@@ -313,305 +308,289 @@ your engine create function is called `create_rhai_engine`
@[params]
pub struct WrapperModule {
pub:
- lib_rs string
- example_rs string
- engine_rs string
- cargo_toml string
- example_rhai string
- generic_wrapper_rs string
- wrapper_rs string
+ lib_rs string
+ example_rs string
+ engine_rs string
+ cargo_toml string
+ example_rhai string
+ generic_wrapper_rs string
+ wrapper_rs string
}
// functions is a list of function names that AI should extract and pass in
-fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string)! string {
- // Define project directory paths
- name := name_
- project_dir := '${base_dir}/rhai'
-
- // Create the project using cargo new --lib
- if os.exists(project_dir) {
- os.rmdir_all(project_dir) or {
- return error('Failed to clean existing project directory: ${err}')
- }
- }
-
- // Run cargo new --lib to create the project
- os.chdir(base_dir) or {
- return error('Failed to change directory to base directory: ${err}')
- }
-
- cargo_new_result := os.execute('cargo new --lib rhai')
- if cargo_new_result.exit_code != 0 {
- return error('Failed to create new library project: ${cargo_new_result.output}')
- }
-
- // Create examples directory
- examples_dir := '${project_dir}/examples'
- os.mkdir_all(examples_dir) or {
- return error('Failed to create examples directory: ${err}')
- }
-
- // Write the lib.rs file
- if wrapper.lib_rs != '' {
- os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
- return error('Failed to write lib.rs: ${err}')
- }
- }
+fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
+ // Define project directory paths
+ name := name_
+ project_dir := '${base_dir}/rhai'
- // Write the wrapper.rs file
- if wrapper.wrapper_rs != '' {
- os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
- return error('Failed to write wrapper.rs: ${err}')
- }
- }
-
- // Write the generic wrapper.rs file
- if wrapper.generic_wrapper_rs != '' {
- os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
- return error('Failed to write generic wrapper.rs: ${err}')
- }
- }
-
- // Write the example.rs file
- if wrapper.example_rs != '' {
- os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
- return error('Failed to write example.rs: ${err}')
- }
- }
-
- // Write the engine.rs file if provided
- if wrapper.engine_rs != '' {
- os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
- return error('Failed to write engine.rs: ${err}')
- }
- }
-
- // Write the Cargo.toml file
- if wrapper.cargo_toml != '' {
- os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
- return error('Failed to write Cargo.toml: ${err}')
- }
- }
-
- // Write the example.rhai file if provided
- if wrapper.example_rhai != '' {
- os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
- return error('Failed to write example.rhai: ${err}')
- }
- }
-
- return project_dir
+ // Create the project using cargo new --lib
+ if os.exists(project_dir) {
+ os.rmdir_all(project_dir) or {
+ return error('Failed to clean existing project directory: ${err}')
+ }
+ }
+
+ // Run cargo new --lib to create the project
+ os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }
+
+ cargo_new_result := os.execute('cargo new --lib rhai')
+ if cargo_new_result.exit_code != 0 {
+ return error('Failed to create new library project: ${cargo_new_result.output}')
+ }
+
+ // Create examples directory
+ examples_dir := '${project_dir}/examples'
+ os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
+
+ // Write the lib.rs file
+ if wrapper.lib_rs != '' {
+ os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
+ return error('Failed to write lib.rs: ${err}')
+ }
+ }
+
+ // Write the wrapper.rs file
+ if wrapper.wrapper_rs != '' {
+ os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
+ return error('Failed to write wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the generic wrapper.rs file
+ if wrapper.generic_wrapper_rs != '' {
+ os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
+ return error('Failed to write generic wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the example.rs file
+ if wrapper.example_rs != '' {
+ os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
+ return error('Failed to write example.rs: ${err}')
+ }
+ }
+
+ // Write the engine.rs file if provided
+ if wrapper.engine_rs != '' {
+ os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
+ return error('Failed to write engine.rs: ${err}')
+ }
+ }
+
+ // Write the Cargo.toml file
+ if wrapper.cargo_toml != '' {
+ os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
+ return error('Failed to write Cargo.toml: ${err}')
+ }
+ }
+
+ // Write the example.rhai file if provided
+ if wrapper.example_rhai != '' {
+ os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
+ return error('Failed to write example.rhai: ${err}')
+ }
+ }
+
+ return project_dir
}
// Helper function to extract code blocks from the response
fn extract_code_block(response string, identifier string, language string) string {
- // Find the start marker for the code block
- mut start_marker := '```${language}\n// ${identifier}'
- if language == '' {
- start_marker = '```\n// ${identifier}'
- }
-
- start_index := response.index(start_marker) or {
- // Try alternative format
- mut alt_marker := '```${language}\n${identifier}'
- if language == '' {
- alt_marker = '```\n${identifier}'
- }
-
- response.index(alt_marker) or {
- return ''
- }
- }
-
- // Find the end marker
- end_marker := '```'
- end_index := response.index_after(end_marker, start_index + start_marker.len) or {
- return ''
- }
-
- // Extract the content between the markers
- content_start := start_index + start_marker.len
- content := response[content_start..end_index].trim_space()
-
- return content
+ // Find the start marker for the code block
+ mut start_marker := '```${language}\n// ${identifier}'
+ if language == '' {
+ start_marker = '```\n// ${identifier}'
+ }
+
+ start_index := response.index(start_marker) or {
+ // Try alternative format
+ mut alt_marker := '```${language}\n${identifier}'
+ if language == '' {
+ alt_marker = '```\n${identifier}'
+ }
+
+ response.index(alt_marker) or { return '' }
+ }
+
+ // Find the end marker
+ end_marker := '```'
+ end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
+
+ // Extract the content between the markers
+ content_start := start_index + start_marker.len
+ content := response[content_start..end_index].trim_space()
+
+ return content
}
// Extract module name from wrapper code
fn extract_module_name(code string) string {
- lines := code.split('\n')
-
- for line in lines {
- // Look for pub mod or mod declarations
- if line.contains('pub mod ') || line.contains('mod ') {
- // Extract module name
- mut parts := []string{}
- if line.contains('pub mod ') {
- parts = line.split('pub mod ')
- } else {
- parts = line.split('mod ')
- }
-
- if parts.len > 1 {
- // Extract the module name and remove any trailing characters
- mut name := parts[1].trim_space()
- // Remove any trailing { or ; or whitespace
- name = name.trim_right('{').trim_right(';').trim_space()
- if name != '' {
- return name
- }
- }
- }
- }
-
- return ''
+ lines := code.split('\n')
+
+ for line in lines {
+ // Look for pub mod or mod declarations
+ if line.contains('pub mod ') || line.contains('mod ') {
+ // Extract module name
+ mut parts := []string{}
+ if line.contains('pub mod ') {
+ parts = line.split('pub mod ')
+ } else {
+ parts = line.split('mod ')
+ }
+
+ if parts.len > 1 {
+ // Extract the module name and remove any trailing characters
+ mut name := parts[1].trim_space()
+ // Remove any trailing { or ; or whitespace
+ name = name.trim_right('{').trim_right(';').trim_space()
+ if name != '' {
+ return name
+ }
+ }
+ }
+ }
+
+ return ''
}
// RhaiGen struct for generating Rhai wrappers
struct RhaiGen {
- name string
- dir string
+ name string
+ dir string
}
// Process the AI response and compile the generated code
-fn (gen RhaiGen)process_rhai_wrappers(response string)! string {
- // Extract code blocks from the response
- code_blocks := extract_code_blocks(response) or {
- return err
- }
-
- // Extract function names from the wrapper.rs content
- functions := extract_functions_from_code(code_blocks.wrapper_rs)
-
- println('Using module name: ${gen.name}_rhai')
- println('Extracted functions: ${functions.join(", ")}')
-
- name := gen.name
-
- // Create a WrapperModule struct with the extracted content
- wrapper := WrapperModule{
- lib_rs: $tmpl('./templates/lib.rs')
- wrapper_rs: code_blocks.wrapper_rs
- example_rs: $tmpl('./templates/example.rs')
- engine_rs: code_blocks.engine_rs
- generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
- cargo_toml: $tmpl('./templates/cargo.toml')
- example_rhai: code_blocks.example_rhai
- }
-
- // Create the wrapper module
- project_dir := create_wrapper_module(wrapper, functions, gen.name, gen.dir) or {
- return error('Failed to create wrapper module: ${err}')
- }
-
- // Build and run the project
- build_output, run_output := build_and_run_project(project_dir) or {
- return err
- }
-
- return format_success_message(project_dir, build_output, run_output)
+fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
+ // Extract code blocks from the response
+ code_blocks := extract_code_blocks(response) or { return err }
+
+ // Extract function names from the wrapper.rs content
+ functions := extract_functions_from_code(code_blocks.wrapper_rs)
+
+ println('Using module name: ${gen.name}_rhai')
+ println('Extracted functions: ${functions.join(', ')}')
+
+ name := gen.name
+
+ // Create a WrapperModule struct with the extracted content
+ wrapper := WrapperModule{
+ lib_rs: $tmpl('./templates/lib.rs')
+ wrapper_rs: code_blocks.wrapper_rs
+ example_rs: $tmpl('./templates/example.rs')
+ engine_rs: code_blocks.engine_rs
+ generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
+ cargo_toml: $tmpl('./templates/cargo.toml')
+ example_rhai: code_blocks.example_rhai
+ }
+
+ // Create the wrapper module
+ project_dir := create_wrapper_module(wrapper, functions, gen.name, gen.dir) or {
+ return error('Failed to create wrapper module: ${err}')
+ }
+
+ // Build and run the project
+ build_output, run_output := build_and_run_project(project_dir) or { return err }
+
+ return format_success_message(project_dir, build_output, run_output)
}
// CodeBlocks struct to hold extracted code blocks
struct CodeBlocks {
- wrapper_rs string
- engine_rs string
- example_rhai string
+ wrapper_rs string
+ engine_rs string
+ example_rhai string
}
// Extract code blocks from the AI response
-fn extract_code_blocks(response string)! CodeBlocks {
- // Extract wrapper.rs content
- wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
- if wrapper_rs_content == '' {
- return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
- }
-
- // Extract engine.rs content
- mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
- if engine_rs_content == '' {
- // Try to extract from the response without explicit language marker
- engine_rs_content = extract_code_block(response, 'engine.rs', '')
- }
-
- // Extract example.rhai content
- mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- // Try to extract from the response without explicit language marker
- example_rhai_content = extract_code_block(response, 'example.rhai', '')
- if example_rhai_content == '' {
- // Use the example from the template
- example_rhai_content = load_example_from_template() or {
- return err
- }
- }
- }
-
- return CodeBlocks{
- wrapper_rs: wrapper_rs_content
- engine_rs: engine_rs_content
- example_rhai: example_rhai_content
- }
+fn extract_code_blocks(response string) !CodeBlocks {
+ // Extract wrapper.rs content
+ wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
+ if wrapper_rs_content == '' {
+ return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
+ }
+
+ // Extract engine.rs content
+ mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
+ if engine_rs_content == '' {
+ // Try to extract from the response without explicit language marker
+ engine_rs_content = extract_code_block(response, 'engine.rs', '')
+ }
+
+ // Extract example.rhai content
+ mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
+ if example_rhai_content == '' {
+ // Try to extract from the response without explicit language marker
+ example_rhai_content = extract_code_block(response, 'example.rhai', '')
+ if example_rhai_content == '' {
+ // Use the example from the template
+ example_rhai_content = load_example_from_template() or { return err }
+ }
+ }
+
+ return CodeBlocks{
+ wrapper_rs: wrapper_rs_content
+ engine_rs: engine_rs_content
+ example_rhai: example_rhai_content
+ }
}
// Load example.rhai from template file
-fn load_example_from_template()! string {
- example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
- return error('Failed to read example.rhai template: ${err}')
- }
-
- // Extract the code block from the markdown file
- example_rhai_content := extract_code_block(example_script_md, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- return error('Failed to extract example.rhai from template file')
- }
-
- return example_rhai_content
+fn load_example_from_template() !string {
+ example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
+ return error('Failed to read example.rhai template: ${err}')
+ }
+
+ // Extract the code block from the markdown file
+ example_rhai_content := extract_code_block(example_script_md, 'example.rhai', 'rhai')
+ if example_rhai_content == '' {
+ return error('Failed to extract example.rhai from template file')
+ }
+
+ return example_rhai_content
}
// Build and run the project
-fn build_and_run_project(project_dir string)! (string, string) {
- // Change to the project directory
- os.chdir(project_dir) or {
- return error('Failed to change directory to project: ${err}')
- }
-
- // Run cargo build first
- build_result := os.execute('cargo build')
- if build_result.exit_code != 0 {
- return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
- }
-
- // Run the example
- run_result := os.execute('cargo run --example example')
-
- return build_result.output, run_result.output
+fn build_and_run_project(project_dir string) !(string, string) {
+ // Change to the project directory
+ os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }
+
+ // Run cargo build first
+ build_result := os.execute('cargo build')
+ if build_result.exit_code != 0 {
+ return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
+ }
+
+ // Run the example
+ run_result := os.execute('cargo run --example example')
+
+ return build_result.output, run_result.output
}
// Format success message
fn format_success_message(project_dir string, build_output string, run_output string) string {
- return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
+ return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
}
// Extract function names from wrapper code
fn extract_functions_from_code(code string) []string {
- mut functions := []string{}
- lines := code.split('\n')
-
- for line in lines {
- if line.contains('pub fn ') && !line.contains('//') {
- // Extract function name
- parts := line.split('pub fn ')
- if parts.len > 1 {
- name_parts := parts[1].split('(')
- if name_parts.len > 0 {
- fn_name := name_parts[0].trim_space()
- if fn_name != '' {
- functions << fn_name
- }
- }
- }
- }
- }
-
- return functions
-}
\ No newline at end of file
+ mut functions := []string{}
+ lines := code.split('\n')
+
+ for line in lines {
+ if line.contains('pub fn ') && !line.contains('//') {
+ // Extract function name
+ parts := line.split('pub fn ')
+ if parts.len > 1 {
+ name_parts := parts[1].split('(')
+ if name_parts.len > 0 {
+ fn_name := name_parts[0].trim_space()
+ if fn_name != '' {
+ functions << fn_name
+ }
+ }
+ }
+ }
+ }
+
+ return functions
+}
diff --git a/lib/mcp/rhai/logic/logic.v b/lib/mcp/rhai/logic/logic.v
index 32ee4db7..aaeac298 100644
--- a/lib/mcp/rhai/logic/logic.v
+++ b/lib/mcp/rhai/logic/logic.v
@@ -6,274 +6,263 @@ import freeflowuniverse.herolib.ai.utils
import os
pub fn generate_rhai_wrapper(name string, source_path string) !string {
- prompt := rhai_wrapper_generation_prompt(name, source_path) or {panic(err)}
- return run_wrapper_generation_task(prompt, RhaiGen{
- name: name
- dir: source_path
- }) or {panic(err)}
+ prompt := rhai_wrapper_generation_prompt(name, source_path) or { panic(err) }
+ return run_wrapper_generation_task(prompt, RhaiGen{
+ name: name
+ dir: source_path
+ }) or { panic(err) }
}
// Runs the task to generate Rhai wrappers
pub fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
- // Create a new task
- mut task := escalayer.new_task(
- name: 'rhai_wrapper_creator.escalayer'
- description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
- )
-
- // Create model configs
- sonnet_model := escalayer.ModelConfig{
- name: 'anthropic/claude-3.7-sonnet'
- provider: 'anthropic'
- temperature: 0.7
- max_tokens: 25000
- }
-
- gpt4_model := escalayer.ModelConfig{
- name: 'gpt-4'
- provider: 'openai'
- temperature: 0.7
- max_tokens: 25000
- }
-
- // Create a prompt function that returns the prepared content
- prompt_function := fn [prompt_content] (input string) string {
- return prompt_content
- }
-
- // Define a single unit task that handles everything
- task.new_unit_task(
- name: 'create_rhai_wrappers'
- prompt_function: prompt_function
- callback_function: gen.process_rhai_wrappers
- base_model: sonnet_model
- retry_model: gpt4_model
- retry_count: 1
- )
-
- // Initiate the task
- return task.initiate('')
+ // Create a new task
+ mut task := escalayer.new_task(
+ name: 'rhai_wrapper_creator.escalayer'
+ description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
+ )
+
+ // Create model configs
+ sonnet_model := escalayer.ModelConfig{
+ name: 'anthropic/claude-3.7-sonnet'
+ provider: 'anthropic'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ gpt4_model := escalayer.ModelConfig{
+ name: 'gpt-4'
+ provider: 'openai'
+ temperature: 0.7
+ max_tokens: 25000
+ }
+
+ // Create a prompt function that returns the prepared content
+ prompt_function := fn [prompt_content] (input string) string {
+ return prompt_content
+ }
+
+ // Define a single unit task that handles everything
+ task.new_unit_task(
+ name: 'create_rhai_wrappers'
+ prompt_function: prompt_function
+ callback_function: gen.process_rhai_wrappers
+ base_model: sonnet_model
+ retry_model: gpt4_model
+ retry_count: 1
+ )
+
+ // Initiate the task
+ return task.initiate('')
}
// Define a Rhai wrapper generator function for Container functions
pub fn rhai_wrapper_generation_prompt(name string, source_code string) !string {
- current_dir := os.dir(@FILE)
- example_rhai := os.read_file('${current_dir}/prompts/example_script.md') or {panic(err)}
- wrapper_md := os.read_file('${current_dir}/prompts/wrapper.md') or {panic(err)}
- errors_md := os.read_file('${current_dir}/prompts/errors.md') or {panic(err)}
-
+ current_dir := os.dir(@FILE)
+ example_rhai := os.read_file('${current_dir}/prompts/example_script.md') or { panic(err) }
+ wrapper_md := os.read_file('${current_dir}/prompts/wrapper.md') or { panic(err) }
+ errors_md := os.read_file('${current_dir}/prompts/errors.md') or { panic(err) }
+
// Load all required template and guide files
- guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')!
- engine := $tmpl('./prompts/engine.md')
- vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')!
- rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')!
- rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')!
- generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
-
- prompt := $tmpl('./prompts/main.md')
+ guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')!
+ engine := $tmpl('./prompts/engine.md')
+ vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')!
+ rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')!
+ rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')!
+ generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
+
+ prompt := $tmpl('./prompts/main.md')
return prompt
}
@[params]
pub struct WrapperModule {
pub:
- lib_rs string
- example_rs string
- engine_rs string
- cargo_toml string
- example_rhai string
- generic_wrapper_rs string
- wrapper_rs string
+ lib_rs string
+ example_rs string
+ engine_rs string
+ cargo_toml string
+ example_rhai string
+ generic_wrapper_rs string
+ wrapper_rs string
}
// functions is a list of function names that AI should extract and pass in
-pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string)! string {
-
+pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string) !string {
// Define project directory paths
- project_dir := '${path}/rhai'
-
- // Create the project using cargo new --lib
- if os.exists(project_dir) {
- os.rmdir_all(project_dir) or {
- return error('Failed to clean existing project directory: ${err}')
- }
- }
-
- // Run cargo new --lib to create the project
- os.chdir(path) or {
- return error('Failed to change directory to base directory: ${err}')
- }
-
- cargo_new_result := os.execute('cargo new --lib rhai')
- if cargo_new_result.exit_code != 0 {
- return error('Failed to create new library project: ${cargo_new_result.output}')
- }
-
- // Create examples directory
- examples_dir := '${project_dir}/examples'
- os.mkdir_all(examples_dir) or {
- return error('Failed to create examples directory: ${err}')
- }
-
- // Write the lib.rs file
- if wrapper.lib_rs != '' {
- os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
- return error('Failed to write lib.rs: ${err}')
- }
- }
+ project_dir := '${path}/rhai'
- // Write the wrapper.rs file
- if wrapper.wrapper_rs != '' {
- os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
- return error('Failed to write wrapper.rs: ${err}')
- }
- }
-
- // Write the generic wrapper.rs file
- if wrapper.generic_wrapper_rs != '' {
- os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
- return error('Failed to write generic wrapper.rs: ${err}')
- }
- }
-
- // Write the example.rs file
- if wrapper.example_rs != '' {
- os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
- return error('Failed to write example.rs: ${err}')
- }
- }
-
- // Write the engine.rs file if provided
- if wrapper.engine_rs != '' {
- os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
- return error('Failed to write engine.rs: ${err}')
- }
- }
-
- // Write the Cargo.toml file
- os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
- return error('Failed to write Cargo.toml: ${err}')
- }
-
- // Write the example.rhai file
- os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
- return error('Failed to write example.rhai: ${err}')
- }
-
- return project_dir
+ // Create the project using cargo new --lib
+ if os.exists(project_dir) {
+ os.rmdir_all(project_dir) or {
+ return error('Failed to clean existing project directory: ${err}')
+ }
+ }
+
+ // Run cargo new --lib to create the project
+ os.chdir(path) or { return error('Failed to change directory to base directory: ${err}') }
+
+ cargo_new_result := os.execute('cargo new --lib rhai')
+ if cargo_new_result.exit_code != 0 {
+ return error('Failed to create new library project: ${cargo_new_result.output}')
+ }
+
+ // Create examples directory
+ examples_dir := '${project_dir}/examples'
+ os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
+
+ // Write the lib.rs file
+ if wrapper.lib_rs != '' {
+ os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
+ return error('Failed to write lib.rs: ${err}')
+ }
+ }
+
+ // Write the wrapper.rs file
+ if wrapper.wrapper_rs != '' {
+ os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
+ return error('Failed to write wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the generic wrapper.rs file
+ if wrapper.generic_wrapper_rs != '' {
+ os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
+ return error('Failed to write generic wrapper.rs: ${err}')
+ }
+ }
+
+ // Write the example.rs file
+ if wrapper.example_rs != '' {
+ os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
+ return error('Failed to write example.rs: ${err}')
+ }
+ }
+
+ // Write the engine.rs file if provided
+ if wrapper.engine_rs != '' {
+ os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
+ return error('Failed to write engine.rs: ${err}')
+ }
+ }
+
+ // Write the Cargo.toml file
+ os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
+ return error('Failed to write Cargo.toml: ${err}')
+ }
+
+ // Write the example.rhai file
+ os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
+ return error('Failed to write example.rhai: ${err}')
+ }
+
+ return project_dir
}
-
-
// Extract module name from wrapper code
fn extract_module_name(code string) string {
- lines := code.split('\n')
-
- for line in lines {
- // Look for pub mod or mod declarations
- if line.contains('pub mod ') || line.contains('mod ') {
- // Extract module name
- mut parts := []string{}
- if line.contains('pub mod ') {
- parts = line.split('pub mod ')
- } else {
- parts = line.split('mod ')
- }
-
- if parts.len > 1 {
- // Extract the module name and remove any trailing characters
- mut name := parts[1].trim_space()
- // Remove any trailing { or ; or whitespace
- name = name.trim_right('{').trim_right(';').trim_space()
- if name != '' {
- return name
- }
- }
- }
- }
-
- return ''
+ lines := code.split('\n')
+
+ for line in lines {
+ // Look for pub mod or mod declarations
+ if line.contains('pub mod ') || line.contains('mod ') {
+ // Extract module name
+ mut parts := []string{}
+ if line.contains('pub mod ') {
+ parts = line.split('pub mod ')
+ } else {
+ parts = line.split('mod ')
+ }
+
+ if parts.len > 1 {
+ // Extract the module name and remove any trailing characters
+ mut name := parts[1].trim_space()
+ // Remove any trailing { or ; or whitespace
+ name = name.trim_right('{').trim_right(';').trim_space()
+ if name != '' {
+ return name
+ }
+ }
+ }
+ }
+
+ return ''
}
// RhaiGen struct for generating Rhai wrappers
struct RhaiGen {
- name string
- dir string
+ name string
+ dir string
}
// Process the AI response and compile the generated code
-fn (gen RhaiGen) process_rhai_wrappers(response string)! string {
- // Extract code blocks from the response
- code_blocks := extract_code_blocks(response) or {
- return err
- }
-
- name := gen.name
-
- // Create a WrapperModule struct with the extracted content
- wrapper := WrapperModule{
- lib_rs: $tmpl('./templates/lib.rs')
- wrapper_rs: code_blocks.wrapper_rs
- example_rs: $tmpl('./templates/example.rs')
- engine_rs: code_blocks.engine_rs
- generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
- cargo_toml: $tmpl('./templates/cargo.toml')
- example_rhai: code_blocks.example_rhai
- }
-
- // Create the wrapper module
- project_dir := write_rhai_wrapper_module(wrapper, gen.name, gen.dir) or {
- return error('Failed to create wrapper module: ${err}')
- }
-
- // Build and run the project
- build_output, run_output := rust.run_example(project_dir, 'example') or {
- return err
- }
-
- return format_success_message(project_dir, build_output, run_output)
+fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
+ // Extract code blocks from the response
+ code_blocks := extract_code_blocks(response) or { return err }
+
+ name := gen.name
+
+ // Create a WrapperModule struct with the extracted content
+ wrapper := WrapperModule{
+ lib_rs: $tmpl('./templates/lib.rs')
+ wrapper_rs: code_blocks.wrapper_rs
+ example_rs: $tmpl('./templates/example.rs')
+ engine_rs: code_blocks.engine_rs
+ generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
+ cargo_toml: $tmpl('./templates/cargo.toml')
+ example_rhai: code_blocks.example_rhai
+ }
+
+ // Create the wrapper module
+ project_dir := write_rhai_wrapper_module(wrapper, gen.name, gen.dir) or {
+ return error('Failed to create wrapper module: ${err}')
+ }
+
+ // Build and run the project
+ build_output, run_output := rust.run_example(project_dir, 'example') or { return err }
+
+ return format_success_message(project_dir, build_output, run_output)
}
// CodeBlocks struct to hold extracted code blocks
struct CodeBlocks {
- wrapper_rs string
- engine_rs string
- example_rhai string
+ wrapper_rs string
+ engine_rs string
+ example_rhai string
}
// Extract code blocks from the AI response
-fn extract_code_blocks(response string)! CodeBlocks {
- // Extract wrapper.rs content
- wrapper_rs_content := utils.extract_code_block(response, 'wrapper.rs', 'rust')
- if wrapper_rs_content == '' {
- return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
- }
-
- // Extract engine.rs content
- mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
- if engine_rs_content == '' {
- // Try to extract from the response without explicit language marker
- engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
- }
-
- // Extract example.rhai content
- mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
- if example_rhai_content == '' {
- // Try to extract from the response without explicit language marker
- example_rhai_content = utils.extract_code_block(response, 'example.rhai', '')
- if example_rhai_content == '' {
- return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
- }
- }
-
- return CodeBlocks{
- wrapper_rs: wrapper_rs_content
- engine_rs: engine_rs_content
- example_rhai: example_rhai_content
- }
+fn extract_code_blocks(response string) !CodeBlocks {
+ // Extract wrapper.rs content
+ wrapper_rs_content := utils.extract_code_block(response, 'wrapper.rs', 'rust')
+ if wrapper_rs_content == '' {
+ return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
+ }
+
+ // Extract engine.rs content
+ mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
+ if engine_rs_content == '' {
+ // Try to extract from the response without explicit language marker
+ engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
+ }
+
+ // Extract example.rhai content
+ mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
+ if example_rhai_content == '' {
+ // Try to extract from the response without explicit language marker
+ example_rhai_content = utils.extract_code_block(response, 'example.rhai', '')
+ if example_rhai_content == '' {
+ return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
+ }
+ }
+
+ return CodeBlocks{
+ wrapper_rs: wrapper_rs_content
+ engine_rs: engine_rs_content
+ example_rhai: example_rhai_content
+ }
}
// Format success message
fn format_success_message(project_dir string, build_output string, run_output string) string {
- return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
+ return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
}
diff --git a/lib/mcp/rhai/mcp/command.v b/lib/mcp/rhai/mcp/command.v
index fc8c82d7..956ca1dc 100644
--- a/lib/mcp/rhai/mcp/command.v
+++ b/lib/mcp/rhai/mcp/command.v
@@ -2,17 +2,17 @@ module mcp
import cli
-pub const command := cli.Command{
- sort_flags: true
- name: 'rhai'
+pub const command = cli.Command{
+ sort_flags: true
+ name: 'rhai'
// execute: cmd_mcpgen
description: 'rhai command'
- commands: [
+ commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the Rhai server'
- }
+ },
]
}
@@ -20,4 +20,3 @@ fn cmd_start(cmd cli.Command) ! {
mut server := new_mcp_server()!
server.start()!
}
-
diff --git a/lib/mcp/rhai/mcp/mcp.v b/lib/mcp/rhai/mcp/mcp.v
index a47bfdac..b4321d3a 100644
--- a/lib/mcp/rhai/mcp/mcp.v
+++ b/lib/mcp/rhai/mcp/mcp.v
@@ -9,10 +9,10 @@ pub fn new_mcp_server() !&mcp.Server {
// Initialize the server with the empty handlers map
mut server := mcp.new_server(mcp.MemoryBackend{
- tools: {
+ tools: {
'generate_rhai_wrapper': generate_rhai_wrapper_spec
}
- tool_handlers: {
+ tool_handlers: {
'generate_rhai_wrapper': generate_rhai_wrapper_handler
}
prompts: {
@@ -30,4 +30,4 @@ pub fn new_mcp_server() !&mcp.Server {
}
})!
return server
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/rhai/mcp/prompts.v b/lib/mcp/rhai/mcp/prompts.v
index 95432ccf..e1632ef9 100644
--- a/lib/mcp/rhai/mcp/prompts.v
+++ b/lib/mcp/rhai/mcp/prompts.v
@@ -5,38 +5,39 @@ import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.mcp.rhai.logic
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.lang.rust
-import x.json2 as json { Any }
+import x.json2 as json
// Tool definition for the create_rhai_wrapper function
const rhai_wrapper_prompt_spec = mcp.Prompt{
- name: 'rhai_wrapper'
- description: 'provides a prompt for creating Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
- arguments: [
- mcp.PromptArgument{
- name: 'source_path'
- description: 'Path to the source directory'
- required: true
- }
- ]
+ name: 'rhai_wrapper'
+ description: 'provides a prompt for creating Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
+ arguments: [
+ mcp.PromptArgument{
+ name: 'source_path'
+ description: 'Path to the source directory'
+ required: true
+ },
+ ]
}
// Tool handler for the create_rhai_wrapper function
pub fn rhai_wrapper_prompt_handler(arguments []string) ![]mcp.PromptMessage {
source_path := arguments[0]
- // Read and combine all Rust files in the source directory
- source_code := rust.read_source_code(source_path)!
-
- // Extract the module name from the directory path (last component)
- name := rust.extract_module_name_from_path(source_path)
-
+ // Read and combine all Rust files in the source directory
+ source_code := rust.read_source_code(source_path)!
-result := logic.rhai_wrapper_generation_prompt(name, source_code)!
-return [mcp.PromptMessage{
- role: 'assistant'
- content: mcp.PromptContent{
- typ: 'text'
- text: result
- }
-}]
-}
\ No newline at end of file
+ // Extract the module name from the directory path (last component)
+ name := rust.extract_module_name_from_path(source_path)
+
+ result := logic.rhai_wrapper_generation_prompt(name, source_code)!
+ return [
+ mcp.PromptMessage{
+ role: 'assistant'
+ content: mcp.PromptContent{
+ typ: 'text'
+ text: result
+ }
+ },
+ ]
+}
diff --git a/lib/mcp/rhai/mcp/specifications.v b/lib/mcp/rhai/mcp/specifications.v
index 61e0cdd1..5b1644ba 100644
--- a/lib/mcp/rhai/mcp/specifications.v
+++ b/lib/mcp/rhai/mcp/specifications.v
@@ -1,19 +1,19 @@
module mcp
import freeflowuniverse.herolib.mcp
-import x.json2 as json { Any }
+import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import log
const specs = mcp.Tool{
name: 'rhai_interface'
description: 'Add Rhai Interface to Rust Code Files'
- input_schema: jsonschema.Schema{
+ input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string',
- description: 'Path to a .rs file or directory containing .rs files to make rhai interface for',
+ typ: 'string'
+ description: 'Path to a .rs file or directory containing .rs files to make rhai interface for'
})
}
required: ['path']
diff --git a/lib/mcp/rhai/mcp/tools.v b/lib/mcp/rhai/mcp/tools.v
index 2732a92d..c83fc61e 100644
--- a/lib/mcp/rhai/mcp/tools.v
+++ b/lib/mcp/rhai/mcp/tools.v
@@ -8,32 +8,31 @@ import x.json2 as json { Any }
// Tool definition for the generate_rhai_wrapper function
const generate_rhai_wrapper_spec = mcp.Tool{
- name: 'generate_rhai_wrapper'
- description: 'generate_rhai_wrapper receives the name of a V language function string, and the path to the module in which it exists.'
- input_schema: jsonschema.Schema{
- typ: 'object'
- properties: {
- 'name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- }),
- 'source_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
- })
- }
- required: ['name', 'source_path']
- }
+ name: 'generate_rhai_wrapper'
+ description: 'generate_rhai_wrapper receives the name of a V language function string, and the path to the module in which it exists.'
+ input_schema: jsonschema.Schema{
+ typ: 'object'
+ properties: {
+ 'name': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ 'source_path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
+ })
+ }
+ required: ['name', 'source_path']
+ }
}
// Tool handler for the generate_rhai_wrapper function
pub fn generate_rhai_wrapper_handler(arguments map[string]Any) !mcp.ToolCallResult {
name := arguments['name'].str()
source_path := arguments['source_path'].str()
- result := logic.generate_rhai_wrapper(name, source_path)
- or {
+ result := logic.generate_rhai_wrapper(name, source_path) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](result)
+ content: mcp.result_to_mcp_tool_contents[string](result)
}
}
diff --git a/lib/mcp/rhai/rhai.v b/lib/mcp/rhai/rhai.v
index 28d59606..adc61b86 100644
--- a/lib/mcp/rhai/rhai.v
+++ b/lib/mcp/rhai/rhai.v
@@ -1 +1 @@
-module rhai
\ No newline at end of file
+module rhai
diff --git a/lib/mcp/vcode/cmd/main.v b/lib/mcp/vcode/cmd/main.v
index eb7841f8..5a3d1c70 100644
--- a/lib/mcp/vcode/cmd/main.v
+++ b/lib/mcp/vcode/cmd/main.v
@@ -8,7 +8,7 @@ fn main() {
eprintln('Failed to create MCP server: ${err}')
return
}
-
+
// Start the server
server.start() or {
eprintln('Failed to start MCP server: ${err}')
diff --git a/lib/mcp/vcode/logic/server.v b/lib/mcp/vcode/logic/server.v
index 92b63139..cb69a00d 100644
--- a/lib/mcp/vcode/logic/server.v
+++ b/lib/mcp/vcode/logic/server.v
@@ -15,11 +15,11 @@ pub fn new_mcp_server(v &VCode) !&mcp.Server {
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
'get_function_from_file': get_function_from_file_tool
- 'write_vfile': write_vfile_tool
+ 'write_vfile': write_vfile_tool
}
tool_handlers: {
'get_function_from_file': v.get_function_from_file_tool_handler
- 'write_vfile': v.write_vfile_tool_handler
+ 'write_vfile': v.write_vfile_tool_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
@@ -30,4 +30,4 @@ pub fn new_mcp_server(v &VCode) !&mcp.Server {
}
})!
return server
-}
\ No newline at end of file
+}
diff --git a/lib/mcp/vcode/logic/vlang_tools.v b/lib/mcp/vcode/logic/vlang_tools.v
index 930bfa54..d33a73ed 100644
--- a/lib/mcp/vcode/logic/vlang_tools.v
+++ b/lib/mcp/vcode/logic/vlang_tools.v
@@ -3,7 +3,7 @@ module vcode
import freeflowuniverse.herolib.mcp
import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.schemas.jsonschema
-import x.json2 {Any}
+import x.json2 { Any }
const get_function_from_file_tool = mcp.Tool{
name: 'get_function_from_file'
@@ -16,10 +16,10 @@ RETURNS: string - the function block including comments, or empty string if not
typ: 'object'
properties: {
'file_path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
})
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ typ: 'string'
})
}
required: ['file_path', 'function_name']
diff --git a/lib/mcp/vcode/logic/write_vfile_tool.v b/lib/mcp/vcode/logic/write_vfile_tool.v
index 861e652e..39c542a6 100644
--- a/lib/mcp/vcode/logic/write_vfile_tool.v
+++ b/lib/mcp/vcode/logic/write_vfile_tool.v
@@ -3,7 +3,7 @@ module vcode
import freeflowuniverse.herolib.mcp
import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.schemas.jsonschema
-import x.json2 {Any}
+import x.json2 { Any }
const write_vfile_tool = mcp.Tool{
name: 'write_vfile'
@@ -18,20 +18,20 @@ RETURNS: string - success message with the path of the written file'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
- 'path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'path': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
})
- 'code': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'code': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
})
- 'format': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'boolean'
+ 'format': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'boolean'
})
'overwrite': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'boolean'
+ typ: 'boolean'
})
- 'prefix': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string'
+ 'prefix': jsonschema.SchemaRef(jsonschema.Schema{
+ typ: 'string'
})
}
required: ['path', 'code']
@@ -41,31 +41,27 @@ RETURNS: string - success message with the path of the written file'
pub fn (d &VCode) write_vfile_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
path := arguments['path'].str()
code_str := arguments['code'].str()
-
+
// Parse optional parameters with defaults
format := if 'format' in arguments { arguments['format'].bool() } else { false }
overwrite := if 'overwrite' in arguments { arguments['overwrite'].bool() } else { false }
prefix := if 'prefix' in arguments { arguments['prefix'].str() } else { '' }
-
+
// Create write options
options := code.WriteOptions{
- format: format
+ format: format
overwrite: overwrite
- prefix: prefix
+ prefix: prefix
}
-
+
// Parse the V code string into a VFile
- vfile := code.parse_vfile(code_str) or {
- return mcp.error_tool_call_result(err)
- }
-
+ vfile := code.parse_vfile(code_str) or { return mcp.error_tool_call_result(err) }
+
// Write the VFile to the specified path
- vfile.write(path, options) or {
- return mcp.error_tool_call_result(err)
- }
-
+ vfile.write(path, options) or { return mcp.error_tool_call_result(err) }
+
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string]('Successfully wrote V file to ${path}')
+ content: mcp.result_to_mcp_tool_contents[string]('Successfully wrote V file to ${path}')
}
}
diff --git a/lib/mcp/vcode/mcp/handlers.v b/lib/mcp/vcode/mcp/handlers.v
index 01b01c59..770bab95 100644
--- a/lib/mcp/vcode/mcp/handlers.v
+++ b/lib/mcp/vcode/mcp/handlers.v
@@ -8,47 +8,47 @@ import os
pub fn handler(arguments map[string]Any) !mcp.ToolCallResult {
path := arguments['path'].str()
-
+
// Check if path exists
if !os.exists(path) {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
+ content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
}
}
-
+
// Determine if path is a file or directory
is_directory := os.is_dir(path)
-
- mut message := ""
-
+
+ mut message := ''
+
if is_directory {
// Convert all pug files in the directory
pugconvert.convert_pug(path) or {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error converting pug files in directory: ${err}")
+ content: mcp.result_to_mcp_tool_contents[string]('Error converting pug files in directory: ${err}')
}
}
message = "Successfully converted all pug files in directory '${path}'"
- } else if path.ends_with(".v") {
+ } else if path.ends_with('.v') {
// Convert a single pug file
pugconvert.convert_pug_file(path) or {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error converting pug file: ${err}")
+ content: mcp.result_to_mcp_tool_contents[string]('Error converting pug file: ${err}')
}
}
message = "Successfully converted pug file '${path}'"
} else {
return mcp.ToolCallResult{
is_error: true
- content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
+ content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
}
}
return mcp.ToolCallResult{
is_error: false
- content: mcp.result_to_mcp_tool_contents[string](message)
+ content: mcp.result_to_mcp_tool_contents[string](message)
}
}
diff --git a/lib/mcp/vcode/mcp/specifications.v b/lib/mcp/vcode/mcp/specifications.v
index 91ed9f1c..3990b577 100644
--- a/lib/mcp/vcode/mcp/specifications.v
+++ b/lib/mcp/vcode/mcp/specifications.v
@@ -1,18 +1,18 @@
module pugconvert
import freeflowuniverse.herolib.mcp
-import x.json2 as json { Any }
+import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.mcp.logger
const specs = mcp.Tool{
name: 'pugconvert'
description: 'Convert Pug template files to Jet template files'
- input_schema: jsonschema.Schema{
+ input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
- typ: 'string',
+ typ: 'string'
description: 'Path to a .pug file or directory containing .pug files to convert'
})
}
diff --git a/lib/osal/env.v b/lib/osal/env.v
index 9328de89..c9e54994 100644
--- a/lib/osal/env.v
+++ b/lib/osal/env.v
@@ -57,14 +57,13 @@ pub fn env_get(key string) !string {
}
pub fn env_exists(key string) !bool {
- k:=os.environ()
- if key in k{
+ k := os.environ()
+ if key in k {
return true
}
return false
}
-
// Returns the requested environment variable if it exists or returns the provided default value if it does not
pub fn env_get_default(key string, def string) string {
return os.environ()[key] or { return def }
diff --git a/lib/osal/startupmanager/startupmanager_test.v b/lib/osal/startupmanager/startupmanager_test.v
index a67feb60..f8f8029f 100644
--- a/lib/osal/startupmanager/startupmanager_test.v
+++ b/lib/osal/startupmanager/startupmanager_test.v
@@ -106,11 +106,11 @@ pub fn test_process_with_description() ! {
// Verify screen session
screen_factory.scan()!
-
+
if screen_factory.exists(process_desc_name) {
// Only test status if screen exists
mut screen_instance := screen_factory.get(process_desc_name)!
-
+
// Check status only if screen exists
status := screen_instance.status() or { screen.ScreenStatus.unknown }
println('Screen status: ${status}')
diff --git a/lib/vfs/vfs_local/vfs_implementation.v b/lib/vfs/vfs_local/vfs_implementation.v
index 087171a0..e642256e 100644
--- a/lib/vfs/vfs_local/vfs_implementation.v
+++ b/lib/vfs/vfs_local/vfs_implementation.v
@@ -286,17 +286,17 @@ pub fn (myvfs LocalVFS) file_concatenate(path string, data []u8) ! {
if os.is_dir(abs_path) {
return error('Cannot concatenate to directory: ${path}')
}
-
+
// Read existing content
existing_content := os.read_bytes(abs_path) or {
return error('Failed to read file ${path}: ${err}')
}
-
+
// Create a new buffer with the combined content
mut new_content := []u8{cap: existing_content.len + data.len}
new_content << existing_content
new_content << data
-
+
// Write back to file
os.write_file(abs_path, new_content.bytestr()) or {
return error('Failed to write concatenated data to file ${path}: ${err}')
@@ -314,13 +314,13 @@ pub fn (myvfs LocalVFS) get_path(entry &vfs.FSEntry) !string {
pub fn (myvfs LocalVFS) print() ! {
println('LocalVFS:')
println(' Root path: ${myvfs.root_path}')
-
+
// Print root directory contents
root_entries := myvfs.dir_list('') or {
println(' Error listing root directory: ${err}')
return
}
-
+
println(' Root entries: ${root_entries.len}')
for entry in root_entries {
metadata := entry.get_metadata()
diff --git a/lib/web/docusaurus/dsite_get.v b/lib/web/docusaurus/dsite_get.v
index 40cb4df1..f1f6d25e 100644
--- a/lib/web/docusaurus/dsite_get.v
+++ b/lib/web/docusaurus/dsite_get.v
@@ -74,7 +74,7 @@ pub fn (mut f DocusaurusFactory) get(args_ DSiteGetArgs) !&DocSite {
if args.init {
// Create docs directory if it doesn't exist in template or site
os.mkdir_all('${args.path}/docs')!
-
+
// Create a default docs/intro.md file
intro_content := '---
title: Introduction
diff --git a/lib/web/docusaurus/factory.v b/lib/web/docusaurus/factory.v
index b8a3ed71..f745294b 100644
--- a/lib/web/docusaurus/factory.v
+++ b/lib/web/docusaurus/factory.v
@@ -14,7 +14,7 @@ pub mut:
sites []&DocSite @[skip; str: skip]
path_build pathlib.Path
// path_publish pathlib.Path
- args DocusaurusArgs
+ args DocusaurusArgs
config Config // Stores configuration from HeroScript if provided
}
@@ -22,10 +22,10 @@ pub mut:
pub struct DocusaurusArgs {
pub mut:
// publish_path string
- build_path string
- production bool
- update bool
- heroscript string
+ build_path string
+ production bool
+ update bool
+ heroscript string
heroscript_path string
}
@@ -37,7 +37,7 @@ pub fn new(args_ DocusaurusArgs) !&DocusaurusFactory {
// if args.publish_path == ""{
// args.publish_path = "${os.home_dir()}/hero/var/docusaurus/publish"
// }
-
+
// Create the factory instance
mut ds := &DocusaurusFactory{
args: args_
@@ -48,21 +48,21 @@ pub fn new(args_ DocusaurusArgs) !&DocusaurusFactory {
// Process HeroScript
mut heroscript_text := args.heroscript
mut heroscript_path := args.heroscript_path
-
+
// If no heroscript is explicitly provided, check current directory
if heroscript_text == '' && heroscript_path == '' {
// First check if there's a .heroscript file in the current directory
current_dir := os.getwd()
- cfg_dir := os.join_path(current_dir, 'cfg')
+ cfg_dir := os.join_path(current_dir, 'cfg')
if os.exists(cfg_dir) {
heroscript_path = cfg_dir
}
}
-
+
// Process any HeroScript that was found
if heroscript_text != '' || heroscript_path != '' {
ds.config = play(
- heroscript: heroscript_text
+ heroscript: heroscript_text
heroscript_path: heroscript_path
)!
}
diff --git a/lib/web/docusaurus/play.v b/lib/web/docusaurus/play.v
index 4b510c12..ecca1b66 100644
--- a/lib/web/docusaurus/play.v
+++ b/lib/web/docusaurus/play.v
@@ -7,35 +7,35 @@ import os
@[params]
pub struct PlayArgs {
pub mut:
- heroscript string // if filled in then playbook will be made out of it
+ heroscript string // if filled in then playbook will be made out of it
heroscript_path string // path to a file containing heroscript
- plbook ?PlayBook
- reset bool
+ plbook ?PlayBook
+ reset bool
}
// Process the heroscript and return a filled Config object
pub fn play(args_ PlayArgs) !Config {
mut heroscript_text := args_.heroscript
-
+
// If heroscript_path is provided, read the script from the file
if args_.heroscript_path != '' && heroscript_text == '' {
heroscript_text = os.read_file(args_.heroscript_path) or {
return error('Failed to read heroscript from ${args_.heroscript_path}: ${err}')
}
}
-
+
// If no heroscript is provided, return an empty config
if heroscript_text == '' && args_.plbook == none {
- return Config{}
+ return Config{}
}
-
+
// Create playbook from the heroscript text
mut plbook := if pb := args_.plbook {
pb
} else {
playbook.new(text: heroscript_text)!
}
-
+
mut config := Config{}
play_config(mut plbook, mut config)!
@@ -45,7 +45,7 @@ pub fn play(args_ PlayArgs) !Config {
play_build_dest(mut plbook, mut config)!
play_navbar(mut plbook, mut config)!
play_footer(mut plbook, mut config)!
-
+
return config
}
@@ -55,7 +55,7 @@ fn play_config(mut plbook PlayBook, mut config Config) ! {
mut p := action.params
// Get optional name parameter or use base_url as fallback
name := p.get_default('name', 'docusaurus-site')!
-
+
config.main = Main{
name: name
title: p.get_default('title', 'Documentation Site')!
@@ -65,7 +65,8 @@ fn play_config(mut plbook PlayBook, mut config Config) ! {
url_home: p.get_default('url_home', 'docs/')!
base_url: p.get_default('base_url', '/')!
image: p.get_default('image', 'img/hero.png')!
- copyright: p.get_default('copyright', '© ' + time.now().year.str() + ' Example Organization')!
+ copyright: p.get_default('copyright', '© ' + time.now().year.str() +
+ ' Example Organization')!
}
}
}
diff --git a/lib/web/docusaurus/template.v b/lib/web/docusaurus/template.v
index 0dfc1455..5c9dd16b 100644
--- a/lib/web/docusaurus/template.v
+++ b/lib/web/docusaurus/template.v
@@ -67,7 +67,7 @@ fn (mut self DocusaurusFactory) generate_package_json() ! {
} else if self.config.navbar.title != '' {
name = self.config.navbar.title.to_lower().replace(' ', '-')
}
-
+
// Create the JSON structure manually
package_json := '{
"name": "${name}",
@@ -107,10 +107,12 @@ fn (mut self DocusaurusFactory) generate_package_json() ! {
"engines": {
"node": ">=18.0"
}
-}'
-
- // Write to file
- mut package_file := pathlib.get_file(path: os.join_path(self.path_build.path, 'package.json'), create: true)!
+}' // Write to file
+
+ mut package_file := pathlib.get_file(
+ path: os.join_path(self.path_build.path, 'package.json')
+ create: true
+ )!
package_file.write(package_json)!
}
@@ -125,9 +127,12 @@ fn (mut self DocusaurusFactory) generate_tsconfig_json() ! {
},
"include": ["src/**/*", "docusaurus.config.ts"]
}'
-
+
// Write to file
- mut tsconfig_file := pathlib.get_file(path: os.join_path(self.path_build.path, 'tsconfig.json'), create: true)!
+ mut tsconfig_file := pathlib.get_file(
+ path: os.join_path(self.path_build.path, 'tsconfig.json')
+ create: true
+ )!
tsconfig_file.write(tsconfig_json)!
}
@@ -153,7 +158,10 @@ const sidebars: SidebarsConfig = {
export default sidebars;
"
- mut sidebars_file := pathlib.get_file(path: os.join_path(self.path_build.path, 'sidebars.ts'), create: true)!
+ mut sidebars_file := pathlib.get_file(
+ path: os.join_path(self.path_build.path, 'sidebars.ts')
+ create: true
+ )!
sidebars_file.write(sidebar_content)!
}
@@ -161,11 +169,19 @@ export default sidebars;
fn (mut self DocusaurusFactory) generate_docusaurus_config_ts() ! {
// Use config values with fallbacks
title := if self.config.main.title != '' { self.config.main.title } else { 'Docusaurus Site' }
- tagline := if self.config.main.tagline != '' { self.config.main.tagline } else { 'Documentation Site' }
+ tagline := if self.config.main.tagline != '' {
+ self.config.main.tagline
+ } else {
+ 'Documentation Site'
+ }
url := if self.config.main.url != '' { self.config.main.url } else { 'https://example.com' }
base_url := if self.config.main.base_url != '' { self.config.main.base_url } else { '/' }
- favicon := if self.config.main.favicon != '' { self.config.main.favicon } else { 'img/favicon.png' }
-
+ favicon := if self.config.main.favicon != '' {
+ self.config.main.favicon
+ } else {
+ 'img/favicon.png'
+ }
+
// Format navbar items from config
mut navbar_items := []string{}
for item in self.config.navbar.items {
@@ -175,19 +191,19 @@ fn (mut self DocusaurusFactory) generate_docusaurus_config_ts() ! {
position: '${item.position}'
}"
}
-
+
navbar_items_str := navbar_items.join(',\n ')
-
+
// Generate footer links if available
mut footer_links := []string{}
for link in self.config.footer.links {
mut items := []string{}
for item in link.items {
- mut item_str := "{"
+ mut item_str := '{'
if item.label != '' {
item_str += "label: '${item.label}', "
}
-
+
// Ensure only one of 'to', 'href', or 'html' is used
// Priority: href > to > html
if item.href != '' {
@@ -198,11 +214,11 @@ fn (mut self DocusaurusFactory) generate_docusaurus_config_ts() ! {
// Default to linking to docs if nothing specified
item_str += "to: '/docs'"
}
-
- item_str += "}"
+
+ item_str += '}'
items << item_str
}
-
+
footer_links << "{
title: '${link.title}',
items: [
@@ -210,16 +226,16 @@ fn (mut self DocusaurusFactory) generate_docusaurus_config_ts() ! {
]
}"
}
-
+
footer_links_str := footer_links.join(',\n ')
-
+
// Year for copyright
year := time.now().year.str()
-
+
copyright := if self.config.main.copyright != '' {
self.config.main.copyright
} else {
- "Copyright © ${year} ${title}"
+ 'Copyright © ${year} ${title}'
}
// Construct the full config file content
@@ -295,7 +311,10 @@ const config: Config = {
export default config;
"
-
- mut config_file := pathlib.get_file(path: os.join_path(self.path_build.path, 'docusaurus.config.ts'), create: true)!
+
+ mut config_file := pathlib.get_file(
+ path: os.join_path(self.path_build.path, 'docusaurus.config.ts')
+ create: true
+ )!
config_file.write(config_content)!
}
diff --git a/vscodeplugin/heroscrypt-syntax/heroscript-syntax-0.0.1.vsix b/vscodeplugin/heroscrypt-syntax/heroscript-syntax-0.0.1.vsix
new file mode 100644
index 00000000..b9b2c65c
Binary files /dev/null and b/vscodeplugin/heroscrypt-syntax/heroscript-syntax-0.0.1.vsix differ
diff --git a/vscodeplugin/heroscrypt-syntax/language-configuration.json b/vscodeplugin/heroscrypt-syntax/language-configuration.json
new file mode 100644
index 00000000..3660815c
--- /dev/null
+++ b/vscodeplugin/heroscrypt-syntax/language-configuration.json
@@ -0,0 +1,35 @@
+{
+ "comments": {
+ "lineComment": "//"
+ },
+ "brackets": [
+ ["{", "}"],
+ ["[", "]"],
+ ["(", ")"]
+ ],
+ "autoClosingPairs": [
+ { "open": "{", "close": "}" },
+ { "open": "[", "close": "]" },
+ { "open": "(", "close": ")" },
+ { "open": "'", "close": "'", "notIn": ["string", "comment"] },
+ { "open": "\"", "close": "\"", "notIn": ["string"] }
+ ],
+ "surroundingPairs": [
+ ["{", "}"],
+ ["[", "]"],
+ ["(", ")"],
+ ["'", "'"],
+ ["\"", "\""]
+ ],
+ "folding": {
+ "markers": {
+ "start": "^!!",
+ "end": "(?=^!!|\\z)"
+ }
+ },
+ "wordPattern": "([A-Za-z][A-Za-z0-9_]*)|([^\\`\\~\\!\\@\\#\\%\\^\\&\\*\\(\\)\\-\\=\\+\\[\\{\\]\\}\\\\\\|\\;\\:\\'\\\"\\,\\.\\<\\>\\/\\?\\s]+)",
+ "indentationRules": {
+ "increaseIndentPattern": "^!!.*$",
+ "decreaseIndentPattern": "^(?!!).*$"
+ }
+}
\ No newline at end of file
diff --git a/vscodeplugin/heroscrypt-syntax/package.json b/vscodeplugin/heroscrypt-syntax/package.json
new file mode 100644
index 00000000..7934c38e
--- /dev/null
+++ b/vscodeplugin/heroscrypt-syntax/package.json
@@ -0,0 +1,22 @@
+{
+ "name": "heroscript-syntax",
+ "displayName": "HeroScript Syntax",
+ "description": "Syntax highlighting for HeroScript",
+ "version": "0.0.1",
+ "engines": {
+ "vscode": "^1.60.0"
+ },
+ "contributes": {
+ "languages": [{
+ "id": "heroscript",
+ "aliases": ["HeroScript", "heroscript"],
+ "extensions": [".hero",".heroscript"],
+ "configuration": "./language-configuration.json"
+ }],
+ "grammars": [{
+ "language": "heroscript",
+ "scopeName": "source.heroscript",
+ "path": "./syntaxes/heroscript.tmLanguage.json"
+ }]
+ }
+ }
\ No newline at end of file
diff --git a/vscodeplugin/heroscrypt-syntax/syntaxes/heroscript.tmLanguage.json b/vscodeplugin/heroscrypt-syntax/syntaxes/heroscript.tmLanguage.json
new file mode 100644
index 00000000..bf9a0543
--- /dev/null
+++ b/vscodeplugin/heroscrypt-syntax/syntaxes/heroscript.tmLanguage.json
@@ -0,0 +1,74 @@
+{
+ "$schema": "https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json",
+ "name": "HeroScript",
+ "patterns": [
+ {
+ "include": "#actions"
+ },
+ {
+ "include": "#parameters"
+ },
+ {
+ "include": "#strings"
+ },
+ {
+ "include": "#comments"
+ }
+ ],
+ "repository": {
+ "actions": {
+ "patterns": [
+ {
+ "match": "^(!!)([\\w]+)(\\.)([\\w]+)",
+ "captures": {
+ "1": { "name": "keyword.control.heroscript" },
+ "2": { "name": "entity.name.class.heroscript" },
+ "3": { "name": "keyword.operator.heroscript" },
+ "4": { "name": "entity.name.function.heroscript" }
+ }
+ }
+ ]
+ },
+ "parameters": {
+ "patterns": [
+ {
+ "match": "^\\s*(\\w+)\\s*:",
+ "captures": {
+ "1": { "name": "variable.parameter.heroscript" }
+ }
+ }
+ ]
+ },
+ "strings": {
+ "patterns": [
+ {
+ "name": "string.quoted.single.heroscript",
+ "begin": "'",
+ "end": "'",
+ "patterns": [
+ {
+ "name": "constant.character.escape.heroscript",
+ "match": "\\\\."
+ }
+ ]
+ },
+ {
+ "name": "string.quoted.double.heroscript",
+ "begin": "\"",
+ "end": "\"",
+ "patterns": [
+ {
+ "name": "constant.character.escape.heroscript",
+ "match": "\\\\."
+ }
+ ]
+ }
+ ]
+ },
+ "comments": {
+ "name": "comment.line.double-slash.heroscript",
+ "match": "//.*$"
+ }
+ },
+ "scopeName": "source.heroscript"
+}
\ No newline at end of file
diff --git a/vscodeplugin/install_ubuntu.sh b/vscodeplugin/install_ubuntu.sh
new file mode 100644
index 00000000..dc21bdce
--- /dev/null
+++ b/vscodeplugin/install_ubuntu.sh
@@ -0,0 +1,3 @@
+apt-get install libsecret-1-dev -y
+apt install nodejs npm -y
+npx @vscode/vsce --version
\ No newline at end of file
diff --git a/vscodeplugin/package.sh b/vscodeplugin/package.sh
new file mode 100755
index 00000000..8ed10c25
--- /dev/null
+++ b/vscodeplugin/package.sh
@@ -0,0 +1,5 @@
+cd /root/code/github/freeflowuniverse/crystallib/vscodeplugin/heroscrypt-syntax
+npx @vscode/vsce package
+
+#code --install-extension /root/code/github/freeflowuniverse/crystallib/vscodeplugin/heroscrypt-syntax/heroscript-syntax-0.0.1.vsix
+
diff --git a/vscodeplugin/readme.md b/vscodeplugin/readme.md
new file mode 100644
index 00000000..3e5d9f3d
--- /dev/null
+++ b/vscodeplugin/readme.md
@@ -0,0 +1,8 @@
+
+
+you can go to the Extensions view and install the .vsix
+
+/root/code/github/freeflowuniverse/crystallib/vscodeplugin/heroscrypt-syntax/heroscript-syntax-0.0.1.vsix
+
+alternatively, in VS Code, right-click the .vsix file and choose the option to install the extension from it
+