2025-05-04 08:19:47 +03:00
parent d8a59d0726
commit 46e1c6706c
177 changed files with 5708 additions and 5512 deletions

View File

@@ -5,25 +5,24 @@ module main
import freeflowuniverse.herolib.clients.openai
import os
fn test1(mut client openai.OpenAI)!{
instruction:='
fn test1(mut client openai.OpenAI) ! {
instruction := '
You are a template language converter. You convert Pug templates to Jet templates.
The target template language, Jet, is defined as follows:
'
// Create a chat completion request
res := client.chat_completion(msgs:openai.Messages{
messages: [
openai.Message{
role: .user
content: 'What are the key differences between Groq and other AI inference providers?'
},
]
})!
res := client.chat_completion(
msgs: openai.Messages{
messages: [
openai.Message{
role: .user
content: 'What are the key differences between Groq and other AI inference providers?'
},
]
}
)!
// Print the response
println('\nGroq AI Response:')
@@ -33,23 +32,21 @@ fn test1(mut client openai.OpenAI)!{
println('Prompt tokens: ${res.usage.prompt_tokens}')
println('Completion tokens: ${res.usage.completion_tokens}')
println('Total tokens: ${res.usage.total_tokens}')
}
fn test2(mut client openai.OpenAI)!{
fn test2(mut client openai.OpenAI) ! {
// Create a chat completion request
res := client.chat_completion(
model:"deepseek-r1-distill-llama-70b",
msgs:openai.Messages{
messages: [
openai.Message{
role: .user
content: 'A story of 10 lines?'
},
]
})!
model: 'deepseek-r1-distill-llama-70b'
msgs: openai.Messages{
messages: [
openai.Message{
role: .user
content: 'A story of 10 lines?'
},
]
}
)!
println('\nGroq AI Response:')
println('==================')
@@ -57,21 +54,18 @@ fn test2(mut client openai.OpenAI)!{
println('\nUsage Statistics:')
println('Prompt tokens: ${res.usage.prompt_tokens}')
println('Completion tokens: ${res.usage.completion_tokens}')
println('Total tokens: ${res.usage.total_tokens}')
println('Total tokens: ${res.usage.total_tokens}')
}
println('
println("
TO USE:
export AIKEY=\'gsk_...\'
export AIURL=\'https://api.groq.com/openai/v1\'
export AIMODEL=\'llama-3.3-70b-versatile\'
')
export AIKEY='gsk_...'
export AIURL='https://api.groq.com/openai/v1'
export AIMODEL='llama-3.3-70b-versatile'
")
mut client:=openai.get(name:"test")!
mut client := openai.get(name: 'test')!
println(client)
// test1(mut client)!
test2(mut client)!
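A minimal driver sketch (not part of this commit), assuming openai.get resolves the AIKEY/AIURL/AIMODEL variables from the environment as the usage note above documents:
#!/usr/bin/env -S v run
import freeflowuniverse.herolib.clients.openai
import os

// Hypothetical driver: export the variables the usage note asks for, then
// fetch the configured client. Assumes openai.get reads them from the env.
os.setenv('AIKEY', 'gsk_...', true)
os.setenv('AIURL', 'https://api.groq.com/openai/v1', true)
os.setenv('AIMODEL', 'llama-3.3-70b-versatile', true)
client := openai.get(name: 'test')!
println(client)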

View File

@@ -4,4 +4,4 @@ import freeflowuniverse.herolib.mcp.aitools
// aitools.convert_pug("/root/code/github/freeflowuniverse/herolauncher/pkg/herolauncher/web/templates/admin")!
aitools.convert_pug("/root/code/github/freeflowuniverse/herolauncher/pkg/zaz/webui/templates")!
aitools.convert_pug('/root/code/github/freeflowuniverse/herolauncher/pkg/zaz/webui/templates')!

View File

@@ -12,7 +12,7 @@ println('Starting Qdrant example script')
println('Current directory: ${os.getwd()}')
println('Home directory: ${os.home_dir()}')
mut i:=qdrant_installer.get()!
mut i := qdrant_installer.get()!
i.install()!
// 1. Get the qdrant client

View File

@@ -6,5 +6,3 @@ import freeflowuniverse.herolib.web.docusaurus
mut docs := docusaurus.new(
build_path: '/tmp/docusaurus_build'
)!

View File

@@ -90,14 +90,13 @@ fn main() {
'
mut docs := docusaurus.new(
build_path: os.join_path(os.home_dir(), 'hero/var/docusaurus_demo1')
update: true // Update the templates
heroscript: hero_script
build_path: os.join_path(os.home_dir(), 'hero/var/docusaurus_demo1')
update: true // Update the templates
heroscript: hero_script
) or {
eprintln('Error creating docusaurus factory with inline script: ${err}')
exit(1)
eprintln('Error creating docusaurus factory with inline script: ${err}')
exit(1)
}
// Create a site directory if it doesn't exist
site_path := os.join_path(os.home_dir(), 'hero/var/docusaurus_demo_src')
@@ -204,19 +203,19 @@ console.log(result);
eprintln('Error generating site: ${err}')
exit(1)
}
println('Site generated successfully!')
// Choose which operation to perform:
// Option 1: Run in development mode
// Option 1: Run in development mode
// This will start a development server in a screen session
println('Starting development server...')
site.dev() or {
eprintln('Error starting development server: ${err}')
exit(1)
}
// Option 2: Build for production (uncomment to use)
/*
println('Building site for production...')
@@ -236,4 +235,4 @@ console.log(result);
}
println('Site published successfully!')
*/
}
}

View File

@@ -6,35 +6,35 @@ import freeflowuniverse.herolib.clients.openai
@[params]
pub struct TaskParams {
pub:
name string
description string
name string
description string
}
// Create a new task
pub fn new_task(params TaskParams) &Task {
return &Task{
name: params.name
description: params.description
unit_tasks: []
current_result: ''
}
return &Task{
name: params.name
description: params.description
unit_tasks: []
current_result: ''
}
}
// Default model configurations
pub fn default_base_model() ModelConfig {
return ModelConfig{
name: 'qwen2.5-7b-instruct'
provider: 'openai'
temperature: 0.7
max_tokens: 2000
}
return ModelConfig{
name: 'qwen2.5-7b-instruct'
provider: 'openai'
temperature: 0.7
max_tokens: 2000
}
}
pub fn default_retry_model() ModelConfig {
return ModelConfig{
name: 'gpt-4'
provider: 'openai'
temperature: 0.7
max_tokens: 4000
}
}
return ModelConfig{
name: 'gpt-4'
provider: 'openai'
temperature: 0.7
max_tokens: 4000
}
}
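A minimal sketch (not part of this commit) of composing these constructors inside the same module:
// Illustrative only: build a task and inspect the default model configs.
fn demo_factory() {
	task := new_task(name: 'summarize', description: 'summarize an input document')
	println(task.name) // summarize
	println(default_base_model().name) // qwen2.5-7b-instruct
	println(default_retry_model().max_tokens) // 4000
}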

View File

@@ -5,59 +5,58 @@ import freeflowuniverse.herolib.clients.openai
// ModelConfig defines the configuration for an AI model
pub struct ModelConfig {
pub mut:
name string
provider string
temperature f32
max_tokens int
name string
provider string
temperature f32
max_tokens int
}
// Create model configs
const claude_3_sonnet = escalayer.ModelConfig{
name: 'anthropic/claude-3.7-sonnet'
provider: 'anthropic'
temperature: 0.7
max_tokens: 25000
const claude_3_sonnet = ModelConfig{
name: 'anthropic/claude-3.7-sonnet'
provider: 'anthropic'
temperature: 0.7
max_tokens: 25000
}
const gpt4 = escalayer.ModelConfig{
name: 'gpt-4'
provider: 'openai'
temperature: 0.7
max_tokens: 25000
const gpt4 = ModelConfig{
name: 'gpt-4'
provider: 'openai'
temperature: 0.7
max_tokens: 25000
}
// Call an AI model using OpenRouter
fn call_ai_model(prompt string, model ModelConfig)! string {
// Get OpenAI client (configured for OpenRouter)
mut client := get_openrouter_client()!
// Create the message for the AI
mut m := openai.Messages{
messages: [
openai.Message{
role: .system
content: 'You are a helpful assistant.'
},
openai.Message{
role: .user
content: prompt
}
]
}
// Call the AI model
res := client.chat_completion(
msgs: m,
model: model.name,
temperature: model.temperature,
max_completion_tokens: model.max_tokens
)!
// Extract the response content
if res.choices.len > 0 {
return res.choices[0].message.content
}
return error('No response from AI model')
}
fn call_ai_model(prompt string, model ModelConfig) !string {
// Get OpenAI client (configured for OpenRouter)
mut client := get_openrouter_client()!
// Create the message for the AI
mut m := openai.Messages{
messages: [
openai.Message{
role: .system
content: 'You are a helpful assistant.'
},
openai.Message{
role: .user
content: prompt
},
]
}
// Call the AI model
res := client.chat_completion(
msgs: m
model: model.name
temperature: model.temperature
max_completion_tokens: model.max_tokens
)!
// Extract the response content
if res.choices.len > 0 {
return res.choices[0].message.content
}
return error('No response from AI model')
}
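A minimal call sketch (not part of this commit), reusing the gpt4 config defined in this file:
// Illustrative only: one-shot prompt against the OpenRouter-backed client.
fn demo_call() ! {
	answer := call_ai_model('Reply with a single word: hello', gpt4)!
	println(answer)
}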

View File

@@ -5,19 +5,18 @@ import freeflowuniverse.herolib.osal
import os
// Get an OpenAI client configured for OpenRouter
fn get_openrouter_client()! &openai.OpenAI {
fn get_openrouter_client() !&openai.OpenAI {
osal.env_set(key: 'OPENROUTER_API_KEY', value: '')
// Get API key from environment variable
api_key := os.getenv('OPENROUTER_API_KEY')
if api_key == '' {
return error('OPENROUTER_API_KEY environment variable not set')
}
// Create OpenAI client with OpenRouter base URL
mut client := openai.get(
name: 'openrouter'
)!
return client
}
// Get API key from environment variable
api_key := os.getenv('OPENROUTER_API_KEY')
if api_key == '' {
return error('OPENROUTER_API_KEY environment variable not set')
}
// Create OpenAI client with OpenRouter base URL
mut client := openai.get(
name: 'openrouter'
)!
return client
}

View File

@@ -5,53 +5,61 @@ import log
// Task represents a complete AI task composed of multiple sequential unit tasks
pub struct Task {
pub mut:
name string
description string
unit_tasks []UnitTask
current_result string
name string
description string
unit_tasks []UnitTask
current_result string
}
// UnitTaskParams defines the parameters for creating a new unit task
@[params]
pub struct UnitTaskParams {
pub:
name string
prompt_function fn(string) string
callback_function fn(string)! string
base_model ?ModelConfig
retry_model ?ModelConfig
retry_count ?int
name string
prompt_function fn (string) string
callback_function fn (string) !string
base_model ?ModelConfig
retry_model ?ModelConfig
retry_count ?int
}
// Add a new unit task to the task
pub fn (mut t Task) new_unit_task(params UnitTaskParams) &UnitTask {
mut unit_task := UnitTask{
name: params.name
prompt_function: params.prompt_function
callback_function: params.callback_function
base_model: if base_model := params.base_model { base_model } else { default_base_model() }
retry_model: if retry_model := params.retry_model { retry_model } else { default_retry_model() }
retry_count: if retry_count := params.retry_count { retry_count } else { 3 }
}
t.unit_tasks << unit_task
return &t.unit_tasks[t.unit_tasks.len - 1]
mut unit_task := UnitTask{
name: params.name
prompt_function: params.prompt_function
callback_function: params.callback_function
base_model: if base_model := params.base_model {
base_model
} else {
default_base_model()
}
retry_model: if retry_model := params.retry_model {
retry_model
} else {
default_retry_model()
}
retry_count: if retry_count := params.retry_count { retry_count } else { 3 }
}
t.unit_tasks << unit_task
return &t.unit_tasks[t.unit_tasks.len - 1]
}
// Initiate the task execution
pub fn (mut t Task) initiate(input string)! string {
mut current_input := input
for i, mut unit_task in t.unit_tasks {
log.error('Executing unit task ${i+1}/${t.unit_tasks.len}: ${unit_task.name}')
// Execute the unit task with the current input
result := unit_task.execute(current_input)!
// Update the current input for the next unit task
current_input = result
t.current_result = result
}
return t.current_result
}
pub fn (mut t Task) initiate(input string) !string {
mut current_input := input
for i, mut unit_task in t.unit_tasks {
log.error('Executing unit task ${i + 1}/${t.unit_tasks.len}: ${unit_task.name}')
// Execute the unit task with the current input
result := unit_task.execute(current_input)!
// Update the current input for the next unit task
current_input = result
t.current_result = result
}
return t.current_result
}
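A minimal pipeline sketch (not part of this commit): each unit task's prompt_function receives the previous step's result as its input string, and a callback error triggers the retry machinery in unit_task.v:
// Illustrative only.
fn demo_pipeline() ! {
	mut t := new_task(name: 'pipeline', description: 'one-step demo')
	step := t.new_unit_task(
		name: 'draft'
		prompt_function: fn (input string) string {
			return 'Write a one-line summary of: ${input}'
		}
		callback_function: fn (resp string) !string {
			if resp.trim_space() == '' {
				return error('empty response') // an error here triggers a retry
			}
			return resp
		}
	)
	println('registered: ${step.name}')
	final := t.initiate('raw source text')!
	println(final)
}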

View File

@@ -6,66 +6,66 @@ import freeflowuniverse.herolib.clients.openai
// UnitTask represents a single step in the task
pub struct UnitTask {
pub mut:
name string
prompt_function fn(string) string
callback_function fn(string)! string
base_model ModelConfig
retry_model ModelConfig
retry_count int
name string
prompt_function fn (string) string
callback_function fn (string) !string
base_model ModelConfig
retry_model ModelConfig
retry_count int
}
// Execute the unit task
pub fn (mut ut UnitTask) execute(input string)! string {
// Generate the prompt using the prompt function
prompt := ut.prompt_function(input)
// Try with the base model first
mut current_model := ut.base_model
mut attempts := 0
mut max_attempts := ut.retry_count + 1 // +1 for the initial attempt
mut absolute_max_attempts := 1 // Hard limit on total attempts
mut last_error := ''
for attempts < max_attempts && attempts < absolute_max_attempts {
attempts++
// If we've exhausted retries with the base model, switch to the retry model
if attempts > ut.retry_count {
log.error('Escalating to more powerful model: ${ut.retry_model.name}')
current_model = ut.retry_model
// Calculate remaining attempts but don't exceed absolute max
max_attempts = attempts + ut.retry_count
if max_attempts > absolute_max_attempts {
max_attempts = absolute_max_attempts
}
}
log.error('Attempt ${attempts} with model ${current_model.name}')
// Prepare the prompt with error feedback if this is a retry
mut current_prompt := prompt
if last_error != '' {
current_prompt = 'Previous attempt failed with error: ${last_error}\n\n${prompt}'
}
// Call the AI model
response := call_ai_model(current_prompt, current_model) or {
log.error('AI call failed: ${err}')
last_error = err.str()
continue // Try again
}
// Process the response with the callback function
result := ut.callback_function(response) or {
// If callback returns an error, retry with the error message
log.error('Callback returned error: ${err}')
last_error = err.str()
continue // Try again
}
// If we get here, the callback was successful
return result
}
return error('Failed to execute unit task after ${attempts} attempts. Last error: ${last_error}')
}
pub fn (mut ut UnitTask) execute(input string) !string {
// Generate the prompt using the prompt function
prompt := ut.prompt_function(input)
// Try with the base model first
mut current_model := ut.base_model
mut attempts := 0
mut max_attempts := ut.retry_count + 1 // +1 for the initial attempt
mut absolute_max_attempts := 1 // Hard limit on total attempts
mut last_error := ''
for attempts < max_attempts && attempts < absolute_max_attempts {
attempts++
// If we've exhausted retries with the base model, switch to the retry model
if attempts > ut.retry_count {
log.error('Escalating to more powerful model: ${ut.retry_model.name}')
current_model = ut.retry_model
// Calculate remaining attempts but don't exceed absolute max
max_attempts = attempts + ut.retry_count
if max_attempts > absolute_max_attempts {
max_attempts = absolute_max_attempts
}
}
log.error('Attempt ${attempts} with model ${current_model.name}')
// Prepare the prompt with error feedback if this is a retry
mut current_prompt := prompt
if last_error != '' {
current_prompt = 'Previous attempt failed with error: ${last_error}\n\n${prompt}'
}
// Call the AI model
response := call_ai_model(current_prompt, current_model) or {
log.error('AI call failed: ${err}')
last_error = err.str()
continue // Try again
}
// Process the response with the callback function
result := ut.callback_function(response) or {
// If callback returns an error, retry with the error message
log.error('Callback returned error: ${err}')
last_error = err.str()
continue // Try again
}
// If we get here, the callback was successful
return result
}
return error('Failed to execute unit task after ${attempts} attempts. Last error: ${last_error}')
}
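One reading note on this hunk: absolute_max_attempts is initialized to 1, so the loop exits after a single attempt and the escalation branch is unreachable until that limit is raised. A sketch of a validating callback (not part of this commit) that would exercise the retry path:
// Illustrative only: a callback that rejects non-JSON output; the error text
// is fed back into the next attempt's prompt via last_error.
fn require_json_object(resp string) !string {
	if !resp.trim_space().starts_with('{') {
		return error('expected a JSON object')
	}
	return resp
}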

View File

@@ -23,7 +23,7 @@ interface Backend {
tool_get(name string) !Tool
tool_list() ![]Tool
tool_call(name string, arguments map[string]json2.Any) !ToolCallResult
// Sampling methods
sampling_create_message(params map[string]json2.Any) !SamplingCreateMessageResult
mut:

View File

@@ -114,16 +114,14 @@ fn (b &MemoryBackend) prompt_messages_get(name string, arguments map[string]stri
return messages
}
fn (b &MemoryBackend) prompt_call(name string, arguments []string) ![]PromptMessage {
// Get the tool handler
handler := b.prompt_handlers[name] or { return error('tool handler not found') }
// Call the handler with the provided arguments
return handler(arguments) or {panic(err)}
return handler(arguments) or { panic(err) }
}
// Tool related methods
fn (b &MemoryBackend) tool_exists(name string) !bool {
@@ -165,11 +163,11 @@ fn (b &MemoryBackend) sampling_create_message(params map[string]json2.Any) !Samp
// Return a default implementation that just echoes back a message
// indicating that no sampling handler is registered
return SamplingCreateMessageResult{
model: 'default'
model: 'default'
stop_reason: 'endTurn'
role: 'assistant'
content: MessageContent{
typ: 'text'
role: 'assistant'
content: MessageContent{
typ: 'text'
text: 'Sampling is not configured on this server. Please register a sampling handler.'
}
}

View File

@@ -8,160 +8,165 @@ import freeflowuniverse.herolib.baobab.generator
import freeflowuniverse.herolib.baobab.specification
// generate_methods_file MCP Tool
//
//
const generate_methods_file_tool = mcp.Tool{
name: 'generate_methods_file'
description: 'Generates a methods file with methods for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
})}
required: ['source']
}
name: 'generate_methods_file'
description: 'Generates a methods file with methods for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'source': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
})
}
required: ['source']
}
}
pub fn (d &Baobab) generate_methods_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
source := json.decode[generator.Source](arguments["source"].str())!
result := generator.generate_methods_file_str(source)
or {
source := json.decode[generator.Source](arguments['source'].str())!
result := generator.generate_methods_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_module_from_openapi MCP Tool
const generate_module_from_openapi_tool = mcp.Tool{
name: 'generate_module_from_openapi'
description: ''
input_schema: jsonschema.Schema{
typ: 'object'
properties: {'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})}
required: ['openapi_path']
}
name: 'generate_module_from_openapi'
description: ''
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
required: ['openapi_path']
}
}
pub fn (d &Baobab) generate_module_from_openapi_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
openapi_path := arguments["openapi_path"].str()
result := generator.generate_module_from_openapi(openapi_path)
or {
openapi_path := arguments['openapi_path'].str()
result := generator.generate_module_from_openapi(openapi_path) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_methods_interface_file MCP Tool
const generate_methods_interface_file_tool = mcp.Tool{
name: 'generate_methods_interface_file'
description: 'Generates a methods interface file with method interfaces for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
})}
required: ['source']
}
name: 'generate_methods_interface_file'
description: 'Generates a methods interface file with method interfaces for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'source': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
})
}
required: ['source']
}
}
pub fn (d &Baobab) generate_methods_interface_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
source := json.decode[generator.Source](arguments["source"].str())!
result := generator.generate_methods_interface_file_str(source)
or {
source := json.decode[generator.Source](arguments['source'].str())!
result := generator.generate_methods_interface_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_model_file MCP Tool
const generate_model_file_tool = mcp.Tool{
name: 'generate_model_file'
description: 'Generates a model file with data structures for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
})}
required: ['source']
}
name: 'generate_model_file'
description: 'Generates a model file with data structures for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'source': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
})
}
required: ['source']
}
}
pub fn (d &Baobab) generate_model_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
source := json.decode[generator.Source](arguments["source"].str())!
result := generator.generate_model_file_str(source)
or {
source := json.decode[generator.Source](arguments['source'].str())!
result := generator.generate_model_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// generate_methods_example_file MCP Tool
const generate_methods_example_file_tool = mcp.Tool{
name: 'generate_methods_example_file'
description: 'Generates a methods example file with example implementations for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
})}
required: ['source']
}
name: 'generate_methods_example_file'
description: 'Generates a methods example file with example implementations for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'source': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
})
}
required: ['source']
}
}
pub fn (d &Baobab) generate_methods_example_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
source := json.decode[generator.Source](arguments["source"].str())!
result := generator.generate_methods_example_file_str(source)
or {
source := json.decode[generator.Source](arguments['source'].str())!
result := generator.generate_methods_example_file_str(source) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
}

View File

@@ -13,7 +13,7 @@ import os
fn test_generate_module_from_openapi_tool() {
// Verify the tool definition
assert generate_module_from_openapi_tool.name == 'generate_module_from_openapi', 'Tool name should be "generate_module_from_openapi"'
// Verify the input schema
assert generate_module_from_openapi_tool.input_schema.typ == 'object', 'Input schema type should be "object"'
assert 'openapi_path' in generate_module_from_openapi_tool.input_schema.properties, 'Input schema should have "openapi_path" property'
@@ -26,14 +26,14 @@ fn test_generate_module_from_openapi_tool_handler_error() {
// Create arguments with a non-existent file path
mut arguments := map[string]json2.Any{}
arguments['openapi_path'] = json2.Any('non_existent_file.yaml')
// Call the handler
result := generate_module_from_openapi_tool_handler(arguments) or {
// If the handler returns an error, that's expected
assert err.msg().contains(''), 'Error message should not be empty'
return
}
// If we get here, the handler should have returned an error result
assert result.is_error, 'Result should indicate an error'
assert result.content.len > 0, 'Error content should not be empty'
@@ -48,7 +48,7 @@ fn test_mcp_tool_call_integration() {
assert false, 'Failed to create MCP server: ${err}'
return
}
// Create a temporary OpenAPI file for testing
temp_dir := os.temp_dir()
temp_file := os.join_path(temp_dir, 'test_openapi.yaml')
@@ -56,30 +56,30 @@ fn test_mcp_tool_call_integration() {
assert false, 'Failed to create temporary file: ${err}'
return
}
// Sample tool call request
tool_call_request := '{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"generate_module_from_openapi","arguments":{"openapi_path":"${temp_file}"}}}'
// Process the request through the handler
response := server.handler.handle(tool_call_request) or {
// Clean up the temporary file
os.rm(temp_file) or {}
// If the handler returns an error, that's expected in this test environment
// since we might not have all dependencies set up
return
}
// Clean up the temporary file
os.rm(temp_file) or {}
// Decode the response to verify its structure
decoded_response := jsonrpc.decode_response(response) or {
// In a test environment, we might get an error due to missing dependencies
// This is acceptable for this test
return
}
// If we got a successful response, verify it
if !decoded_response.is_error() {
// Parse the result to verify its contents
@@ -87,15 +87,15 @@ fn test_mcp_tool_call_integration() {
assert false, 'Failed to get result: ${err}'
return
}
// Decode the result to check the content
result_map := json2.raw_decode(result_json) or {
assert false, 'Failed to decode result: ${err}'
return
}.as_map()
// Verify the result structure
assert 'isError' in result_map, 'Result should have isError field'
assert 'content' in result_map, 'Result should have content field'
}
}
}

View File

@@ -2,22 +2,21 @@ module baobab
import cli
pub const command := cli.Command{
sort_flags: true
name: 'baobab'
pub const command = cli.Command{
sort_flags: true
name: 'baobab'
// execute: cmd_mcpgen
description: 'baobab command'
commands: [
commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the Baobab server'
}
},
]
}
fn cmd_start(cmd cli.Command) ! {
mut server := new_mcp_server(&Baobab{})!
server.start()!
}
}

View File

@@ -67,7 +67,7 @@ fn test_mcp_server_initialize() {
// Verify the protocol version matches what was requested
assert result.protocol_version == '2024-11-05', 'Protocol version should match the request'
// Verify server info
assert result.server_info.name == 'developer', 'Server name should be "developer"'
}
@@ -113,7 +113,7 @@ fn test_tools_list() {
// Verify that the tools array exists and contains the expected tool
tools := result_map['tools'].arr()
assert tools.len > 0, 'Tools list should not be empty'
// Find the generate_module_from_openapi tool
mut found_tool := false
for tool in tools {
@@ -123,6 +123,6 @@ fn test_tools_list() {
break
}
}
assert found_tool, 'generate_module_from_openapi tool should be registered'
}

View File

@@ -13,18 +13,18 @@ pub fn new_mcp_server(v &Baobab) !&mcp.Server {
// Initialize the server with the empty handlers map
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
'generate_module_from_openapi': generate_module_from_openapi_tool
'generate_methods_file': generate_methods_file_tool
'generate_module_from_openapi': generate_module_from_openapi_tool
'generate_methods_file': generate_methods_file_tool
'generate_methods_interface_file': generate_methods_interface_file_tool
'generate_model_file': generate_model_file_tool
'generate_methods_example_file': generate_methods_example_file_tool
'generate_model_file': generate_model_file_tool
'generate_methods_example_file': generate_methods_example_file_tool
}
tool_handlers: {
'generate_module_from_openapi': v.generate_module_from_openapi_tool_handler
'generate_methods_file': v.generate_methods_file_tool_handler
'generate_module_from_openapi': v.generate_module_from_openapi_tool_handler
'generate_methods_file': v.generate_methods_file_tool_handler
'generate_methods_interface_file': v.generate_methods_interface_file_tool_handler
'generate_model_file': v.generate_model_file_tool_handler
'generate_methods_example_file': v.generate_methods_example_file_tool_handler
'generate_model_file': v.generate_model_file_tool_handler
'generate_methods_example_file': v.generate_methods_example_file_tool_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
@@ -35,4 +35,4 @@ pub fn new_mcp_server(v &Baobab) !&mcp.Server {
}
})!
return server
}
}

View File

@@ -13,20 +13,20 @@ prod_mode := fp.bool('prod', `p`, false, 'Build production version (optimized)')
help_requested := fp.bool('help', `h`, false, 'Show help message')
if help_requested {
println(fp.usage())
exit(0)
println(fp.usage())
exit(0)
}
additional_args := fp.finalize() or {
eprintln(err)
println(fp.usage())
exit(1)
eprintln(err)
println(fp.usage())
exit(1)
}
if additional_args.len > 0 {
eprintln('Unexpected arguments: ${additional_args.join(' ')}')
println(fp.usage())
exit(1)
eprintln('Unexpected arguments: ${additional_args.join(' ')}')
println(fp.usage())
exit(1)
}
// Change to the mcp directory
@@ -36,20 +36,20 @@ os.chdir(mcp_dir) or { panic('Failed to change directory to ${mcp_dir}: ${err}')
// Set MCPPATH based on OS
mut mcppath := '/usr/local/bin/mcp'
if os.user_os() == 'macos' {
mcppath = os.join_path(os.home_dir(), 'hero/bin/mcp')
mcppath = os.join_path(os.home_dir(), 'hero/bin/mcp')
}
// Set compilation command based on OS and mode
compile_cmd := if prod_mode {
'v -enable-globals -w -n -prod mcp.v'
'v -enable-globals -w -n -prod mcp.v'
} else {
'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals mcp.v'
'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals mcp.v'
}
println('Building MCP in ${if prod_mode { 'production' } else { 'debug' }} mode...')
if os.system(compile_cmd) != 0 {
panic('Failed to compile mcp.v with command: ${compile_cmd}')
panic('Failed to compile mcp.v with command: ${compile_cmd}')
}
// Make executable

View File

@@ -45,11 +45,11 @@ mcp
description: 'show verbose output'
})
mut cmd_inspector := cli.Command{
mut cmd_inspector := Command{
sort_flags: true
name: 'inspector'
execute: cmd_inspector_execute
description: 'will list existing mdbooks'
description: 'will list existing mdbooks'
}
cmd_inspector.add_flag(Flag{
@@ -68,7 +68,6 @@ mcp
description: 'open inspector'
})
cmd_mcp.add_command(rhai_mcp.command)
cmd_mcp.add_command(rust.command)
// cmd_mcp.add_command(baobab.command)
@@ -79,7 +78,7 @@ mcp
cmd_mcp.parse(os.args)
}
fn cmd_inspector_execute(cmd cli.Command) ! {
fn cmd_inspector_execute(cmd Command) ! {
open := cmd.flags.get_bool('open') or { false }
if open {
osal.exec(cmd: 'open http://localhost:5173')!
@@ -91,4 +90,4 @@ fn cmd_inspector_execute(cmd cli.Command) ! {
} else {
osal.exec(cmd: 'npx @modelcontextprotocol/inspector')!
}
}
}

View File

@@ -1,6 +1,5 @@
module mcp
pub fn result_to_mcp_tool_contents[T](result T) []ToolContent {
return [result_to_mcp_tool_content[T](result)]
}
@@ -50,4 +49,4 @@ pub fn array_to_mcp_tool_contents[U](array []U) []ToolContent {
contents << result_to_mcp_tool_content(item)
}
return contents
}
}

View File

@@ -110,7 +110,8 @@ fn (mut s Server) prompts_get_handler(data string) !string {
// messages := s.backend.prompt_messages_get(request.params.name, request.params.arguments)!
// Create a success response with the result
response := jsonrpc.new_response_generic[PromptGetResult](request_map['id'].int(), PromptGetResult{
response := jsonrpc.new_response_generic[PromptGetResult](request_map['id'].int(),
PromptGetResult{
description: prompt.description
messages: messages
})

View File

@@ -30,9 +30,9 @@ pub:
pub struct ModelPreferences {
pub:
hints []ModelHint
cost_priority f32 @[json: 'costPriority']
speed_priority f32 @[json: 'speedPriority']
hints []ModelHint
cost_priority f32 @[json: 'costPriority']
speed_priority f32 @[json: 'speedPriority']
intelligence_priority f32 @[json: 'intelligencePriority']
}
@@ -43,8 +43,8 @@ pub:
system_prompt string @[json: 'systemPrompt']
include_context string @[json: 'includeContext']
temperature f32
max_tokens int @[json: 'maxTokens']
stop_sequences []string @[json: 'stopSequences']
max_tokens int @[json: 'maxTokens']
stop_sequences []string @[json: 'stopSequences']
metadata map[string]json2.Any
}
@@ -63,21 +63,21 @@ fn (mut s Server) sampling_create_message_handler(data string) !string {
request_map := json2.raw_decode(data)!.as_map()
id := request_map['id'].int()
params_map := request_map['params'].as_map()
// Validate required parameters
if 'messages' !in params_map {
return jsonrpc.new_error_response(id, missing_required_argument('messages')).encode()
}
if 'maxTokens' !in params_map {
return jsonrpc.new_error_response(id, missing_required_argument('maxTokens')).encode()
}
// Call the backend to handle the sampling request
result := s.backend.sampling_create_message(params_map) or {
return jsonrpc.new_error_response(id, sampling_error(err.msg())).encode()
}
// Create a success response with the result
response := jsonrpc.new_response(id, json.encode(result))
return response.encode()
@@ -87,30 +87,30 @@ fn (mut s Server) sampling_create_message_handler(data string) !string {
fn parse_messages(messages_json json2.Any) ![]Message {
messages_arr := messages_json.arr()
mut result := []Message{cap: messages_arr.len}
for msg_json in messages_arr {
msg_map := msg_json.as_map()
if 'role' !in msg_map {
return error('Missing role in message')
}
if 'content' !in msg_map {
return error('Missing content in message')
}
role := msg_map['role'].str()
content_map := msg_map['content'].as_map()
if 'type' !in content_map {
return error('Missing type in message content')
}
typ := content_map['type'].str()
mut text := ''
mut data := ''
mut mimetype := ''
if typ == 'text' {
if 'text' !in content_map {
return error('Missing text in text content')
@@ -121,7 +121,7 @@ fn parse_messages(messages_json json2.Any) ![]Message {
return error('Missing data in image content')
}
data = content_map['data'].str()
if 'mimeType' !in content_map {
return error('Missing mimeType in image content')
}
@@ -129,17 +129,17 @@ fn parse_messages(messages_json json2.Any) ![]Message {
} else {
return error('Unsupported content type: ${typ}')
}
result << Message{
role: role
role: role
content: MessageContent{
typ: typ
text: text
data: data
typ: typ
text: text
data: data
mimetype: mimetype
}
}
}
return result
}

View File

@@ -26,8 +26,8 @@ pub:
pub struct ToolItems {
pub:
typ string @[json: 'type']
enum []string
typ string @[json: 'type']
enum []string
properties map[string]ToolProperty
}
@@ -63,7 +63,7 @@ fn (mut s Server) tools_list_handler(data string) !string {
// TODO: Implement pagination logic using the cursor
// For now, return all tools
encoded := json.encode(ToolListResult{
encoded := json.encode(ToolListResult{
tools: s.backend.tool_list()!
next_cursor: '' // Empty if no more pages
})
@@ -148,4 +148,4 @@ pub fn error_tool_call_result(err IError) ToolCallResult {
text: err.msg()
}]
}
}
}

View File

@@ -2,22 +2,21 @@ module mcpgen
import cli
pub const command := cli.Command{
sort_flags: true
name: 'mcpgen'
pub const command = cli.Command{
sort_flags: true
name: 'mcpgen'
// execute: cmd_mcpgen
description: 'will list existing mdbooks'
commands: [
commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the MCP server'
}
},
]
}
fn cmd_start(cmd cli.Command) ! {
mut server := new_mcp_server(&MCPGen{})!
server.start()!
}
}

View File

@@ -7,7 +7,7 @@ import freeflowuniverse.herolib.schemas.jsonschema.codegen
import os
pub struct FunctionPointer {
name string // name of function
name string // name of function
module_path string // path to module
}
@@ -15,14 +15,14 @@ pub struct FunctionPointer {
// returns an MCP Tool code in v for attaching the function to the mcp server
// function_pointers: A list of function pointers to generate tools for
pub fn (d &MCPGen) create_mcp_tools_code(function_pointers []FunctionPointer) !string {
mut str := ""
mut str := ''
for function_pointer in function_pointers {
str += d.create_mcp_tool_code(function_pointer.name, function_pointer.module_path)!
}
return str
}
}
// create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
// returns an MCP Tool code in v for attaching the function to the mcp server
@@ -30,11 +30,10 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
if !os.exists(module_path) {
return error('Module path does not exist: ${module_path}')
}
function := code.get_function_from_module(module_path, function_name) or {
return error('Failed to get function ${function_name} from module ${module_path}\n${err}')
}
mut types := map[string]string{}
for param in function.params {
@@ -43,9 +42,9 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
types[param.typ.symbol()] = code.get_type_from_module(module_path, param.typ.symbol())!
}
}
// Get the result type if it's a struct
mut result_ := ""
mut result_ := ''
if function.result.typ is code.Result {
result_type := (function.result.typ as code.Result).typ
if result_type is code.Object {
@@ -60,7 +59,7 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
handler := d.create_mcp_tool_handler(function, types, result_)!
str := $tmpl('./templates/tool_code.v.template')
return str
}
}
// create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments
@@ -68,7 +67,7 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
// result: The type of result of the create_mcp_tool function. Could be simply string, or struct {...}
pub fn (d &MCPGen) create_mcp_tool_handler(function code.Function, types map[string]string, result_ string) !string {
decode_stmts := function.params.map(argument_decode_stmt(it)).join_lines()
function_call := 'd.${function.name}(${function.params.map(it.name).join(',')})'
result := code.parse_type(result_)
str := $tmpl('./templates/tool_handler.v.template')
@@ -92,6 +91,7 @@ pub fn argument_decode_stmt(param code.Param) string {
panic('Unsupported type: ${param.typ}')
}
}
/*
in @generate_mcp.v, implement a create_mcp_tool_handler function that given a vlang function string and the types that map to their corresponding type definitions (for instance struct some_type: SomeType{...}), generates a vlang function such as the following:
@@ -103,7 +103,6 @@ pub fn (d &MCPGen) create_mcp_tool_tool_handler(arguments map[string]Any) !mcp.T
}
*/
// create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments
// types: A map of struct names to their definitions for complex parameter types
@@ -111,14 +110,14 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// Create input schema for parameters
mut properties := map[string]jsonschema.SchemaRef{}
mut required := []string{}
for param in function.params {
// Add to required parameters
required << param.name
// Create property for this parameter
mut property := jsonschema.SchemaRef{}
// Check if this is a complex type defined in the types map
if param.typ.symbol() in types {
// Parse the struct definition to create a nested schema
@@ -133,21 +132,21 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// Handle primitive types
property = codegen.typesymbol_to_schema(param.typ.symbol())
}
properties[param.name] = property
}
// Create the input schema
input_schema := jsonschema.Schema{
typ: 'object',
properties: properties,
required: required
typ: 'object'
properties: properties
required: required
}
// Create and return the Tool
return mcp.Tool{
name: function.name,
description: function.description,
name: function.name
description: function.description
input_schema: input_schema
}
}
@@ -157,7 +156,7 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// // returns: A jsonschema.Schema for the given input type
// // errors: Returns an error if the input type is not supported
// pub fn (d MCPGen) create_mcp_tool_input_schema(input string) !jsonschema.Schema {
// // if input is a primitive type, return a mcp jsonschema.Schema with that type
// if input == 'string' {
// return jsonschema.Schema{
@@ -176,30 +175,30 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// typ: 'boolean'
// }
// }
// // if input is a struct, return a mcp jsonschema.Schema with typ 'object' and properties for each field in the struct
// if input.starts_with('pub struct ') {
// struct_name := input[11..].split(' ')[0]
// fields := parse_struct_fields(input)
// mut properties := map[string]jsonschema.Schema{}
// for field_name, field_type in fields {
// property := jsonschema.Schema{
// typ: d.create_mcp_tool_input_schema(field_type)!.typ
// }
// properties[field_name] = property
// }
// return jsonschema.Schema{
// typ: 'object',
// properties: properties
// }
// }
// // if input is an array, return a mcp jsonschema.Schema with typ 'array' and items of the item type
// if input.starts_with('[]') {
// item_type := input[2..]
// // For array types, we create a schema with type 'array'
// // The actual item type is determined by the primitive type
// mut item_type_str := 'string' // default
@@ -210,74 +209,73 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// } else if item_type == 'bool' {
// item_type_str = 'boolean'
// }
// // Create a property for the array items
// mut property := jsonschema.Schema{
// typ: 'array'
// }
// // Add the property to the schema
// mut properties := map[string]jsonschema.Schema{}
// properties['items'] = property
// return jsonschema.Schema{
// typ: 'array',
// properties: properties
// }
// }
// // Default to string type for unknown types
// return jsonschema.Schema{
// typ: 'string'
// }
// }
// parse_struct_fields parses a V language struct definition string and returns a map of field names to their types
fn parse_struct_fields(struct_def string) map[string]string {
mut fields := map[string]string{}
// Find the opening and closing braces of the struct definition
start_idx := struct_def.index('{') or { return fields }
end_idx := struct_def.last_index('}') or { return fields }
// Extract the content between the braces
struct_content := struct_def[start_idx + 1..end_idx].trim_space()
// Split the content by newlines to get individual field definitions
field_lines := struct_content.split('
')
for line in field_lines {
trimmed_line := line.trim_space()
// Skip empty lines and comments
if trimmed_line == '' || trimmed_line.starts_with('//') {
continue
}
// Handle pub: or mut: prefixes
mut field_def := trimmed_line
if field_def.starts_with('pub:') || field_def.starts_with('mut:') {
field_def = field_def.all_after(':').trim_space()
}
// Split by whitespace to separate field name and type
parts := field_def.split_any(' ')
if parts.len < 2 {
continue
}
field_name := parts[0]
field_type := parts[1..].join(' ')
// Handle attributes like @[json: 'name']
if field_name.contains('@[') {
continue
}
fields[field_name] = field_type
}
return fields
}
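A small sketch (not part of this commit) of what parse_struct_fields yields for a simple definition:
// Illustrative only.
fn demo_parse_fields() {
	def := 'pub struct Point {
	x int
	y int
}'
	fields := parse_struct_fields(def)
	println(fields) // {'x': 'int', 'y': 'int'}
}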

View File

@@ -12,42 +12,41 @@ import x.json2 as json { Any }
// function_pointers: A list of function pointers to generate tools for
const create_mcp_tools_code_tool = mcp.Tool{
name: 'create_mcp_tools_code'
description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
name: 'create_mcp_tools_code'
description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
returns an MCP Tool code in v for attaching the function to the mcp server
function_pointers: A list of function pointers to generate tools for'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'function_pointers': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'array'
items: jsonschema.Items(jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'name': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'module_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
required: ['name', 'module_path']
}))
})
}
required: ['function_pointers']
}
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'function_pointers': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'array'
items: jsonschema.Items(jsonschema.SchemaRef(jsonschema.Schema{
typ: 'object'
properties: {
'name': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'module_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
required: ['name', 'module_path']
}))
})
}
required: ['function_pointers']
}
}
pub fn (d &MCPGen) create_mcp_tools_code_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
function_pointers := json.decode[[]FunctionPointer](arguments["function_pointers"].str())!
result := d.create_mcp_tools_code(function_pointers)
or {
function_pointers := json.decode[[]FunctionPointer](arguments['function_pointers'].str())!
result := d.create_mcp_tools_code(function_pointers) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
@@ -59,10 +58,10 @@ returns an MCP Tool code in v for attaching the function to the mcp server'
typ: 'object'
properties: {
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
typ: 'string'
})
'module_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
typ: 'string'
})
}
required: ['function_name', 'module_path']

View File

@@ -12,16 +12,16 @@ pub fn new_mcp_server(v &MCPGen) !&mcp.Server {
// Initialize the server with the empty handlers map
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
'create_mcp_tool_code': create_mcp_tool_code_tool
'create_mcp_tool_const': create_mcp_tool_const_tool
'create_mcp_tool_code': create_mcp_tool_code_tool
'create_mcp_tool_const': create_mcp_tool_const_tool
'create_mcp_tool_handler': create_mcp_tool_handler_tool
'create_mcp_tools_code': create_mcp_tools_code_tool
'create_mcp_tools_code': create_mcp_tools_code_tool
}
tool_handlers: {
'create_mcp_tool_code': v.create_mcp_tool_code_tool_handler
'create_mcp_tool_const': v.create_mcp_tool_const_tool_handler
'create_mcp_tool_code': v.create_mcp_tool_code_tool_handler
'create_mcp_tool_const': v.create_mcp_tool_const_tool_handler
'create_mcp_tool_handler': v.create_mcp_tool_handler_tool_handler
'create_mcp_tools_code': v.create_mcp_tools_code_tool_handler
'create_mcp_tools_code': v.create_mcp_tools_code_tool_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
@@ -32,4 +32,4 @@ pub fn new_mcp_server(v &MCPGen) !&mcp.Server {
}
})!
return server
}
}

View File

@@ -8,7 +8,7 @@ fn main() {
eprintln('Failed to create MCP server: ${err}')
return
}
// Start the server
server.start() or {
eprintln('Failed to start MCP server: ${err}')

View File

@@ -5,8 +5,7 @@ import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import json
pub fn convert_pug(mydir string)! {
pub fn convert_pug(mydir string) ! {
mut d := pathlib.get_dir(path: mydir, create: false)!
list := d.list(regex: [r'.*\.pug$'], include_links: false, files_only: true)!
for item in list.paths {
@@ -17,12 +16,12 @@ pub fn convert_pug(mydir string)! {
// extract_template parses AI response content to extract just the template
fn extract_template(raw_content string) string {
mut content := raw_content
// First check for </think> tag
if content.contains('</think>') {
content = content.split('</think>')[1].trim_space()
}
// Look for ```jet code block
if content.contains('```jet') {
parts := content.split('```jet')
@@ -39,7 +38,7 @@ fn extract_template(raw_content string) string {
// Take the content between the first set of ```
// This handles both ```content``` and cases where there's only an opening ```
content = parts[1].trim_space()
// If we only see an opening ``` but no closing, cleanup any remaining backticks
// to avoid incomplete formatting markers
if !content.contains('```') {
@@ -47,16 +46,16 @@ fn extract_template(raw_content string) string {
}
}
}
return content
}
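For orientation, a sketch (not part of this commit) of the extraction rules above applied to a typical response:
// Illustrative only: a </think> block followed by a ```jet fence should
// reduce to the bare template.
fn demo_extract() {
	raw := '</think>
```jet
<p>Hello</p>
```'
	println(extract_template(raw)) // expected: <p>Hello</p>
}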
pub fn convert_pug_file(myfile string)! {
pub fn convert_pug_file(myfile string) ! {
println(myfile)
// Create new file path by replacing .pug extension with .jet
jet_file := myfile.replace('.pug', '.jet')
// Check if jet file already exists, if so skip processing
mut jet_path_exist := pathlib.get_file(path: jet_file, create: false)!
if jet_path_exist.exists() {
@@ -69,7 +68,7 @@ pub fn convert_pug_file(myfile string)! {
mut l := loader()
mut client := openai.get()!
base_instruction := '
You are a template language converter. You convert Pug templates to Jet templates.
@@ -82,25 +81,24 @@ pub fn convert_pug_file(myfile string)! {
only output the resulting template, no explanation, no steps, just the jet template
'
// We'll retry up to 5 times if validation fails
max_attempts := 5
mut attempts := 0
mut is_valid := false
mut error_message := ''
mut template := ''
for attempts < max_attempts && !is_valid {
attempts++
mut system_content := texttools.dedent(base_instruction) + "\n" + l.jet()
mut system_content := texttools.dedent(base_instruction) + '\n' + l.jet()
mut user_prompt := ''
// Create different prompts for first attempt vs retries
if attempts == 1 {
// First attempt - convert from PUG
user_prompt = texttools.dedent(base_user_prompt) + "\n" + content
user_prompt = texttools.dedent(base_user_prompt) + '\n' + content
// Print what we're sending to the AI service
println('Sending to OpenAI for conversion:')
println('--------------------------------')
@@ -127,53 +125,57 @@ Please fix the template and try again. Learn from feedback and check which jet t
Return only the corrected Jet template.
Don't send back more information than the fixed template, make sure it's in jet format.
'
// Print what we're sending for the retry
' // Print what we're sending for the retry
println('Sending to OpenAI for correction:')
println('--------------------------------')
println(user_prompt)
println('--------------------------------')
}
mut m := openai.Messages{
messages: [
openai.Message{
role: .system
content: system_content
},
},
openai.Message{
role: .user
content: user_prompt
},
]}
]
}
// Create a chat completion request
res := client.chat_completion(msgs: m, model: "deepseek-r1-distill-llama-70b", max_completion_tokens: 64000)!
println("-----")
res := client.chat_completion(
msgs: m
model: 'deepseek-r1-distill-llama-70b'
max_completion_tokens: 64000
)!
println('-----')
// Print AI response before extraction
println('Response received from AI:')
println('--------------------------------')
println(res.choices[0].message.content)
println('--------------------------------')
// Extract the template from the AI response
template = extract_template(res.choices[0].message.content)
println('Extracted template for ${myfile}:')
println('--------------------------------')
println(template)
println('--------------------------------')
// Validate the template
validation_result := jetvaliditycheck(template) or {
// If validation service is unavailable, we'll just proceed with the template
println('Warning: Template validation service unavailable: ${err}')
break
}
// Check if template is valid
if validation_result.is_valid {
is_valid = true
@@ -183,19 +185,19 @@ Dont send back more information than the fixed template, make sure its in jet fo
println('Template validation failed: ${error_message}')
}
}
// Report the validation outcome
if is_valid {
println('Successfully converted template after ${attempts} attempt(s)')
// Create the file and write the processed content
println("Converted to: ${jet_file}")
println('Converted to: ${jet_file}')
mut jet_path := pathlib.get_file(path: jet_file, create: true)!
jet_path.write(template)!
jet_path.write(template)!
} else if attempts >= max_attempts {
println('Warning: Could not validate template after ${max_attempts} attempts')
println('Using best attempt despite validation errors: ${error_message}')
jet_file2:=jet_file.replace(".jet","_error.jet")
jet_file2 := jet_file.replace('.jet', '_error.jet')
mut jet_path2 := pathlib.get_file(path: jet_file2, create: true)!
jet_path2.write(template)!
jet_path2.write(template)!
}
}

View File

@@ -5,9 +5,9 @@ import json
// JetTemplateResponse is the expected response structure from the validation service
struct JetTemplateResponse {
valid bool
message string
error string
valid bool
message string
error string
}
// ValidationResult represents the result of a template validation
@@ -30,7 +30,7 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
template_data := json.encode({
'template': jetcontent
})
// Print what we're sending to the AI service
// println('Sending to JET validation service:')
// println('--------------------------------')
@@ -39,8 +39,8 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
// Send the POST request to the validation endpoint
req := httpconnection.Request{
prefix: 'checkjet',
data: template_data,
prefix: 'checkjet'
data: template_data
dataformat: .json
}
@@ -49,7 +49,7 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
// Handle connection errors
return ValidationResult{
is_valid: false
error: 'Connection error: ${err}'
error: 'Connection error: ${err}'
}
}
@@ -58,12 +58,12 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
// If we can't parse JSON using our struct, the server didn't return the expected format
return ValidationResult{
is_valid: false
error: 'Server returned unexpected format: ${err.msg()}'
error: 'Server returned unexpected format: ${err.msg()}'
}
}
// Use the structured response data
if response.valid == false{
if response.valid == false {
error_msg := if response.error != '' {
response.error
} else if response.message != '' {
@@ -74,12 +74,12 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
return ValidationResult{
is_valid: false
error: error_msg
error: error_msg
}
}
return ValidationResult{
is_valid: true
error: ''
error: ''
}
}
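A usage sketch (not part of this commit); it assumes the checkjet validation service is reachable:
// Illustrative only.
fn demo_validate() ! {
	result := jetvaliditycheck('{{ if user }}<p>hi</p>{{ end }}')!
	if result.is_valid {
		println('template ok')
	} else {
		println('invalid: ${result.error}')
	}
}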

View File

@@ -10,12 +10,11 @@ pub mut:
}
fn (mut loader FileLoader) load() {
loader.embedded_files["jet"]=$embed_file('templates/jet_instructions.md')
loader.embedded_files['jet'] = $embed_file('templates/jet_instructions.md')
}
fn (mut loader FileLoader) jet() string {
c:=loader.embedded_files["jet"] or { panic("bug embed") }
c := loader.embedded_files['jet'] or { panic('bug embed') }
return c.to_string()
}
@@ -23,4 +22,4 @@ fn loader() FileLoader {
mut loader := FileLoader{}
loader.load()
return loader
}
}

View File

@@ -8,47 +8,47 @@ import os
pub fn handler(arguments map[string]Any) !mcp.ToolCallResult {
path := arguments['path'].str()
// Check if path exists
if !os.exists(path) {
return mcp.ToolCallResult{
is_error: true
content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
}
}
// Determine if path is a file or directory
is_directory := os.is_dir(path)
mut message := ""
mut message := ''
if is_directory {
// Convert all pug files in the directory
pugconvert.convert_pug(path) or {
return mcp.ToolCallResult{
is_error: true
content: mcp.result_to_mcp_tool_contents[string]("Error converting pug files in directory: ${err}")
content: mcp.result_to_mcp_tool_contents[string]('Error converting pug files in directory: ${err}')
}
}
message = "Successfully converted all pug files in directory '${path}'"
} else if path.ends_with(".pug") {
} else if path.ends_with('.pug') {
// Convert a single pug file
pugconvert.convert_pug_file(path) or {
return mcp.ToolCallResult{
is_error: true
content: mcp.result_to_mcp_tool_contents[string]("Error converting pug file: ${err}")
content: mcp.result_to_mcp_tool_contents[string]('Error converting pug file: ${err}')
}
}
message = "Successfully converted pug file '${path}'"
} else {
return mcp.ToolCallResult{
is_error: true
content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
}
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](message)
}
}
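A hedged sketch of invoking this handler directly (the map key mirrors the schema below; the path itself is hypothetical, and `Any` comes from the x.json2 import this module already uses):

	// invoke the pugconvert tool handler on a directory of .pug files
	mut args := map[string]Any{}
	args['path'] = Any('/tmp/webui/templates')
	res := handler(args)!
	if res.is_error {
		eprintln('pug conversion failed')
	}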

View File

@@ -1,18 +1,18 @@
module mcp
import freeflowuniverse.herolib.ai.mcp
import x.json2 as json { Any }
import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.ai.mcp.logger
const specs = mcp.Tool{
name: 'pugconvert'
description: 'Convert Pug template files to Jet template files'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string',
typ: 'string'
description: 'Path to a .pug file or directory containing .pug files to convert'
})
}

View File

@@ -9,7 +9,7 @@ fn main() {
log.error('Failed to create MCP server: ${err}')
return
}
// Start the server
server.start() or {
log.error('Failed to start MCP server: ${err}')

View File

@@ -4,163 +4,175 @@ import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
import os
fn main() {
// Get the current directory
current_dir := os.dir(@FILE)
// Check if a source code path was provided as an argument
if os.args.len < 2 {
println('Please provide the path to the source code directory as an argument')
println('Example: ./example.vsh /path/to/source/code/directory')
return
}
// Get the source code path from the command line arguments
source_code_path := os.args[1]
// Check if the path exists and is a directory
if !os.exists(source_code_path) {
println('Source code path does not exist: ${source_code_path}')
return
}
if !os.is_dir(source_code_path) {
println('Source code path is not a directory: ${source_code_path}')
return
}
// Get all Rust files in the directory
files := os.ls(source_code_path) or {
println('Failed to list files in directory: ${err}')
return
}
// Combine all Rust files into a single source code string
mut source_code := ''
for file in files {
file_path := os.join_path(source_code_path, file)
// Skip directories and non-Rust files
if os.is_dir(file_path) || !file.ends_with('.rs') {
continue
}
// Read the file content
file_content := os.read_file(file_path) or {
println('Failed to read file ${file_path}: ${err}')
continue
}
// Add file content to the combined source code
source_code += '// File: ${file}\n${file_content}\n\n'
}
if source_code == '' {
println('No Rust files found in directory: ${source_code_path}')
return
}
// Read the rhaiwrapping.md file
rhai_wrapping_md := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping.md') or {
println('Failed to read rhaiwrapping.md: ${err}')
return
}
// Determine the crate path from the source code path
// Extract the path relative to the src directory
src_index := source_code_path.index('src/') or {
println('Could not determine crate path: src/ not found in path')
return
}
mut path_parts := source_code_path[src_index+4..].split('/')
// Remove the last part (the file name)
if path_parts.len > 0 {
path_parts.delete_last()
}
rel_path := path_parts.join('::')
crate_path := 'sal::${rel_path}'
// Create a new task
mut task := escalayer.new_task(
name: 'rhai_wrapper_creator.escalayer'
description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
)
// Create model configs
sonnet_model := escalayer.ModelConfig{
name: 'anthropic/claude-3.7-sonnet'
provider: 'anthropic'
temperature: 0.7
max_tokens: 25000
}
gpt4_model := escalayer.ModelConfig{
name: 'gpt-4'
provider: 'openai'
temperature: 0.7
max_tokens: 25000
}
// Extract the module name from the directory path (last component)
dir_parts := source_code_path.split('/')
name := dir_parts[dir_parts.len - 1]
// Create the prompt with source code, wrapper example, and rhai_wrapping_md
prompt_content := create_rhai_wrappers(name, source_code, os.read_file('${current_dir}/prompts/example_script.md') or { '' }, os.read_file('${current_dir}/prompts/wrapper.md') or { '' }, os.read_file('${current_dir}/prompts/errors.md') or { '' }, crate_path)
// Create a prompt function that returns the prepared content
prompt_function := fn [prompt_content] (input string) string {
return prompt_content
}
// Get the current directory
current_dir := os.dir(@FILE)
gen := RhaiGen{
name: name
dir: source_code_path
}
// Define a single unit task that handles everything
task.new_unit_task(
name: 'create_rhai_wrappers'
prompt_function: prompt_function
callback_function: gen.process_rhai_wrappers
base_model: sonnet_model
retry_model: gpt4_model
retry_count: 1
)
// Initiate the task
result := task.initiate('') or {
println('Task failed: ${err}')
return
}
println('Task completed successfully')
println('The wrapper files have been generated and compiled in the target directory.')
println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
// Check if a source code path was provided as an argument
if os.args.len < 2 {
println('Please provide the path to the source code directory as an argument')
println('Example: ./example.vsh /path/to/source/code/directory')
return
}
// Get the source code path from the command line arguments
source_code_path := os.args[1]
// Check if the path exists and is a directory
if !os.exists(source_code_path) {
println('Source code path does not exist: ${source_code_path}')
return
}
if !os.is_dir(source_code_path) {
println('Source code path is not a directory: ${source_code_path}')
return
}
// Get all Rust files in the directory
files := os.ls(source_code_path) or {
println('Failed to list files in directory: ${err}')
return
}
// Combine all Rust files into a single source code string
mut source_code := ''
for file in files {
file_path := os.join_path(source_code_path, file)
// Skip directories and non-Rust files
if os.is_dir(file_path) || !file.ends_with('.rs') {
continue
}
// Read the file content
file_content := os.read_file(file_path) or {
println('Failed to read file ${file_path}: ${err}')
continue
}
// Add file content to the combined source code
source_code += '// File: ${file}\n${file_content}\n\n'
}
if source_code == '' {
println('No Rust files found in directory: ${source_code_path}')
return
}
// Read the rhaiwrapping.md file
rhai_wrapping_md := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping.md') or {
println('Failed to read rhaiwrapping.md: ${err}')
return
}
// Determine the crate path from the source code path
// Extract the path relative to the src directory
src_index := source_code_path.index('src/') or {
println('Could not determine crate path: src/ not found in path')
return
}
mut path_parts := source_code_path[src_index + 4..].split('/')
// Remove the last part (the file name)
if path_parts.len > 0 {
path_parts.delete_last()
}
rel_path := path_parts.join('::')
crate_path := 'sal::${rel_path}'
// Create a new task
mut task := escalayer.new_task(
name: 'rhai_wrapper_creator.escalayer'
description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
)
// Create model configs
sonnet_model := escalayer.ModelConfig{
name: 'anthropic/claude-3.7-sonnet'
provider: 'anthropic'
temperature: 0.7
max_tokens: 25000
}
gpt4_model := escalayer.ModelConfig{
name: 'gpt-4'
provider: 'openai'
temperature: 0.7
max_tokens: 25000
}
// Extract the module name from the directory path (last component)
dir_parts := source_code_path.split('/')
name := dir_parts[dir_parts.len - 1]
// Create the prompt with source code, wrapper example, and rhai_wrapping_md
prompt_content := create_rhai_wrappers(name, source_code, os.read_file('${current_dir}/prompts/example_script.md') or {
''
}, os.read_file('${current_dir}/prompts/wrapper.md') or { '' }, os.read_file('${current_dir}/prompts/errors.md') or {
''
}, crate_path)
// Create a prompt function that returns the prepared content
prompt_function := fn [prompt_content] (input string) string {
return prompt_content
}
gen := RhaiGen{
name: name
dir: source_code_path
}
// Define a single unit task that handles everything
task.new_unit_task(
name: 'create_rhai_wrappers'
prompt_function: prompt_function
callback_function: gen.process_rhai_wrappers
base_model: sonnet_model
retry_model: gpt4_model
retry_count: 1
)
// Initiate the task
result := task.initiate('') or {
println('Task failed: ${err}')
return
}
println('Task completed successfully')
println('The wrapper files have been generated and compiled in the target directory.')
println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
}
// Define the prompt functions
fn separate_functions(input string) string {
return 'Read the following Rust code and separate it into functions. Identify all the methods in the Container implementation and their purposes.\n\n${input}'
}
fn create_wrappers(input string) string {
return 'Create Rhai wrappers for the Rust functions identified in the previous step. The wrappers should follow the builder pattern and provide a clean API for use in Rhai scripts. Include error handling and type conversion.\n\n${input}'
}
fn create_example(input string) string {
return 'Create a Rhai example script that demonstrates how to use the wrapper functions. The example should be based on the provided example.rs file but adapted for Rhai syntax. Create a web server example that uses the container functions.\n\n${input}'
}
// Define a Rhai wrapper generator function for Container functions
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md') or { panic('Failed to read guides') }
engine := $tmpl('./prompts/engine.md')
vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md') or { panic('Failed to read guides') }
rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md') or { panic('Failed to read guides') }
rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md') or { panic('Failed to read guides') }
generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md') or {
panic('Failed to read guides')
}
engine := $tmpl('./prompts/engine.md')
vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md') or {
panic('Failed to read guides')
}
rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md') or {
panic('Failed to read guides')
}
rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md') or {
panic('Failed to read guides')
}
generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
${guides}
${vector_vs_array}
${example_rhai}
@@ -267,263 +279,254 @@ your engine create function is called `create_rhai_engine`
@[params]
pub struct WrapperModule {
pub:
lib_rs string
example_rs string
engine_rs string
cargo_toml string
example_rhai string
generic_wrapper_rs string
wrapper_rs string
}
// functions is a list of function names that the AI should extract and pass in
fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string)! string {
// Define project directory paths
name := name_
project_dir := '${base_dir}/rhai'
// Create the project using cargo new --lib
if os.exists(project_dir) {
os.rmdir_all(project_dir) or {
return error('Failed to clean existing project directory: ${err}')
}
}
// Run cargo new --lib to create the project
os.chdir(base_dir) or {
return error('Failed to change directory to base directory: ${err}')
}
cargo_new_result := os.execute('cargo new --lib rhai')
if cargo_new_result.exit_code != 0 {
return error('Failed to create new library project: ${cargo_new_result.output}')
}
// Create examples directory
examples_dir := '${project_dir}/examples'
os.mkdir_all(examples_dir) or {
return error('Failed to create examples directory: ${err}')
}
// Write the lib.rs file
if wrapper.lib_rs != '' {
os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
return error('Failed to write lib.rs: ${err}')
}
}
fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
// Define project directory paths
name := name_
project_dir := '${base_dir}/rhai'
// Create the project using cargo new --lib
if os.exists(project_dir) {
os.rmdir_all(project_dir) or {
return error('Failed to clean existing project directory: ${err}')
}
}
// Run cargo new --lib to create the project
os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }
cargo_new_result := os.execute('cargo new --lib rhai')
if cargo_new_result.exit_code != 0 {
return error('Failed to create new library project: ${cargo_new_result.output}')
}
// Create examples directory
examples_dir := '${project_dir}/examples'
os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
// Write the lib.rs file
if wrapper.lib_rs != '' {
os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
return error('Failed to write lib.rs: ${err}')
}
}
// Write the wrapper.rs file
if wrapper.wrapper_rs != '' {
os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
return error('Failed to write wrapper.rs: ${err}')
}
}
// Write the generic wrapper.rs file
if wrapper.generic_wrapper_rs != '' {
os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
return error('Failed to write generic wrapper.rs: ${err}')
}
}
// Write the example.rs file
if wrapper.example_rs != '' {
os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
return error('Failed to write example.rs: ${err}')
}
}
// Write the engine.rs file if provided
if wrapper.engine_rs != '' {
os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
return error('Failed to write engine.rs: ${err}')
}
}
// Write the Cargo.toml file
if wrapper.cargo_toml != '' {
os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
return error('Failed to write Cargo.toml: ${err}')
}
}
// Write the example.rhai file if provided
if wrapper.example_rhai != '' {
os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
return error('Failed to write example.rhai: ${err}')
}
}
return project_dir
}
// Helper function to extract code blocks from the response
fn extract_code_block(response string, identifier string, language string) string {
// Find the start marker for the code block
mut start_marker := '```${language}\n// ${identifier}'
if language == '' {
start_marker = '```\n// ${identifier}'
}
start_index := response.index(start_marker) or {
// Try alternative format
mut alt_marker := '```${language}\n${identifier}'
if language == '' {
alt_marker = '```\n${identifier}'
}
response.index(alt_marker) or {
return ''
}
}
// Find the end marker
end_marker := '```'
end_index := response.index_after(end_marker, start_index + start_marker.len) or {
return ''
}
// Extract the content between the markers
content_start := start_index + start_marker.len
content := response[content_start..end_index].trim_space()
return content
// Find the start marker for the code block
mut start_marker := '```${language}\n// ${identifier}'
if language == '' {
start_marker = '```\n// ${identifier}'
}
start_index := response.index(start_marker) or {
// Try alternative format
mut alt_marker := '```${language}\n${identifier}'
if language == '' {
alt_marker = '```\n${identifier}'
}
response.index(alt_marker) or { return '' }
}
// Find the end marker
end_marker := '```'
end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
// Extract the content between the markers
content_start := start_index + start_marker.len
content := response[content_start..end_index].trim_space()
return content
}
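A worked example of the marker convention this extractor expects (hypothetical response text): the body between the opening marker and the next closing fence is returned, trimmed, and an empty string signals that neither marker form was found.

	// the AI response is expected to label each file inside its code fence
	response := '```rust\n// wrapper.rs\npub fn demo() {}\n```'
	code := extract_code_block(response, 'wrapper.rs', 'rust')
	assert code == 'pub fn demo() {}'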
// Extract module name from wrapper code
fn extract_module_name(code string) string {
lines := code.split('\n')
for line in lines {
// Look for pub mod or mod declarations
if line.contains('pub mod ') || line.contains('mod ') {
// Extract module name
mut parts := []string{}
if line.contains('pub mod ') {
parts = line.split('pub mod ')
} else {
parts = line.split('mod ')
}
if parts.len > 1 {
// Extract the module name and remove any trailing characters
mut name := parts[1].trim_space()
// Remove any trailing { or ; or whitespace
name = name.trim_right('{').trim_right(';').trim_space()
if name != '' {
return name
}
}
}
}
return ''
}
struct RhaiGen {
name string
dir string
}
// Define the callback function that processes the response and compiles the code
fn (gen RhaiGen)process_rhai_wrappers(response string)! string {
// Extract wrapper.rs content
wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
if wrapper_rs_content == '' {
return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
}
// Extract engine.rs content
mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
if engine_rs_content == '' {
// Try to extract from the response without explicit language marker
engine_rs_content = extract_code_block(response, 'engine.rs', '')
// if engine_rs_content == '' {
// // Use the template engine.rs
// engine_rs_content = $tmpl('./templates/engine.rs')
// }
}
// Extract example.rhai content
mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
if example_rhai_content == '' {
// Try to extract from the response without explicit language marker
example_rhai_content = extract_code_block(response, 'example.rhai', '')
if example_rhai_content == '' {
// Use the example from the template
example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
return error('Failed to read example.rhai template: ${err}')
}
// Extract the code block from the markdown file
example_rhai_content = extract_code_block(example_script_md, 'example.rhai', 'rhai')
if example_rhai_content == '' {
return error('Failed to extract example.rhai from template file')
}
}
}
// Extract function names from the wrapper.rs content
functions := extract_functions_from_code(wrapper_rs_content)
println('Using module name: ${gen.name}_rhai')
println('Extracted functions: ${functions.join(", ")}')
name := gen.name
// Create a WrapperModule struct with the extracted content
wrapper := WrapperModule{
lib_rs: $tmpl('./templates/lib.rs')
wrapper_rs: wrapper_rs_content
example_rs: $tmpl('./templates/example.rs')
engine_rs: engine_rs_content
generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
cargo_toml: $tmpl('./templates/cargo.toml')
example_rhai: example_rhai_content
}
// Create the wrapper module
base_target_dir := gen.dir
project_dir := create_wrapper_module(wrapper, functions, gen.name, base_target_dir) or {
return error('Failed to create wrapper module: ${err}')
}
// Run the example
os.chdir(project_dir) or {
return error('Failed to change directory to project: ${err}')
}
// Run cargo build first
build_result := os.execute('cargo build')
if build_result.exit_code != 0 {
return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
}
// Run the example
run_result := os.execute('cargo run --example example')
return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_result.output}\n\nRun output:\n${run_result.output}'
fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
// Extract wrapper.rs content
wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
if wrapper_rs_content == '' {
return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
}
// Extract engine.rs content
mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
if engine_rs_content == '' {
// Try to extract from the response without explicit language marker
engine_rs_content = extract_code_block(response, 'engine.rs', '')
// if engine_rs_content == '' {
// // Use the template engine.rs
// engine_rs_content = $tmpl('./templates/engine.rs')
// }
}
// Extract example.rhai content
mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
if example_rhai_content == '' {
// Try to extract from the response without explicit language marker
example_rhai_content = extract_code_block(response, 'example.rhai', '')
if example_rhai_content == '' {
// Use the example from the template
example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
return error('Failed to read example.rhai template: ${err}')
}
// Extract the code block from the markdown file
example_rhai_content = extract_code_block(example_script_md, 'example.rhai',
'rhai')
if example_rhai_content == '' {
return error('Failed to extract example.rhai from template file')
}
}
}
// Extract function names from the wrapper.rs content
functions := extract_functions_from_code(wrapper_rs_content)
println('Using module name: ${gen.name}_rhai')
println('Extracted functions: ${functions.join(', ')}')
name := gen.name
// Create a WrapperModule struct with the extracted content
wrapper := WrapperModule{
lib_rs: $tmpl('./templates/lib.rs')
wrapper_rs: wrapper_rs_content
example_rs: $tmpl('./templates/example.rs')
engine_rs: engine_rs_content
generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
cargo_toml: $tmpl('./templates/cargo.toml')
example_rhai: example_rhai_content
}
// Create the wrapper module
base_target_dir := gen.dir
project_dir := create_wrapper_module(wrapper, functions, gen.name, base_target_dir) or {
return error('Failed to create wrapper module: ${err}')
}
// Run the example
os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }
// Run cargo build first
build_result := os.execute('cargo build')
if build_result.exit_code != 0 {
return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
}
// Run the example
run_result := os.execute('cargo run --example example')
return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_result.output}\n\nRun output:\n${run_result.output}'
}
// Extract function names from wrapper code
fn extract_functions_from_code(code string) []string {
mut functions := []string{}
lines := code.split('\n')
for line in lines {
if line.contains('pub fn ') && !line.contains('//') {
// Extract function name
parts := line.split('pub fn ')
if parts.len > 1 {
name_parts := parts[1].split('(')
if name_parts.len > 0 {
fn_name := name_parts[0].trim_space()
if fn_name != '' {
functions << fn_name
}
}
}
}
}
return functions
}
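A small worked example of the line-scanning heuristic above; note it deliberately skips any line containing `//` and takes the identifier up to the first parenthesis:

	code := 'pub fn register(engine &Engine) {}\n// pub fn skipped() {}\npub fn run_example() !string {}'
	names := extract_functions_from_code(code)
	assert names == ['register', 'run_example']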

View File

@@ -4,209 +4,204 @@ import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
import os
fn main() {
// Get the current directory where this script is located
current_dir := os.dir(@FILE)
// Validate command line arguments
source_code_path := validate_command_args() or {
println(err)
return
}
// Read and combine all Rust files in the source directory
source_code := read_source_code(source_code_path) or {
println(err)
return
}
// Determine the crate path from the source code path
crate_path := determine_crate_path(source_code_path) or {
println(err)
return
}
// Extract the module name from the directory path (last component)
name := extract_module_name_from_path(source_code_path)
// Create the prompt content for the AI
prompt_content := create_rhai_wrappers(
name,
source_code,
read_file_safely('${current_dir}/prompts/example_script.md'),
read_file_safely('${current_dir}/prompts/wrapper.md'),
read_file_safely('${current_dir}/prompts/errors.md'),
crate_path
)
// Create the generator instance
gen := RhaiGen{
name: name
dir: source_code_path
}
// Run the task to generate Rhai wrappers
run_wrapper_generation_task(prompt_content, gen) or {
println('Task failed: ${err}')
return
}
println('Task completed successfully')
println('The wrapper files have been generated and compiled in the target directory.')
println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
// Get the current directory where this script is located
current_dir := os.dir(@FILE)
// Validate command line arguments
source_code_path := validate_command_args() or {
println(err)
return
}
// Read and combine all Rust files in the source directory
source_code := read_source_code(source_code_path) or {
println(err)
return
}
// Determine the crate path from the source code path
crate_path := determine_crate_path(source_code_path) or {
println(err)
return
}
// Extract the module name from the directory path (last component)
name := extract_module_name_from_path(source_code_path)
// Create the prompt content for the AI
prompt_content := create_rhai_wrappers(name, source_code, read_file_safely('${current_dir}/prompts/example_script.md'),
read_file_safely('${current_dir}/prompts/wrapper.md'), read_file_safely('${current_dir}/prompts/errors.md'),
crate_path)
// Create the generator instance
gen := RhaiGen{
name: name
dir: source_code_path
}
// Run the task to generate Rhai wrappers
run_wrapper_generation_task(prompt_content, gen) or {
println('Task failed: ${err}')
return
}
println('Task completed successfully')
println('The wrapper files have been generated and compiled in the target directory.')
println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
}
// Validates command line arguments and returns the source code path
fn validate_command_args() !string {
if os.args.len < 2 {
return error('Please provide the path to the source code directory as an argument\nExample: ./example.vsh /path/to/source/code/directory')
}
source_code_path := os.args[1]
if !os.exists(source_code_path) {
return error('Source code path does not exist: ${source_code_path}')
}
if !os.is_dir(source_code_path) {
return error('Source code path is not a directory: ${source_code_path}')
}
return source_code_path
}
// Reads and combines all Rust files in the given directory
fn read_source_code(source_code_path string) !string {
// Get all files in the directory
files := os.ls(source_code_path) or {
return error('Failed to list files in directory: ${err}')
}
// Combine all Rust files into a single source code string
mut source_code := ''
for file in files {
file_path := os.join_path(source_code_path, file)
// Skip directories and non-Rust files
if os.is_dir(file_path) || !file.ends_with('.rs') {
continue
}
// Read the file content
file_content := os.read_file(file_path) or {
println('Failed to read file ${file_path}: ${err}')
continue
}
// Add file content to the combined source code
source_code += '// File: ${file}\n${file_content}\n\n'
}
if source_code == '' {
return error('No Rust files found in directory: ${source_code_path}')
}
return source_code
}
// Determines the crate path from the source code path
fn determine_crate_path(source_code_path string) !string {
// Extract the path relative to the src directory
src_index := source_code_path.index('src/') or {
return error('Could not determine crate path: src/ not found in path')
}
mut path_parts := source_code_path[src_index+4..].split('/')
// Remove the last part (the file name)
if path_parts.len > 0 {
path_parts.delete_last()
}
rel_path := path_parts.join('::')
return 'sal::${rel_path}'
// Extract the path relative to the src directory
src_index := source_code_path.index('src/') or {
return error('Could not determine crate path: src/ not found in path')
}
mut path_parts := source_code_path[src_index + 4..].split('/')
// Remove the last part (the file name)
if path_parts.len > 0 {
path_parts.delete_last()
}
rel_path := path_parts.join('::')
return 'sal::${rel_path}'
}
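A worked example of the mapping above (hypothetical path): everything after `src/` is split on `/`, the last component is dropped, and the remainder is joined with `::` under the `sal` crate root.

	// '/code/sal/src/virt/dns' -> ['virt', 'dns'] -> drop last -> 'sal::virt'
	crate := determine_crate_path('/code/sal/src/virt/dns')!
	assert crate == 'sal::virt'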
// Extracts the module name from a directory path
fn extract_module_name_from_path(path string) string {
dir_parts := path.split('/')
return dir_parts[dir_parts.len - 1]
}
// Helper function to read a file or return empty string if file doesn't exist
fn read_file_safely(file_path string) string {
return os.read_file(file_path) or { '' }
}
// Runs the task to generate Rhai wrappers
fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
// Create a new task
mut task := escalayer.new_task(
name: 'rhai_wrapper_creator.escalayer'
description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
)
// Create model configs
sonnet_model := escalayer.ModelConfig{
name: 'anthropic/claude-3.7-sonnet'
provider: 'anthropic'
temperature: 0.7
max_tokens: 25000
}
gpt4_model := escalayer.ModelConfig{
name: 'gpt-4'
provider: 'openai'
temperature: 0.7
max_tokens: 25000
}
// Create a prompt function that returns the prepared content
prompt_function := fn [prompt_content] (input string) string {
return prompt_content
}
// Define a single unit task that handles everything
task.new_unit_task(
name: 'create_rhai_wrappers'
prompt_function: prompt_function
callback_function: gen.process_rhai_wrappers
base_model: sonnet_model
retry_model: gpt4_model
retry_count: 1
)
// Initiate the task
return task.initiate('')
}
// Define a Rhai wrapper generator function for Container functions
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
// Load all required template and guide files
guides := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')
engine := $tmpl('./prompts/engine.md')
vector_vs_array := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')
rhai_integration_fixes := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')
rhai_syntax_guide := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')
generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
// Build the prompt content
return build_prompt_content(name, source_code, example_rhai, wrapper_md, errors_md,
guides, vector_vs_array, rhai_integration_fixes, rhai_syntax_guide,
generic_wrapper_rs, engine)
return build_prompt_content(name, source_code, example_rhai, wrapper_md, errors_md,
guides, vector_vs_array, rhai_integration_fixes, rhai_syntax_guide, generic_wrapper_rs,
engine)
}
// Helper function to load guide files with error handling
fn load_guide_file(path string) string {
return os.read_file(path) or {
eprintln('Warning: Failed to read guide file: ${path}')
return ''
}
}
// Builds the prompt content for the AI
fn build_prompt_content(name string, source_code string, example_rhai string, wrapper_md string,
errors_md string, guides string, vector_vs_array string,
rhai_integration_fixes string, rhai_syntax_guide string,
generic_wrapper_rs string, engine string) string {
fn build_prompt_content(name string, source_code string, example_rhai string, wrapper_md string,
errors_md string, guides string, vector_vs_array string,
rhai_integration_fixes string, rhai_syntax_guide string,
generic_wrapper_rs string, engine string) string {
return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
${guides}
${vector_vs_array}
${example_rhai}
@@ -313,305 +308,289 @@ your engine create function is called `create_rhai_engine`
@[params]
pub struct WrapperModule {
pub:
lib_rs string
example_rs string
engine_rs string
cargo_toml string
example_rhai string
generic_wrapper_rs string
wrapper_rs string
}
// functions is a list of function names that the AI should extract and pass in
fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string)! string {
// Define project directory paths
name := name_
project_dir := '${base_dir}/rhai'
// Create the project using cargo new --lib
if os.exists(project_dir) {
os.rmdir_all(project_dir) or {
return error('Failed to clean existing project directory: ${err}')
}
}
// Run cargo new --lib to create the project
os.chdir(base_dir) or {
return error('Failed to change directory to base directory: ${err}')
}
cargo_new_result := os.execute('cargo new --lib rhai')
if cargo_new_result.exit_code != 0 {
return error('Failed to create new library project: ${cargo_new_result.output}')
}
// Create examples directory
examples_dir := '${project_dir}/examples'
os.mkdir_all(examples_dir) or {
return error('Failed to create examples directory: ${err}')
}
// Write the lib.rs file
if wrapper.lib_rs != '' {
os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
return error('Failed to write lib.rs: ${err}')
}
}
fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
// Define project directory paths
name := name_
project_dir := '${base_dir}/rhai'
// Create the project using cargo new --lib
if os.exists(project_dir) {
os.rmdir_all(project_dir) or {
return error('Failed to clean existing project directory: ${err}')
}
}
// Run cargo new --lib to create the project
os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }
cargo_new_result := os.execute('cargo new --lib rhai')
if cargo_new_result.exit_code != 0 {
return error('Failed to create new library project: ${cargo_new_result.output}')
}
// Create examples directory
examples_dir := '${project_dir}/examples'
os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
// Write the lib.rs file
if wrapper.lib_rs != '' {
os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
return error('Failed to write lib.rs: ${err}')
}
}
// Write the wrapper.rs file
if wrapper.wrapper_rs != '' {
os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
return error('Failed to write wrapper.rs: ${err}')
}
}
// Write the generic wrapper.rs file
if wrapper.generic_wrapper_rs != '' {
os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
return error('Failed to write generic wrapper.rs: ${err}')
}
}
// Write the example.rs file
if wrapper.example_rs != '' {
os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
return error('Failed to write example.rs: ${err}')
}
}
// Write the engine.rs file if provided
if wrapper.engine_rs != '' {
os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
return error('Failed to write engine.rs: ${err}')
}
}
// Write the Cargo.toml file
if wrapper.cargo_toml != '' {
os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
return error('Failed to write Cargo.toml: ${err}')
}
}
// Write the example.rhai file if provided
if wrapper.example_rhai != '' {
os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
return error('Failed to write example.rhai: ${err}')
}
}
return project_dir
}
// Helper function to extract code blocks from the response
fn extract_code_block(response string, identifier string, language string) string {
// Find the start marker for the code block
mut start_marker := '```${language}\n// ${identifier}'
if language == '' {
start_marker = '```\n// ${identifier}'
}
start_index := response.index(start_marker) or {
// Try alternative format
mut alt_marker := '```${language}\n${identifier}'
if language == '' {
alt_marker = '```\n${identifier}'
}
response.index(alt_marker) or {
return ''
}
}
// Find the end marker
end_marker := '```'
end_index := response.index_after(end_marker, start_index + start_marker.len) or {
return ''
}
// Extract the content between the markers
content_start := start_index + start_marker.len
content := response[content_start..end_index].trim_space()
return content
// Find the start marker for the code block
mut start_marker := '```${language}\n// ${identifier}'
if language == '' {
start_marker = '```\n// ${identifier}'
}
start_index := response.index(start_marker) or {
// Try alternative format
mut alt_marker := '```${language}\n${identifier}'
if language == '' {
alt_marker = '```\n${identifier}'
}
response.index(alt_marker) or { return '' }
}
// Find the end marker
end_marker := '```'
end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
// Extract the content between the markers
content_start := start_index + start_marker.len
content := response[content_start..end_index].trim_space()
return content
}
// Extract module name from wrapper code
fn extract_module_name(code string) string {
lines := code.split('\n')
for line in lines {
// Look for pub mod or mod declarations
if line.contains('pub mod ') || line.contains('mod ') {
// Extract module name
mut parts := []string{}
if line.contains('pub mod ') {
parts = line.split('pub mod ')
} else {
parts = line.split('mod ')
}
if parts.len > 1 {
// Extract the module name and remove any trailing characters
mut name := parts[1].trim_space()
// Remove any trailing { or ; or whitespace
name = name.trim_right('{').trim_right(';').trim_space()
if name != '' {
return name
}
}
}
}
return ''
}
// RhaiGen struct for generating Rhai wrappers
struct RhaiGen {
name string
dir string
}
// Process the AI response and compile the generated code
fn (gen RhaiGen)process_rhai_wrappers(response string)! string {
// Extract code blocks from the response
code_blocks := extract_code_blocks(response) or {
return err
}
// Extract function names from the wrapper.rs content
functions := extract_functions_from_code(code_blocks.wrapper_rs)
println('Using module name: ${gen.name}_rhai')
println('Extracted functions: ${functions.join(", ")}')
name := gen.name
// Create a WrapperModule struct with the extracted content
wrapper := WrapperModule{
lib_rs: $tmpl('./templates/lib.rs')
wrapper_rs: code_blocks.wrapper_rs
example_rs: $tmpl('./templates/example.rs')
engine_rs: code_blocks.engine_rs
generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
cargo_toml: $tmpl('./templates/cargo.toml')
example_rhai: code_blocks.example_rhai
}
// Create the wrapper module
project_dir := create_wrapper_module(wrapper, functions, gen.name, gen.dir) or {
return error('Failed to create wrapper module: ${err}')
}
// Build and run the project
build_output, run_output := build_and_run_project(project_dir) or {
return err
}
return format_success_message(project_dir, build_output, run_output)
fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
// Extract code blocks from the response
code_blocks := extract_code_blocks(response) or { return err }
// Extract function names from the wrapper.rs content
functions := extract_functions_from_code(code_blocks.wrapper_rs)
println('Using module name: ${gen.name}_rhai')
println('Extracted functions: ${functions.join(', ')}')
name := gen.name
// Create a WrapperModule struct with the extracted content
wrapper := WrapperModule{
lib_rs: $tmpl('./templates/lib.rs')
wrapper_rs: code_blocks.wrapper_rs
example_rs: $tmpl('./templates/example.rs')
engine_rs: code_blocks.engine_rs
generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
cargo_toml: $tmpl('./templates/cargo.toml')
example_rhai: code_blocks.example_rhai
}
// Create the wrapper module
project_dir := create_wrapper_module(wrapper, functions, gen.name, gen.dir) or {
return error('Failed to create wrapper module: ${err}')
}
// Build and run the project
build_output, run_output := build_and_run_project(project_dir) or { return err }
return format_success_message(project_dir, build_output, run_output)
}
// CodeBlocks struct to hold extracted code blocks
struct CodeBlocks {
wrapper_rs string
engine_rs string
example_rhai string
}
// Extract code blocks from the AI response
fn extract_code_blocks(response string)! CodeBlocks {
// Extract wrapper.rs content
wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
if wrapper_rs_content == '' {
return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
}
// Extract engine.rs content
mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
if engine_rs_content == '' {
// Try to extract from the response without explicit language marker
engine_rs_content = extract_code_block(response, 'engine.rs', '')
}
// Extract example.rhai content
mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
if example_rhai_content == '' {
// Try to extract from the response without explicit language marker
example_rhai_content = extract_code_block(response, 'example.rhai', '')
if example_rhai_content == '' {
// Use the example from the template
example_rhai_content = load_example_from_template() or {
return err
}
}
}
return CodeBlocks{
wrapper_rs: wrapper_rs_content
engine_rs: engine_rs_content
example_rhai: example_rhai_content
}
fn extract_code_blocks(response string) !CodeBlocks {
// Extract wrapper.rs content
wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
if wrapper_rs_content == '' {
return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
}
// Extract engine.rs content
mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
if engine_rs_content == '' {
// Try to extract from the response without explicit language marker
engine_rs_content = extract_code_block(response, 'engine.rs', '')
}
// Extract example.rhai content
mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
if example_rhai_content == '' {
// Try to extract from the response without explicit language marker
example_rhai_content = extract_code_block(response, 'example.rhai', '')
if example_rhai_content == '' {
// Use the example from the template
example_rhai_content = load_example_from_template() or { return err }
}
}
return CodeBlocks{
wrapper_rs: wrapper_rs_content
engine_rs: engine_rs_content
example_rhai: example_rhai_content
}
}
// Load example.rhai from template file
fn load_example_from_template()! string {
fn load_example_from_template() !string {
example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
return error('Failed to read example.rhai template: ${err}')
}
// Extract the code block from the markdown file
example_rhai_content := extract_code_block(example_script_md, 'example.rhai', 'rhai')
if example_rhai_content == '' {
return error('Failed to extract example.rhai from template file')
}
return example_rhai_content
}
// Build and run the project
fn build_and_run_project(project_dir string)! (string, string) {
// Change to the project directory
os.chdir(project_dir) or {
return error('Failed to change directory to project: ${err}')
}
// Run cargo build first
build_result := os.execute('cargo build')
if build_result.exit_code != 0 {
return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
}
// Run the example
run_result := os.execute('cargo run --example example')
return build_result.output, run_result.output
fn build_and_run_project(project_dir string) !(string, string) {
// Change to the project directory
os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }
// Run cargo build first
build_result := os.execute('cargo build')
if build_result.exit_code != 0 {
return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
}
// Run the example
run_result := os.execute('cargo run --example example')
return build_result.output, run_result.output
}
// Format success message
fn format_success_message(project_dir string, build_output string, run_output string) string {
return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
}
// Extract function names from wrapper code
fn extract_functions_from_code(code string) []string {
mut functions := []string{}
lines := code.split('\n')
for line in lines {
if line.contains('pub fn ') && !line.contains('//') {
// Extract function name
parts := line.split('pub fn ')
if parts.len > 1 {
name_parts := parts[1].split('(')
if name_parts.len > 0 {
fn_name := name_parts[0].trim_space()
if fn_name != '' {
functions << fn_name
}
}
}
}
}
return functions
}
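Taken together, a hedged end-to-end sketch of this script's pipeline (all function names as defined above; the input directory and the empty prompt-file arguments are hypothetical):

	// 1. validate CLI input and gather the Rust sources
	src := validate_command_args()!
	code := read_source_code(src)!
	name := extract_module_name_from_path(src)
	// 2. build the AI prompt and the generator state
	prompt := create_rhai_wrappers(name, code, '', '', '', determine_crate_path(src)!)
	gen := RhaiGen{
		name: name
		dir: src
	}
	// 3. run the escalayer task; its callback compiles and runs the generated crate
	println(run_wrapper_generation_task(prompt, gen)!)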

View File

@@ -6,285 +6,278 @@ import freeflowuniverse.herolib.ai.utils
import os
pub fn generate_rhai_wrapper(name string, source_path string) !string {
// Detect source package and module information
source_pkg_info := rust.detect_source_package(source_path)!
source_code := rust.read_source_code(source_path)!
prompt := rhai_wrapper_generation_prompt(name, source_code, source_pkg_info)!
return run_wrapper_generation_task(prompt, RhaiGen{
name: name
dir: source_path
source_pkg_info: source_pkg_info
})!
}
// Runs the task to generate Rhai wrappers
pub fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
// Create a new task
mut task := escalayer.new_task(
name: 'rhai_wrapper_creator.escalayer'
description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
)
// Create model configs
sonnet_model := escalayer.ModelConfig{
name: 'anthropic/claude-3.7-sonnet'
provider: 'anthropic'
temperature: 0.7
max_tokens: 25000
}
gpt4_model := escalayer.ModelConfig{
name: 'gpt-4'
provider: 'openai'
temperature: 0.7
max_tokens: 25000
}
// Create a prompt function that returns the prepared content
prompt_function := fn [prompt_content] (input string) string {
return prompt_content
}
// Define a single unit task that handles everything
task.new_unit_task(
name: 'create_rhai_wrappers'
prompt_function: prompt_function
callback_function: gen.process_rhai_wrappers
base_model: sonnet_model
retry_model: gpt4_model
retry_count: 1
)
// Initiate the task
return task.initiate('')
}
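// Call sketch (hypothetical values; `prompt` stands for a prepared prompt
// string): the single unit task runs against the sonnet base model and, per
// retry_count above, retries once with gpt-4 when the callback rejects.
// project_dir := run_wrapper_generation_task(prompt, RhaiGen{
// 	name: 'mymodule'
// 	dir: '/path/to/source'
// })!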
// Define a Rhai wrapper generator function for Container functions
pub fn rhai_wrapper_generation_prompt(name string, source_code string, source_pkg_info rust.SourcePackageInfo) !string {
current_dir := os.dir(@FILE)
example_rhai := os.read_file('${current_dir}/prompts/example_script.md')!
wrapper_md := os.read_file('${current_dir}/prompts/wrapper.md')!
errors_md := os.read_file('${current_dir}/prompts/errors.md')!
// Load all required template and guide files
guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')!
engine := $tmpl('./prompts/engine.md')
vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')!
rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')!
rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')!
generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
prompt := $tmpl('./prompts/main.md')
return prompt
}
@[params]
pub struct WrapperModule {
pub:
lib_rs string
example_rs string
engine_rs string
cargo_toml string
example_rhai string
generic_wrapper_rs string
wrapper_rs string
}
// functions is a list of function names that AI should extract and pass in
pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string) !string {
// Define project directory paths
project_dir := '${path}/rhai'
// Create the project using cargo new --lib
if os.exists(project_dir) {
os.rmdir_all(project_dir) or {
return error('Failed to clean existing project directory: ${err}')
}
}
// Run cargo new --lib to create the project
os.chdir(path) or { return error('Failed to change directory to base directory: ${err}') }
cargo_new_result := os.execute('cargo new --lib rhai')
if cargo_new_result.exit_code != 0 {
return error('Failed to create new library project: ${cargo_new_result.output}')
}
// Create examples directory
examples_dir := '${project_dir}/examples'
os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
// Write the lib.rs file
if wrapper.lib_rs != '' {
os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
return error('Failed to write lib.rs: ${err}')
}
} else {
// Use default lib.rs template if none provided
lib_rs_content := $tmpl('./templates/lib.rs')
os.write_file('${project_dir}/src/lib.rs', lib_rs_content) or {
return error('Failed to write lib.rs: ${err}')
}
}
// Write the wrapper.rs file
if wrapper.wrapper_rs != '' {
os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
return error('Failed to write wrapper.rs: ${err}')
}
}
// Write the generic wrapper.rs file
if wrapper.generic_wrapper_rs != '' {
os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
return error('Failed to write generic wrapper.rs: ${err}')
}
}
// Write the example.rs file
if wrapper.example_rs != '' {
os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
return error('Failed to write example.rs: ${err}')
}
} else {
// Use default example.rs template if none provided
example_rs_content := $tmpl('./templates/example.rs')
os.write_file('${examples_dir}/example.rs', example_rs_content) or {
return error('Failed to write example.rs: ${err}')
}
}
// Write the engine.rs file if provided
if wrapper.engine_rs != '' {
os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
return error('Failed to write engine.rs: ${err}')
}
}
// Write the Cargo.toml file
os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
return error('Failed to write Cargo.toml: ${err}')
}
// Write the example.rhai file
os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
return error('Failed to write example.rhai: ${err}')
}
return project_dir
}
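// Resulting layout (sketch, assuming every optional field is provided):
// <path>/rhai/
// 	Cargo.toml
// 	src/lib.rs
// 	src/wrapper.rs
// 	src/generic_wrapper.rs
// 	src/engine.rs
// 	examples/example.rs
// 	examples/example.rhai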
// Extract module name from wrapper code
fn extract_module_name(code string) string {
lines := code.split('\n')
for line in lines {
// Look for pub mod or mod declarations
if line.contains('pub mod ') || line.contains('mod ') {
// Extract module name
mut parts := []string{}
if line.contains('pub mod ') {
parts = line.split('pub mod ')
} else {
parts = line.split('mod ')
}
if parts.len > 1 {
// Extract the module name and remove any trailing characters
mut name := parts[1].trim_space()
// Remove any trailing { or ; or whitespace
name = name.trim_right('{').trim_right(';').trim_space()
if name != '' {
return name
}
}
}
}
return ''
}
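// Usage sketch (illustrative only): both block and semicolon declarations
// resolve to the bare module name.
// assert extract_module_name('pub mod wrapper {') == 'wrapper'
// assert extract_module_name('mod engine;') == 'engine'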
// RhaiGen struct for generating Rhai wrappers
struct RhaiGen {
name string
dir string
source_pkg_info rust.SourcePackageInfo
}
// Process the AI response and compile the generated code
pub fn (gen RhaiGen) process_rhai_wrappers(input string) !string {
blocks := extract_code_blocks(input)!
source_pkg_info := gen.source_pkg_info
// Create the module structure
mod := WrapperModule{
lib_rs: blocks.lib_rs
engine_rs: blocks.engine_rs
example_rhai: blocks.example_rhai
generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
wrapper_rs: blocks.wrapper_rs
}
// Write the module files
project_dir := write_rhai_wrapper_module(mod, gen.name, gen.dir)!
return project_dir
}
// CodeBlocks struct to hold extracted code blocks
struct CodeBlocks {
wrapper_rs string
engine_rs string
example_rhai string
lib_rs string
}
// Extract code blocks from the AI response
fn extract_code_blocks(response string) !CodeBlocks {
// Extract wrapper.rs content
wrapper_rs_content := utils.extract_code_block(response, 'wrapper.rs', 'rust')
if wrapper_rs_content == '' {
return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
}
// Extract engine.rs content
mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
if engine_rs_content == '' {
// Try to extract from the response without explicit language marker
engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
}
// Extract example.rhai content
mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
if example_rhai_content == '' {
// Try to extract from the response without explicit language marker
example_rhai_content = utils.extract_code_block(response, 'example.rhai', '')
if example_rhai_content == '' {
return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
}
}
// Extract lib.rs content
lib_rs_content := utils.extract_code_block(response, 'lib.rs', 'rust')
if lib_rs_content == '' {
return error('Failed to extract lib.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// lib.rs and ends with ```')
}
return CodeBlocks{
wrapper_rs: wrapper_rs_content
engine_rs: engine_rs_content
example_rhai: example_rhai_content
lib_rs: lib_rs_content
}
}
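// Expected response shape (sketch of a minimal valid AI reply, inferred from
// the extraction rules above):
//
// ```rust
// // wrapper.rs
// ...wrapper code...
// ```
// ```rust
// // lib.rs
// ...lib code...
// ```
// ```rhai
// // example.rhai
// ...script...
// ```
//
// engine.rs is optional and may also appear without a language marker.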
// Format success message
fn format_success_message(project_dir string, build_output string, run_output string) string {
return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
}

View File

@@ -20,7 +20,7 @@ import os
// name: 'rhai_wrapper_creator.escalayer'
// description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
// )
// // Create model configs
// sonnet_model := escalayer.ModelConfig{
// name: 'anthropic/claude-3.7-sonnet'
@@ -28,19 +28,19 @@ import os
// temperature: 0.7
// max_tokens: 25000
// }
// gpt4_model := escalayer.ModelConfig{
// name: 'gpt-4'
// provider: 'openai'
// temperature: 0.7
// max_tokens: 25000
// }
// // Create a prompt function that returns the prepared content
// prompt_function := fn [prompt_content] (input string) string {
// return prompt_content
// }
// // Define a single unit task that handles everything
// task.new_unit_task(
// name: 'create_rhai_wrappers'
@@ -50,7 +50,7 @@ import os
// retry_model: gpt4_model
// retry_count: 1
// )
// // Initiate the task
// return task.initiate('')
// }
@@ -69,33 +69,33 @@ import os
// // functions is a list of function names that AI should extract and pass in
// pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string)! string {
// // Define project directory paths
// project_dir := '${path}/rhai'
// // Create the project using cargo new --lib
// if os.exists(project_dir) {
// os.rmdir_all(project_dir) or {
// return error('Failed to clean existing project directory: ${err}')
// }
// }
// // Run cargo new --lib to create the project
// os.chdir(path) or {
// return error('Failed to change directory to base directory: ${err}')
// }
// cargo_new_result := os.execute('cargo new --lib rhai')
// if cargo_new_result.exit_code != 0 {
// return error('Failed to create new library project: ${cargo_new_result.output}')
// }
// // Create examples directory
// examples_dir := '${project_dir}/examples'
// os.mkdir_all(examples_dir) or {
// return error('Failed to create examples directory: ${err}')
// }
// // Write the lib.rs file
// if wrapper.lib_rs != '' {
// os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
@@ -109,47 +109,45 @@ import os
// return error('Failed to write wrapper.rs: ${err}')
// }
// }
// // Write the generic wrapper.rs file
// if wrapper.generic_wrapper_rs != '' {
// os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
// return error('Failed to write generic wrapper.rs: ${err}')
// }
// }
// // Write the example.rs file
// if wrapper.example_rs != '' {
// os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
// return error('Failed to write example.rs: ${err}')
// }
// }
// // Write the engine.rs file if provided
// if wrapper.engine_rs != '' {
// os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
// return error('Failed to write engine.rs: ${err}')
// }
// }
// // Write the Cargo.toml file
// os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
// return error('Failed to write Cargo.toml: ${err}')
// }
// // Write the example.rhai file
// os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
// return error('Failed to write example.rhai: ${err}')
// }
// return project_dir
// }
// // Extract module name from wrapper code
// fn extract_module_name(code string) string {
// lines := code.split('\n')
// for line in lines {
// // Look for pub mod or mod declarations
// if line.contains('pub mod ') || line.contains('mod ') {
@@ -160,7 +158,7 @@ import os
// } else {
// parts = line.split('mod ')
// }
// if parts.len > 1 {
// // Extract the module name and remove any trailing characters
// mut name := parts[1].trim_space()
@@ -172,7 +170,7 @@ import os
// }
// }
// }
// return ''
// }
@@ -188,9 +186,9 @@ import os
// code_blocks := extract_code_blocks(response) or {
// return err
// }
// name := gen.name
// // Create a WrapperModule struct with the extracted content
// wrapper := WrapperModule{
// lib_rs: $tmpl('./templates/lib.rs')
@@ -201,17 +199,17 @@ import os
// cargo_toml: $tmpl('./templates/cargo.toml')
// example_rhai: code_blocks.example_rhai
// }
// // Create the wrapper module
// project_dir := write_rhai_wrapper_module(wrapper, gen.name, gen.dir) or {
// return error('Failed to create wrapper module: ${err}')
// }
// // Build and run the project
// build_output, run_output := rust.run_example(project_dir, 'example') or {
// return err
// }
// return format_success_message(project_dir, build_output, run_output)
// }
@@ -229,14 +227,14 @@ import os
// if wrapper_rs_content == '' {
// return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
// }
// // Extract engine.rs content
// mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
// if engine_rs_content == '' {
// // Try to extract from the response without explicit language marker
// engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
// }
// // Extract example.rhai content
// mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
// if example_rhai_content == '' {
@@ -246,7 +244,7 @@ import os
// return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
// }
// }
// return CodeBlocks{
// wrapper_rs: wrapper_rs_content
// engine_rs: engine_rs_content

View File

@@ -2,17 +2,17 @@ module mcp
import cli
pub const command = cli.Command{
sort_flags: true
name: 'rhai'
// execute: cmd_mcpgen
description: 'rhai command'
commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the Rhai server'
},
]
}
@@ -20,4 +20,3 @@ fn cmd_start(cmd cli.Command) ! {
mut server := new_mcp_server()!
server.start()!
}

View File

@@ -9,10 +9,10 @@ pub fn new_mcp_server() !&mcp.Server {
// Initialize the server with the empty handlers map
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
'generate_rhai_wrapper': generate_rhai_wrapper_spec
}
tool_handlers: {
'generate_rhai_wrapper': generate_rhai_wrapper_handler
}
prompts: {
@@ -30,4 +30,4 @@ pub fn new_mcp_server() !&mcp.Server {
}
})!
return server
}

View File

@@ -5,39 +5,41 @@ import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.ai.mcp.rhai.logic
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.lang.rust
import x.json2 as json
// Tool definition for the create_rhai_wrapper function
const rhai_wrapper_prompt_spec = mcp.Prompt{
name: 'rhai_wrapper'
description: 'provides a prompt for creating Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
arguments: [
mcp.PromptArgument{
name: 'source_path'
description: 'Path to the source directory'
required: true
},
]
}
// Tool handler for the create_rhai_wrapper function
pub fn rhai_wrapper_prompt_handler(arguments []string) ![]mcp.PromptMessage {
source_path := arguments[0]
// Read and combine all Rust files in the source directory
source_code := rust.read_source_code(source_path)!
// Extract the module name from the directory path (last component)
name := rust.extract_module_name_from_path(source_path)
source_pkg_info := rust.detect_source_package(source_path)!
result := logic.rhai_wrapper_generation_prompt(name, source_code, source_pkg_info)!
return [
mcp.PromptMessage{
role: 'assistant'
content: mcp.PromptContent{
typ: 'text'
text: result
}
},
]
}

View File

@@ -1,19 +1,19 @@
module mcp
import freeflowuniverse.herolib.ai.mcp
import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import log
const specs = mcp.Tool{
name: 'rhai_interface'
description: 'Add Rhai Interface to Rust Code Files'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to a .rs file or directory containing .rs files to make rhai interface for'
})
}
required: ['path']

View File

@@ -8,32 +8,31 @@ import x.json2 as json { Any }
// Tool definition for the generate_rhai_wrapper function
const generate_rhai_wrapper_spec = mcp.Tool{
name: 'generate_rhai_wrapper'
description: 'generate_rhai_wrapper receives the name of a V language function string, and the path to the module in which it exists.'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'name': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'source_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
required: ['name', 'source_path']
}
}
// Tool handler for the generate_rhai_wrapper function
pub fn generate_rhai_wrapper_handler(arguments map[string]Any) !mcp.ToolCallResult {
name := arguments['name'].str()
source_path := arguments['source_path'].str()
result := logic.generate_rhai_wrapper(name, source_path) or {
return mcp.error_tool_call_result(err)
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
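// Invocation sketch (hypothetical values, not part of this commit):
// mut args := map[string]Any{}
// args['name'] = Any('mymodule')
// args['source_path'] = Any('/path/to/rust/module')
// res := generate_rhai_wrapper_handler(args)!
// assert !res.is_error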

View File

@@ -1 +1 @@
module rhai

View File

@@ -2,16 +2,16 @@ module rust
import cli
pub const command = cli.Command{
sort_flags: true
name: 'rust'
description: 'Rust language tools command'
commands: [
cli.Command{
name: 'start'
execute: cmd_start
description: 'start the Rust MCP server'
},
]
}

View File

@@ -1,6 +1,6 @@
module rust
import freeflowuniverse.herolib.ai.mcp { ToolContent }
pub fn result_to_mcp_tool_contents[T](result T) []ToolContent {
return [result_to_mcp_tool_content[T](result)]
@@ -51,4 +51,4 @@ pub fn array_to_mcp_tool_contents[U](array []U) []ToolContent {
contents << result_to_mcp_tool_content(item)
}
return contents
}

View File

@@ -9,40 +9,40 @@ pub fn new_mcp_server() !&mcp.Server {
// Initialize the server with tools and prompts
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
'list_functions_in_file': list_functions_in_file_spec
'list_structs_in_file': list_structs_in_file_spec
'list_modules_in_dir': list_modules_in_dir_spec
'get_import_statement': get_import_statement_spec
// 'get_module_dependency': get_module_dependency_spec
}
tool_handlers: {
'list_functions_in_file': list_functions_in_file_handler
'list_structs_in_file': list_structs_in_file_handler
'list_modules_in_dir': list_modules_in_dir_handler
'get_import_statement': get_import_statement_handler
// 'get_module_dependency': get_module_dependency_handler
}
prompts: {
'rust_functions': rust_functions_prompt_spec
'rust_structs': rust_structs_prompt_spec
'rust_modules': rust_modules_prompt_spec
'rust_imports': rust_imports_prompt_spec
'rust_dependencies': rust_dependencies_prompt_spec
'rust_tools_guide': rust_tools_guide_prompt_spec
}
prompt_handlers: {
'rust_functions': rust_functions_prompt_handler
'rust_structs': rust_structs_prompt_handler
'rust_modules': rust_modules_prompt_handler
'rust_imports': rust_imports_prompt_handler
'rust_dependencies': rust_dependencies_prompt_handler
'rust_tools_guide': rust_tools_guide_prompt_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
server_info: mcp.ServerInfo{
name: 'rust'
version: '1.0.0'
}
}

View File

@@ -2,113 +2,123 @@ module rust
import freeflowuniverse.herolib.ai.mcp
import os
import x.json2 as json
// Prompt specification for Rust functions
const rust_functions_prompt_spec = mcp.Prompt{
name: 'rust_functions'
description: 'Provides guidance on working with Rust functions and using the list_functions_in_file tool'
arguments: []
}
// Handler for rust_functions prompt
pub fn rust_functions_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/functions.md')!
return [
mcp.PromptMessage{
role: 'assistant'
content: mcp.PromptContent{
typ: 'text'
text: content
}
},
]
}
// Prompt specification for Rust structs
const rust_structs_prompt_spec = mcp.Prompt{
name: 'rust_structs'
description: 'Provides guidance on working with Rust structs and using the list_structs_in_file tool'
arguments: []
}
// Handler for rust_structs prompt
pub fn rust_structs_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/structs.md')!
return [
mcp.PromptMessage{
role: 'assistant'
content: mcp.PromptContent{
typ: 'text'
text: content
}
},
]
}
// Prompt specification for Rust modules
const rust_modules_prompt_spec = mcp.Prompt{
name: 'rust_modules'
description: 'Provides guidance on working with Rust modules and using the list_modules_in_dir tool'
arguments: []
}
// Handler for rust_modules prompt
pub fn rust_modules_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/modules.md')!
return [
mcp.PromptMessage{
role: 'assistant'
content: mcp.PromptContent{
typ: 'text'
text: content
}
},
]
}
// Prompt specification for Rust imports
const rust_imports_prompt_spec = mcp.Prompt{
name: 'rust_imports'
description: 'Provides guidance on working with Rust imports and using the get_import_statement tool'
arguments: []
}
// Handler for rust_imports prompt
pub fn rust_imports_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/imports.md')!
return [
mcp.PromptMessage{
role: 'assistant'
content: mcp.PromptContent{
typ: 'text'
text: content
}
},
]
}
// Prompt specification for Rust dependencies
const rust_dependencies_prompt_spec = mcp.Prompt{
name: 'rust_dependencies'
description: 'Provides guidance on working with Rust dependencies and using the get_module_dependency tool'
arguments: []
}
// Handler for rust_dependencies prompt
pub fn rust_dependencies_prompt_handler(arguments []string) ![]mcp.PromptMessage {
content := os.read_file('${os.dir(@FILE)}/prompts/dependencies.md')!
return [
mcp.PromptMessage{
role: 'assistant'
content: mcp.PromptContent{
typ: 'text'
text: content
}
},
]
}
// Prompt specification for general Rust tools guide
const rust_tools_guide_prompt_spec = mcp.Prompt{
name: 'rust_tools_guide'
description: 'Provides a comprehensive guide on all available Rust tools and how to use them'
arguments: []
}
// Handler for rust_tools_guide prompt
@@ -119,26 +129,23 @@ pub fn rust_tools_guide_prompt_handler(arguments []string) ![]mcp.PromptMessage
modules_content := os.read_file('${os.dir(@FILE)}/prompts/modules.md')!
imports_content := os.read_file('${os.dir(@FILE)}/prompts/imports.md')!
dependencies_content := os.read_file('${os.dir(@FILE)}/prompts/dependencies.md')!
combined_content := '# Rust Language Tools Guide\n\n' +
'This guide provides comprehensive information on working with Rust code using the available tools.\n\n' +
'## Table of Contents\n\n' + '1. [Functions](#functions)\n' + '2. [Structs](#structs)\n' +
'3. [Modules](#modules)\n' + '4. [Imports](#imports)\n' +
'5. [Dependencies](#dependencies)\n\n' + '<a name="functions"></a>\n' + functions_content +
'\n\n' + '<a name="structs"></a>\n' + structs_content + '\n\n' +
'<a name="modules"></a>\n' + modules_content + '\n\n' + '<a name="imports"></a>\n' +
imports_content + '\n\n' + '<a name="dependencies"></a>\n' + dependencies_content
return [
mcp.PromptMessage{
role: 'assistant'
content: mcp.PromptContent{
typ: 'text'
text: combined_content
}
},
]
}

View File

@@ -1,111 +1,105 @@
module rust
import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.lang.rust
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2 as json { Any }
// Tool specification for listing functions in a Rust file
const list_functions_in_file_spec = mcp.Tool{
name: 'list_functions_in_file'
description: 'Lists all function definitions in a Rust file'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'file_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the Rust file'
})
}
required: ['file_path']
}
}
// Handler for list_functions_in_file
pub fn list_functions_in_file_handler(arguments map[string]Any) !mcp.ToolCallResult {
file_path := arguments['file_path'].str()
result := rust.list_functions_in_file(file_path) or { return mcp.error_tool_call_result(err) }
return mcp.ToolCallResult{
is_error: false
content: mcp.array_to_mcp_tool_contents[string](result)
}
}
// Tool specification for listing structs in a Rust file
const list_structs_in_file_spec = mcp.Tool{
name: 'list_structs_in_file'
description: 'Lists all struct definitions in a Rust file'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'file_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the Rust file'
})
}
required: ['file_path']
}
}
// Handler for list_structs_in_file
pub fn list_structs_in_file_handler(arguments map[string]Any) !mcp.ToolCallResult {
file_path := arguments['file_path'].str()
result := rust.list_structs_in_file(file_path) or { return mcp.error_tool_call_result(err) }
return mcp.ToolCallResult{
is_error: false
content: mcp.array_to_mcp_tool_contents[string](result)
}
}
// Tool specification for listing modules in a directory
const list_modules_in_dir_spec = mcp.Tool{
name: 'list_modules_in_dir'
description: 'Lists all Rust modules in a directory'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'dir_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the directory'
})
}
required: ['dir_path']
}
}
// Handler for list_modules_in_dir
pub fn list_modules_in_dir_handler(arguments map[string]Any) !mcp.ToolCallResult {
dir_path := arguments['dir_path'].str()
result := rust.list_modules_in_directory(dir_path) or { return mcp.error_tool_call_result(err) }
return mcp.ToolCallResult{
is_error: false
content: mcp.array_to_mcp_tool_contents[string](result)
}
}
// Tool specification for getting an import statement
const get_import_statement_spec = mcp.Tool{
name: 'get_import_statement'
description: 'Generates appropriate Rust import statement for a module based on file paths'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'current_file': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the file where the import will be added'
})
'target_module': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the target module to be imported'
})
}
required: ['current_file', 'target_module']
}
}
@@ -118,33 +112,33 @@ pub fn get_import_statement_handler(arguments map[string]Any) !mcp.ToolCallResul
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
// Tool specification for getting module dependency information
const get_module_dependency_spec = mcp.Tool{
name: 'get_module_dependency'
description: 'Gets dependency information for adding a Rust module to a project'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'importer_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the file that will import the module'
})
'module_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the module that will be imported'
})
}
required: ['importer_path', 'module_path']
}
}
struct Tester {
import_statement string
module_path string
}
// Handler for get_module_dependency
@@ -157,9 +151,9 @@ pub fn get_module_dependency_handler(arguments map[string]Any) !mcp.ToolCallResu
return mcp.ToolCallResult{
is_error: false
content: result_to_mcp_tool_contents[Tester](Tester{
import_statement: dependency.import_statement
module_path: dependency.module_path
}) // Return JSON string
}
}
@@ -168,21 +162,21 @@ pub fn get_module_dependency_handler(arguments map[string]Any) !mcp.ToolCallResu
// Specification for get_function_from_file tool
const get_function_from_file_spec = mcp.Tool{
name: 'get_function_from_file'
description: 'Get the declaration of a Rust function from a specified file path.'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'file_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the Rust file.'
})
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: "Name of the function to retrieve (e.g., 'my_function' or 'MyStruct::my_method')."
})
}
required: ['file_path', 'function_name']
}
}
@@ -195,7 +189,7 @@ pub fn get_function_from_file_handler(arguments map[string]Any) !mcp.ToolCallRes
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
@@ -203,21 +197,21 @@ pub fn get_function_from_file_handler(arguments map[string]Any) !mcp.ToolCallRes
// Specification for get_function_from_module tool
const get_function_from_module_spec = mcp.Tool{
name: 'get_function_from_module'
description: 'Get the declaration of a Rust function from a specified module path (directory or file).'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'module_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the Rust module directory or file.'
})
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: "Name of the function to retrieve (e.g., 'my_function' or 'MyStruct::my_method')."
})
}
required: ['module_path', 'function_name']
}
}
@@ -230,7 +224,7 @@ pub fn get_function_from_module_handler(arguments map[string]Any) !mcp.ToolCallR
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
@@ -238,21 +232,21 @@ pub fn get_function_from_module_handler(arguments map[string]Any) !mcp.ToolCallR
// Specification for get_struct_from_file tool
const get_struct_from_file_spec = mcp.Tool{
name: 'get_struct_from_file'
description: 'Get the declaration of a Rust struct from a specified file path.'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'file_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the Rust file.'
})
'struct_name': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: "Name of the struct to retrieve (e.g., 'MyStruct')."
})
}
required: ['file_path', 'struct_name']
}
}
@@ -265,7 +259,7 @@ pub fn get_struct_from_file_handler(arguments map[string]Any) !mcp.ToolCallResul
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
}
}
@@ -273,21 +267,21 @@ pub fn get_struct_from_file_handler(arguments map[string]Any) !mcp.ToolCallResul
// Specification for get_struct_from_module tool
const get_struct_from_module_spec = mcp.Tool{
name: 'get_struct_from_module'
description: 'Get the declaration of a Rust struct from a specified module path (directory or file).'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'module_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to the Rust module directory or file.'
})
'struct_name': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: "Name of the struct to retrieve (e.g., 'MyStruct')."
})
}
required: ['module_path', 'struct_name']
}
}
@@ -300,6 +294,6 @@ pub fn get_struct_from_module_handler(arguments map[string]Any) !mcp.ToolCallRes
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](result)
}
}

View File

@@ -8,7 +8,7 @@ fn main() {
eprintln('Failed to create MCP server: ${err}')
return
}
// Start the server
server.start() or {
eprintln('Failed to start MCP server: ${err}')

View File

@@ -15,11 +15,11 @@ pub fn new_mcp_server(v &VCode) !&mcp.Server {
mut server := mcp.new_server(mcp.MemoryBackend{
tools: {
'get_function_from_file': get_function_from_file_tool
'write_vfile': write_vfile_tool
}
tool_handlers: {
'get_function_from_file': v.get_function_from_file_tool_handler
'write_vfile': v.write_vfile_tool_handler
}
}, mcp.ServerParams{
config: mcp.ServerConfiguration{
@@ -30,4 +30,4 @@ pub fn new_mcp_server(v &VCode) !&mcp.Server {
}
})!
return server
}

View File

@@ -3,7 +3,7 @@ module vcode
import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2 { Any }
const get_function_from_file_tool = mcp.Tool{
name: 'get_function_from_file'
@@ -16,10 +16,10 @@ RETURNS: string - the function block including comments, or empty string if not
typ: 'object'
properties: {
'file_path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'function_name': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
required: ['file_path', 'function_name']

View File

@@ -3,7 +3,7 @@ module vcode
import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2 { Any }
const write_vfile_tool = mcp.Tool{
name: 'write_vfile'
@@ -18,20 +18,20 @@ RETURNS: string - success message with the path of the written file'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'code': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
'format': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'boolean'
})
'overwrite': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'boolean'
})
'prefix': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
})
}
required: ['path', 'code']
@@ -41,31 +41,27 @@ RETURNS: string - success message with the path of the written file'
pub fn (d &VCode) write_vfile_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
path := arguments['path'].str()
code_str := arguments['code'].str()
// Parse optional parameters with defaults
format := if 'format' in arguments { arguments['format'].bool() } else { false }
overwrite := if 'overwrite' in arguments { arguments['overwrite'].bool() } else { false }
prefix := if 'prefix' in arguments { arguments['prefix'].str() } else { '' }
// Create write options
options := code.WriteOptions{
format: format
overwrite: overwrite
prefix: prefix
}
// Parse the V code string into a VFile
vfile := code.parse_vfile(code_str) or { return mcp.error_tool_call_result(err) }
// Write the VFile to the specified path
vfile.write(path, options) or { return mcp.error_tool_call_result(err) }
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string]('Successfully wrote V file to ${path}')
}
}
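// Invocation sketch (hypothetical path and code, on some VCode instance `v`):
// omitted options fall back to format=false, overwrite=false, prefix='' as
// parsed above.
// mut args := map[string]Any{}
// args['path'] = Any('/tmp/hello.v')
// args['code'] = Any('module main\nfn main() { println(1) }')
// args['format'] = Any(true)
// res := v.write_vfile_tool_handler(args)!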

View File

@@ -8,47 +8,47 @@ import os
pub fn handler(arguments map[string]Any) !mcp.ToolCallResult {
path := arguments['path'].str()
// Check if path exists
if !os.exists(path) {
return mcp.ToolCallResult{
is_error: true
content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
}
}
// Determine if path is a file or directory
is_directory := os.is_dir(path)
mut message := ""
mut message := ''
if is_directory {
// Convert all pug files in the directory
pugconvert.convert_pug(path) or {
return mcp.ToolCallResult{
is_error: true
content: mcp.result_to_mcp_tool_contents[string]('Error converting pug files in directory: ${err}')
}
}
message = "Successfully converted all pug files in directory '${path}'"
} else if path.ends_with(".v") {
} else if path.ends_with('.v') {
// Convert a single pug file
pugconvert.convert_pug_file(path) or {
return mcp.ToolCallResult{
is_error: true
content: mcp.result_to_mcp_tool_contents[string]('Error converting pug file: ${err}')
}
}
message = "Successfully converted pug file '${path}'"
} else {
return mcp.ToolCallResult{
is_error: true
content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
}
}
return mcp.ToolCallResult{
is_error: false
content: mcp.result_to_mcp_tool_contents[string](message)
}
}

View File

@@ -1,18 +1,18 @@
module pugconvert
import freeflowuniverse.herolib.ai.mcp
import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.ai.mcp.logger
const specs = mcp.Tool{
name: 'pugconvert'
description: 'Convert Pug template files to Jet template files'
input_schema: jsonschema.Schema{
typ: 'object'
properties: {
'path': jsonschema.SchemaRef(jsonschema.Schema{
typ: 'string'
description: 'Path to a .pug file or directory containing .pug files to convert'
})
}

View File

@@ -2,33 +2,29 @@ module utils
// Helper function to extract code blocks from the response
pub fn extract_code_block(response string, identifier string, language string) string {
// Find the start marker for the code block
mut start_marker := '```${language}\n// ${identifier}'
if language == '' {
start_marker = '```\n// ${identifier}'
}
start_index := response.index(start_marker) or {
// Try alternative format
mut alt_marker := '```${language}\n${identifier}'
if language == '' {
alt_marker = '```\n${identifier}'
}
response.index(alt_marker) or { return '' }
}
// Find the end marker
end_marker := '```'
end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
// Extract the content between the markers
content_start := start_index + start_marker.len
content := response[content_start..end_index].trim_space()
return content
}
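// Usage sketch (illustrative only):
// response := '```rust\n// wrapper.rs\npub fn demo() {}\n```'
// assert extract_code_block(response, 'wrapper.rs', 'rust') == 'pub fn demo() {}'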

View File

@@ -11,8 +11,7 @@ pub fn generate_module_from_openapi(openapi_path string) !string {
openapi_spec := openapi.new(path: openapi_path)!
actor_spec := specification.from_openapi(openapi_spec)!
actor_module := generate_actor_module(actor_spec,
interfaces: [.openapi, .http]
)!

View File

@@ -1,6 +1,6 @@
module generator
import freeflowuniverse.herolib.core.code { CodeItem, Function, Import, Param, Result, Struct, VFile }
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.schemas.openapi
import freeflowuniverse.herolib.schemas.openrpc
@@ -18,12 +18,13 @@ pub struct Source {
}
pub fn generate_methods_file_str(source Source) !string {
actor_spec := if path := source.openapi_path {
specification.from_openapi(openapi.new(path: path)!)!
} else if path := source.openrpc_path {
specification.from_openrpc(openrpc.new(path: path)!)!
} else {
panic('No openapi or openrpc path provided')
}
return generate_methods_file(actor_spec)!.write_str()!
}

View File

@@ -10,12 +10,13 @@ import freeflowuniverse.herolib.baobab.specification { ActorMethod, ActorSpecifi
import freeflowuniverse.herolib.schemas.openapi
pub fn generate_methods_example_file_str(source Source) !string {
actor_spec := if path := source.openapi_path {
specification.from_openapi(openapi.new(path: path)!)!
} else if path := source.openrpc_path {
specification.from_openrpc(openrpc.new(path: path)!)!
} else {
panic('No openapi or openrpc path provided')
}
return generate_methods_example_file(actor_spec)!.write_str()!
}

View File

@@ -8,12 +8,13 @@ import freeflowuniverse.herolib.schemas.openapi
import freeflowuniverse.herolib.schemas.openrpc
pub fn generate_methods_interface_file_str(source Source) !string {
actor_spec := if path := source.openapi_path {
specification.from_openapi(openapi.new(path: path)!)!
} else if path := source.openrpc_path {
specification.from_openrpc(openrpc.new(path: path)!)!
} else {
panic('No openapi or openrpc path provided')
}
return generate_methods_interface_file(actor_spec)!.write_str()!
}

View File

@@ -8,12 +8,13 @@ import freeflowuniverse.herolib.schemas.openapi
import freeflowuniverse.herolib.schemas.openrpc
pub fn generate_model_file_str(source Source) !string {
actor_spec := if path := source.openapi_path {
specification.from_openapi(openapi.new(path: path)!)!
} else if path := source.openrpc_path {
specification.from_openrpc(openrpc.new(path: path)!)!
} else {
panic('No openapi or openrpc path provided')
}
return generate_model_file(actor_spec)!.write_str()!
}
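// Usage sketch (hypothetical spec path): all four generate_*_file_str helpers
// accept either an OpenAPI or an OpenRPC source and panic when given neither.
// model_str := generate_model_file_str(Source{ openapi_path: '/tmp/spec.yaml' })!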

View File

@@ -3,7 +3,7 @@ module specification
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.code { Struct }
import freeflowuniverse.herolib.schemas.jsonschema { Schema, SchemaRef }
import freeflowuniverse.herolib.schemas.openapi { MediaType, OpenAPI, OperationInfo, Parameter }
import freeflowuniverse.herolib.schemas.openrpc { ContentDescriptor, ErrorSpec, Example, ExamplePairing, ExampleRef }
// Helper function: Convert OpenAPI parameter to ContentDescriptor

View File

@@ -114,7 +114,7 @@ fn (mut f OpenAI) create_audio_request(args AudioArgs, endpoint string) !AudioRe
@[params]
pub struct CreateSpeechArgs {
pub:
model string = "tts_1"
model string = 'tts_1'
input string @[required]
voice Voice = .alloy
response_format AudioFormat = .mp3

View File

@@ -9,9 +9,9 @@ fn test_chat_completion() {
println(client.list_models()!)
raise("sss")
raise('sss')
res := client.chat_completion(Messages{
messages: [
Message{
role: .user

View File

@@ -44,31 +44,31 @@ pub mut:
struct ChatMessagesRaw {
mut:
model string
messages []MessageRaw
temperature f64 = 0.5
model string
messages []MessageRaw
temperature f64 = 0.5
max_completion_tokens int = 32000
}
@[params]
pub struct CompletionArgs{
pub struct CompletionArgs {
pub mut:
model string
msgs Messages
temperature f64 = 0.5
model string
msgs Messages
temperature f64 = 0.5
max_completion_tokens int = 32000
}
// creates a new chat completion given a list of messages
// each message consists of message content and the role of the author
pub fn (mut f OpenAI) chat_completion(args_ CompletionArgs) !ChatCompletion {
mut args:=args_
if args.model==""{
mut args := args_
if args.model == '' {
args.model = f.model_default
}
mut m := ChatMessagesRaw{
model: args.model
temperature: args.temperature
model: args.model
temperature: args.temperature
max_completion_tokens: args.max_completion_tokens
}
for msg in args.msgs.messages {

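For orientation, a minimal sketch of driving the reformatted completion API from outside the module; the prompt text is hypothetical and `get()` is assumed to accept its params struct with defaults, but the `CompletionArgs` fields and the empty-model fallback are taken from the hunk above.

import freeflowuniverse.herolib.clients.openai

fn chat_sketch() ! {
	mut client := openai.get()! // assumption: default instance, no explicit name needed
	res := client.chat_completion(
		// model omitted on purpose: the empty default falls back to client.model_default
		temperature: 0.5
		msgs:        openai.Messages{
			messages: [
				openai.Message{
					role:    .user
					content: 'Say hello in one line.' // hypothetical prompt
				},
			]
		}
	)!
	println(res)
}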
View File

@@ -28,7 +28,9 @@ fn args_get(args_ ArgsGet) ArgsGet {
pub fn get(args_ ArgsGet) !&OpenAI {
mut context := base.context()!
mut args := args_get(args_)
mut obj := OpenAI{name:args.name}
mut obj := OpenAI{
name: args.name
}
if args.name !in openai_global {
if !exists(args)! {
set(obj)!

View File

@@ -22,44 +22,43 @@ const default = true
@[heap]
pub struct OpenAI {
pub mut:
name string = 'default'
api_key string
url string
name string = 'default'
api_key string
url string
model_default string
conn ?&httpconnection.HTTPConnection @[skip; str: skip]
conn ?&httpconnection.HTTPConnection @[skip; str: skip]
}
// your checking & initialization code if needed
fn obj_init(mycfg_ OpenAI) !OpenAI {
mut mycfg := mycfg_
if mycfg.api_key==""{
mut k:=os.getenv('AIKEY')
if k != ""{
mycfg.api_key = k
k=os.getenv('AIURL')
if k != ""{
mut mycfg := mycfg_
if mycfg.api_key == '' {
mut k := os.getenv('AIKEY')
if k != '' {
mycfg.api_key = k
k = os.getenv('AIURL')
if k != '' {
mycfg.url = k
}else{
return error("found AIKEY in env, but not AIURL")
}
k=os.getenv('AIMODEL')
if k != ""{
} else {
return error('found AIKEY in env, but not AIURL')
}
k = os.getenv('AIMODEL')
if k != '' {
mycfg.model_default = k
}
return mycfg
}
mycfg.url = "https://api.openai.com/v1/models"
k=os.getenv('OPENAI_API_KEY')
if k != ""{
mycfg.api_key = k
return mycfg
}
return mycfg
}
k=os.getenv('OPENROUTER_API_KEY')
if k != ""{
mycfg.api_key = k
mycfg.url = "https://openrouter.ai/api/v1"
return mycfg
mycfg.url = 'https://api.openai.com/v1/models'
k = os.getenv('OPENAI_API_KEY')
if k != '' {
mycfg.api_key = k
return mycfg
}
k = os.getenv('OPENROUTER_API_KEY')
if k != '' {
mycfg.api_key = k
mycfg.url = 'https://openrouter.ai/api/v1'
return mycfg
}
}
return mycfg
@@ -75,12 +74,12 @@ pub fn (mut client OpenAI) connection() !&httpconnection.HTTPConnection {
)!
c2
}
c.default_header.set(.authorization, 'Bearer ${client.api_key}')
client.conn = c
return c
}
/////////////NORMALLY NO NEED TO TOUCH
pub fn heroscript_dumps(obj OpenAI) !string {

View File

@@ -6,9 +6,9 @@ import freeflowuniverse.herolib.core.pathlib
import os
pub interface IFile {
name string
write(string, WriteOptions) !
write_str(WriteOptions) !string
name string
}
pub struct File {
@@ -124,7 +124,9 @@ pub fn (code VFile) write_str(options WriteOptions) !string {
''
}
mod_stmt := if code.mod == '' {''} else {
mod_stmt := if code.mod == '' {
''
} else {
'module ${code.mod}'
}
@@ -169,9 +171,9 @@ pub fn parse_vfile(code string) !VFile {
mut vfile := VFile{
content: code
}
lines := code.split_into_lines()
// Extract module name
for line in lines {
trimmed := line.trim_space()
@@ -180,7 +182,7 @@ pub fn parse_vfile(code string) !VFile {
break
}
}
// Extract imports
for line in lines {
trimmed := line.trim_space()
@@ -189,29 +191,29 @@ pub fn parse_vfile(code string) !VFile {
vfile.imports << import_obj
}
}
// Extract constants
vfile.consts = parse_consts(code) or { []Const{} }
// Split code into chunks for parsing structs and functions
mut chunks := []string{}
mut current_chunk := ''
mut brace_count := 0
mut in_struct_or_fn := false
mut comment_block := []string{}
for line in lines {
trimmed := line.trim_space()
// Collect comments
if trimmed.starts_with('//') && !in_struct_or_fn {
comment_block << line
continue
}
// Check for struct or function start
if (trimmed.starts_with('struct ') || trimmed.starts_with('pub struct ') ||
trimmed.starts_with('fn ') || trimmed.starts_with('pub fn ')) && !in_struct_or_fn {
if (trimmed.starts_with('struct ') || trimmed.starts_with('pub struct ')
|| trimmed.starts_with('fn ') || trimmed.starts_with('pub fn ')) && !in_struct_or_fn {
in_struct_or_fn = true
current_chunk = comment_block.join('\n')
if current_chunk != '' {
@@ -219,14 +221,14 @@ pub fn parse_vfile(code string) !VFile {
}
current_chunk += line
comment_block = []string{}
if line.contains('{') {
brace_count += line.count('{')
}
if line.contains('}') {
brace_count -= line.count('}')
}
if brace_count == 0 {
// Single line definition
chunks << current_chunk
@@ -235,18 +237,18 @@ pub fn parse_vfile(code string) !VFile {
}
continue
}
// Add line to current chunk if we're inside a struct or function
if in_struct_or_fn {
current_chunk += '\n' + line
if line.contains('{') {
brace_count += line.count('{')
}
if line.contains('}') {
brace_count -= line.count('}')
}
// Check if we've reached the end of the struct or function
if brace_count == 0 {
chunks << current_chunk
@@ -255,11 +257,11 @@ pub fn parse_vfile(code string) !VFile {
}
}
}
// Parse each chunk and add to items
for chunk in chunks {
trimmed := chunk.trim_space()
if trimmed.contains('struct ') || trimmed.contains('pub struct ') {
// Parse struct
struct_obj := parse_struct(chunk) or {
@@ -276,6 +278,6 @@ pub fn parse_vfile(code string) !VFile {
vfile.items << fn_obj
}
}
return vfile
}

View File

@@ -1,7 +1,7 @@
module code
fn test_parse_vfile() {
code := '
code := "
module test
import os
@@ -9,7 +9,7 @@ import strings
import freeflowuniverse.herolib.core.texttools
const (
VERSION = \'1.0.0\'
VERSION = '1.0.0'
DEBUG = true
)
@@ -21,7 +21,7 @@ pub mut:
// greet returns a greeting message
pub fn (p Person) greet() string {
return \'Hello, my name is \${p.name} and I am \${p.age} years old\'
return 'Hello, my name is \${p.name} and I am \${p.age} years old'
}
// create_person creates a new Person instance
@@ -31,7 +31,7 @@ pub fn create_person(name string, age int) Person {
age: age
}
}
'
"
vfile := parse_vfile(code) or {
assert false, 'Failed to parse VFile: ${err}'
@@ -50,7 +50,7 @@ pub fn create_person(name string, age int) Person {
// Test constants
assert vfile.consts.len == 2
assert vfile.consts[0].name == 'VERSION'
assert vfile.consts[0].value == '\'1.0.0\''
assert vfile.consts[0].value == "'1.0.0'"
assert vfile.consts[1].name == 'DEBUG'
assert vfile.consts[1].value == 'true'
@@ -68,13 +68,13 @@ pub fn create_person(name string, age int) Person {
// Test functions
functions := vfile.functions()
assert functions.len == 2
// Test method
assert functions[0].name == 'greet'
assert functions[0].is_pub == true
assert functions[0].receiver.typ.vgen() == 'Person'
assert functions[0].result.typ.vgen() == 'string'
// Test standalone function
assert functions[1].name == 'create_person'
assert functions[1].is_pub == true

View File

@@ -133,30 +133,30 @@ pub fn parse_function(code_ string) !Function {
// Extract the result type, handling the ! for result types
mut result_type := code.all_after(')').all_before('{').replace(' ', '')
mut has_return := false
// Check if the result type contains !
if result_type.contains('!') {
has_return = true
result_type = result_type.replace('!', '')
}
result := new_param(
v: result_type
)!
body := if code.contains('{') { code.all_after('{').all_before_last('}') } else { '' }
// Process the comments into a description
description := comment_lines.join('\n')
return Function{
name: name
receiver: receiver
params: params
result: result
body: body
name: name
receiver: receiver
params: params
result: result
body: body
description: description
is_pub: is_pub
has_return: has_return
is_pub: is_pub
has_return: has_return
}
}

View File

@@ -2,20 +2,20 @@ module code
fn test_parse_function_with_comments() {
// Test function string with comments
function_str := '// test_function is a simple function for testing the MCP tool code generation
function_str := "// test_function is a simple function for testing the MCP tool code generation
// It takes a config and returns a result
pub fn test_function(config TestConfig) !TestResult {
// This is just a mock implementation for testing purposes
if config.name == \'\' {
return error(\'Name cannot be empty\')
if config.name == '' {
return error('Name cannot be empty')
}
return TestResult{
success: config.enabled
message: \'Test completed for \${config.name}\'
message: 'Test completed for \${config.name}'
code: if config.enabled { 0 } else { 1 }
}
}'
}"
// Parse the function
function := parse_function(function_str) or {
@@ -30,7 +30,7 @@ pub fn test_function(config TestConfig) !TestResult {
assert function.params[0].name == 'config'
assert function.params[0].typ.symbol() == 'TestConfig'
assert function.result.typ.symbol() == 'TestResult'
// Verify that the comments were correctly parsed into the description
expected_description := 'test_function is a simple function for testing the MCP tool code generation
It takes a config and returns a result'
@@ -41,9 +41,9 @@ It takes a config and returns a result'
fn test_parse_function_without_comments() {
// Test function string without comments
function_str := 'fn simple_function(name string, count int) string {
return \'\${name} count: \${count}\'
}'
function_str := "fn simple_function(name string, count int) string {
return '\${name} count: \${count}'
}"
// Parse the function
function := parse_function(function_str) or {
@@ -60,7 +60,7 @@ fn test_parse_function_without_comments() {
assert function.params[1].name == 'count'
assert function.params[1].typ.symbol() == 'int'
assert function.result.typ.symbol() == 'string'
// Verify that there is no description
assert function.description == ''

View File

@@ -79,4 +79,4 @@ pub fn (mod Module) write_str() !string {
}
return out
}
}

View File

@@ -69,10 +69,11 @@ pub fn parse_struct(code_ string) !Struct {
trimmed := line.trim_space()
if !in_struct && trimmed.starts_with('//') {
comment_lines << trimmed.trim_string_left('//').trim_space()
} else if !in_struct && (trimmed.starts_with('struct ') || trimmed.starts_with('pub struct ')) {
} else if !in_struct && (trimmed.starts_with('struct ')
|| trimmed.starts_with('pub struct ')) {
in_struct = true
struct_lines << line
// Extract struct name
is_pub = trimmed.starts_with('pub ')
mut name_part := if is_pub {
@@ -80,7 +81,7 @@ pub fn parse_struct(code_ string) !Struct {
} else {
trimmed.trim_string_left('struct ').trim_space()
}
// Handle generics in struct name
if name_part.contains('<') {
struct_name = name_part.all_before('<').trim_space()
@@ -91,72 +92,71 @@ pub fn parse_struct(code_ string) !Struct {
}
} else if in_struct {
struct_lines << line
// Check if we've reached the end of the struct
if trimmed.starts_with('}') {
break
}
}
}
if struct_name == '' {
return error('Invalid struct format: could not extract struct name')
}
// Process the struct fields
mut fields := []StructField{}
mut current_section := ''
for i := 1; i < struct_lines.len - 1; i++ { // Skip the first and last lines (struct declaration and closing brace)
line := struct_lines[i].trim_space()
// Skip empty lines and comments
if line == '' || line.starts_with('//') {
continue
}
// Check for section markers (pub:, mut:, pub mut:)
if line.ends_with(':') {
current_section = line
continue
}
// Parse field
parts := line.split_any(' \t')
if parts.len < 2 {
continue // Skip invalid lines
}
field_name := parts[0]
field_type_str := parts[1..].join(' ')
// Parse the type string into a Type object
field_type := parse_type(field_type_str)
// Determine field visibility based on section
is_pub_field := current_section.contains('pub')
is_mut_field := current_section.contains('mut')
fields << StructField{
name: field_name
typ: field_type
name: field_name
typ: field_type
is_pub: is_pub_field
is_mut: is_mut_field
}
}
// Process the comments into a description
description := comment_lines.join('\n')
return Struct{
name: struct_name
name: struct_name
description: description
is_pub: is_pub
fields: fields
is_pub: is_pub
fields: fields
}
}
pub struct Interface {
pub mut:
name string

View File

@@ -21,17 +21,17 @@ pub:
It contains information about test execution'
assert result.is_pub == true
assert result.fields.len == 3
assert result.fields[0].name == 'success'
assert result.fields[0].typ.symbol() == 'bool'
assert result.fields[0].is_pub == true
assert result.fields[0].is_mut == false
assert result.fields[1].name == 'message'
assert result.fields[1].typ.symbol() == 'string'
assert result.fields[1].is_pub == true
assert result.fields[1].is_mut == false
assert result.fields[2].name == 'code'
assert result.fields[2].typ.symbol() == 'int'
assert result.fields[2].is_pub == true
@@ -55,17 +55,17 @@ mut:
assert result2.description == ''
assert result2.is_pub == false
assert result2.fields.len == 3
assert result2.fields[0].name == 'name'
assert result2.fields[0].typ.symbol() == 'string'
assert result2.fields[0].is_pub == true
assert result2.fields[0].is_mut == false
assert result2.fields[1].name == 'count'
assert result2.fields[1].typ.symbol() == 'int'
assert result2.fields[1].is_pub == false
assert result2.fields[1].is_mut == true
assert result2.fields[2].name == 'active'
assert result2.fields[2].typ.symbol() == 'bool'
assert result2.fields[2].is_pub == false

View File

@@ -239,7 +239,7 @@ pub fn (t Type) empty_value() string {
pub fn parse_type(type_str string) Type {
println('Parsing type string: "${type_str}"')
mut type_str_trimmed := type_str.trim_space()
// Handle struct definitions by extracting just the struct name
if type_str_trimmed.contains('struct ') {
lines := type_str_trimmed.split_into_lines()
@@ -257,7 +257,7 @@ pub fn parse_type(type_str string) Type {
}
}
}
// Check for simple types first
if type_str_trimmed == 'string' {
return String{}
@@ -266,41 +266,61 @@ pub fn parse_type(type_str string) Type {
} else if type_str_trimmed == 'int' {
return Integer{}
} else if type_str_trimmed == 'u8' {
return Integer{bytes: 8, signed: false}
return Integer{
bytes: 8
signed: false
}
} else if type_str_trimmed == 'u16' {
return Integer{bytes: 16, signed: false}
return Integer{
bytes: 16
signed: false
}
} else if type_str_trimmed == 'u32' {
return Integer{bytes: 32, signed: false}
return Integer{
bytes: 32
signed: false
}
} else if type_str_trimmed == 'u64' {
return Integer{bytes: 64, signed: false}
return Integer{
bytes: 64
signed: false
}
} else if type_str_trimmed == 'i8' {
return Integer{bytes: 8}
return Integer{
bytes: 8
}
} else if type_str_trimmed == 'i16' {
return Integer{bytes: 16}
return Integer{
bytes: 16
}
} else if type_str_trimmed == 'i32' {
return Integer{bytes: 32}
return Integer{
bytes: 32
}
} else if type_str_trimmed == 'i64' {
return Integer{bytes: 64}
return Integer{
bytes: 64
}
}
// Check for array types
if type_str_trimmed.starts_with('[]') {
elem_type := type_str_trimmed.all_after('[]')
return Array{parse_type(elem_type)}
}
// Check for map types
if type_str_trimmed.starts_with('map[') && type_str_trimmed.contains(']') {
value_type := type_str_trimmed.all_after(']')
return Map{parse_type(value_type)}
}
// Check for result types
if type_str_trimmed.starts_with('!') {
result_type := type_str_trimmed.all_after('!')
return Result{parse_type(result_type)}
}
// If no other type matches, treat as an object/struct type
println('Treating as object type: "${type_str_trimmed}"')
return Object{type_str_trimmed}
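A few spot checks of the branches above, assuming `Type` is a sum type over the variants used here so `is` checks apply:

fn parse_type_sketch() {
	assert parse_type('string') is String
	assert parse_type('u16') is Integer // Integer{bytes: 16, signed: false}
	assert parse_type('[]int') is Array // element type parsed recursively
	assert parse_type('map[string]int') is Map
	assert parse_type('!string') is Result
	assert parse_type('MyStruct') is Object // anything unrecognized becomes an Object
}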

View File

@@ -66,15 +66,17 @@ fn find_closing_brace(content string, start_i int) ?int {
// RETURNS:
// string - the function block including comments, or error if not found
pub fn get_function_from_file(file_path string, function_name string) !Function {
content := os.read_file(file_path) or { return error('Failed to read file ${file_path}: ${err}') }
content := os.read_file(file_path) or {
return error('Failed to read file ${file_path}: ${err}')
}
vfile := parse_vfile(content) or { return error('Failed to parse file ${file_path}: ${err}') }
if fn_obj := vfile.get_function(function_name) {
return fn_obj
}
return error('function ${function_name} not found in file ${file_path}')
}
return error('function ${function_name} not found in file ${file_path}')
}
// get_function_from_module searches for a function in all V files within a module
@@ -91,15 +93,11 @@ pub fn get_function_from_module(module_path string, function_name string) !Funct
log.error('Found ${v_files} V files in ${module_path}')
for v_file in v_files {
// Read the file content
content := os.read_file(v_file) or {
continue
}
content := os.read_file(v_file) or { continue }
// Parse the file
vfile := parse_vfile(content) or {
continue
}
vfile := parse_vfile(content) or { continue }
// Look for the function
if fn_obj := vfile.get_function(function_name) {
return fn_obj
@@ -139,7 +137,7 @@ pub fn get_type_from_module(module_path string, type_name string) !string {
if i == -1 {
type_import := content.split_into_lines().filter(it.contains('import')
&& it.contains(type_name))
&& it.contains(type_name))
if type_import.len > 0 {
log.debug('debugzoooo')
mod := type_import[0].trim_space().trim_string_left('import ').all_before(' ')

View File

@@ -8,17 +8,17 @@ pub:
data []u8
}
// to_bytes converts a Currency to serialized bytes
// to_bytes converts a Currency to serialized bytes
pub fn (c Currency) to_bytes() !CurrencyBytes {
mut enc := encoder.new()
// Add unique encoding ID to identify this type of data
enc.add_u16(500) // Unique ID for Currency type
// Encode Currency fields
enc.add_string(c.name)
enc.add_f64(c.usdval)
return CurrencyBytes{
data: enc.data
}
@@ -28,16 +28,16 @@ pub fn (c Currency) to_bytes() !CurrencyBytes {
pub fn from_bytes(bytes CurrencyBytes) !Currency {
mut d := encoder.decoder_new(bytes.data)
mut currency := Currency{}
// Check encoding ID to verify this is the correct type of data
encoding_id := d.get_u16()!
if encoding_id != 500 {
return error('Wrong file type: expected encoding ID 500, got ${encoding_id}, for currency')
}
// Decode Currency fields
currency.name = d.get_string()!
currency.usdval = d.get_f64()!
return currency
}
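A round-trip sketch for the pair above; the values are hypothetical, and the u16 type ID 500 is what guards against decoding a foreign payload:

fn currency_roundtrip_sketch() ! {
	c := Currency{
		name:   'USD'
		usdval: 1.0
	}
	bytes := c.to_bytes()! // payload starts with the u16 type ID 500
	decoded := from_bytes(bytes)!
	assert decoded.name == c.name
	assert decoded.usdval == c.usdval
}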

View File

@@ -241,6 +241,6 @@ pub fn (mut d Decoder) get_map_bytes() !map[string][]u8 {
// Gets GID from encoded string
pub fn (mut d Decoder) get_gid() !gid.GID {
gid_str := d.get_string()!
return gid.new(gid_str)
gid_str := d.get_string()!
return gid.new(gid_str)
}

View File

@@ -191,17 +191,17 @@ fn test_map_bytes() {
fn test_gid() {
// Test with a standard GID
mut e := new()
mut g1 := gid.new("myproject:123")!
mut g1 := gid.new('myproject:123')!
e.add_gid(g1)
// Test with a GID that has a default circle name
mut g2 := gid.new_from_parts("", 999)!
mut g2 := gid.new_from_parts('', 999)!
e.add_gid(g2)
// Test with a GID that has spaces before fixing
mut g3 := gid.new("project1:456")!
mut g3 := gid.new('project1:456')!
e.add_gid(g3)
mut d := decoder_new(e.data)
assert d.get_gid()!.str() == g1.str()
assert d.get_gid()!.str() == g2.str()
@@ -211,74 +211,74 @@ fn test_gid() {
fn test_currency() {
// Create USD currency manually
mut usd_curr := currency.Currency{
name: 'USD'
name: 'USD'
usdval: 1.0
}
// Create EUR currency manually
mut eur_curr := currency.Currency{
name: 'EUR'
name: 'EUR'
usdval: 1.1
}
// Create Bitcoin currency manually
mut btc_curr := currency.Currency{
name: 'BTC'
name: 'BTC'
usdval: 60000.0
}
// Create TFT currency manually
mut tft_curr := currency.Currency{
name: 'TFT'
name: 'TFT'
usdval: 0.05
}
// Create currency amounts
mut usd_amount := currency.Amount{
currency: usd_curr
val: 1.5
val: 1.5
}
mut eur_amount := currency.Amount{
currency: eur_curr
val: 100.0
val: 100.0
}
mut btc_amount := currency.Amount{
currency: btc_curr
val: 0.01
val: 0.01
}
mut tft_amount := currency.Amount{
currency: tft_curr
val: 1000.0
val: 1000.0
}
mut e := new()
e.add_currency(usd_amount)
e.add_currency(eur_amount)
e.add_currency(btc_amount)
e.add_currency(tft_amount)
mut d := decoder_new(e.data)
// Override the currency.get function by manually checking currency names
// since we can't rely on the global currency functions for testing
mut decoded_curr1 := d.get_string()!
mut decoded_val1 := d.get_f64()!
assert decoded_curr1 == 'USD'
assert math.abs(decoded_val1 - 1.5) < 0.00001
mut decoded_curr2 := d.get_string()!
mut decoded_val2 := d.get_f64()!
assert decoded_curr2 == 'EUR'
assert math.abs(decoded_val2 - 100.0) < 0.00001
mut decoded_curr3 := d.get_string()!
mut decoded_val3 := d.get_f64()!
assert decoded_curr3 == 'BTC'
assert math.abs(decoded_val3 - 0.01) < 0.00001
mut decoded_curr4 := d.get_string()!
mut decoded_val4 := d.get_f64()!
assert decoded_curr4 == 'TFT'

View File

@@ -31,23 +31,23 @@ pub fn new(txt_ string) !GID {
}
cid_str := parts[1].trim_space()
	cid := cid_str.u32() //TODO: what if this is not a number?
	cid := cid_str.u32() // TODO: what if this is not a number?
return GID{
circle: circle
cid: cid
cid: cid
}
}
pub fn new_from_parts(circle_ string, cid u32) !GID {
mut circle:=circle_
mut circle := circle_
if circle.trim_space() == '' {
circle="default"
circle = 'default'
}
return GID{
circle: circle
cid: cid
cid: cid
}
}
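Two quick examples of the constructors above; only behavior visible in this hunk is assumed:

fn gid_sketch() ! {
	g1 := new('myproject:123')!
	assert g1.cid == 123
	g2 := new_from_parts('', 999)! // a blank circle falls back to 'default'
	assert g2.circle == 'default'
}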

View File

@@ -4,7 +4,7 @@ import os
// Define a struct for test cases
struct PrefixEdgeCaseTest {
prefix string
prefix string
expected_keys []string
}
@@ -17,10 +17,20 @@ fn test_edge_case_prefix_search() {
// Keys with a common prefix that may cause issues
keys := [
'test', 'testing', 'tea', 'team', 'technology',
'apple', 'application', 'appreciate',
'banana', 'bandage', 'band',
'car', 'carpet', 'carriage'
'test',
'testing',
'tea',
'team',
'technology',
'apple',
'application',
'appreciate',
'banana',
'bandage',
'band',
'car',
'carpet',
'carriage',
]
// Insert all keys
@@ -36,59 +46,58 @@ fn test_edge_case_prefix_search() {
test_cases := [
// prefix, expected_keys
PrefixEdgeCaseTest{
prefix: 'te'
prefix: 'te'
expected_keys: ['test', 'testing', 'tea', 'team', 'technology']
},
PrefixEdgeCaseTest{
prefix: 'tes'
prefix: 'tes'
expected_keys: ['test', 'testing']
},
PrefixEdgeCaseTest{
prefix: 'tea'
prefix: 'tea'
expected_keys: ['tea', 'team']
},
PrefixEdgeCaseTest{
prefix: 'a'
prefix: 'a'
expected_keys: ['apple', 'application', 'appreciate']
},
PrefixEdgeCaseTest{
prefix: 'ba'
prefix: 'ba'
expected_keys: ['banana', 'bandage', 'band']
},
PrefixEdgeCaseTest{
prefix: 'ban'
prefix: 'ban'
expected_keys: ['banana', 'band']
},
PrefixEdgeCaseTest{
prefix: 'c'
prefix: 'c'
expected_keys: ['car', 'carpet', 'carriage']
}
},
]
for test_case in test_cases {
prefix := test_case.prefix
expected_keys := test_case.expected_keys
result := tree.list(prefix) or {
assert false, 'Failed to list keys with prefix "${prefix}": ${err}'
return
}
// Check count matches
assert result.len == expected_keys.len,
'For prefix "${prefix}": expected ${expected_keys.len} keys, got ${result.len} (keys: ${result})'
assert result.len == expected_keys.len, 'For prefix "${prefix}": expected ${expected_keys.len} keys, got ${result.len} (keys: ${result})'
// Check all expected keys are present
for key in expected_keys {
assert key in result, 'Key "${key}" missing from results for prefix "${prefix}"'
}
// Verify each result starts with the prefix
for key in result {
assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"'
}
}
println('All edge case prefix tests passed successfully!')
}
@@ -102,8 +111,13 @@ fn test_tricky_insertion_order() {
// Insert keys in a specific order that might trigger the issue
// Insert 'team' first, then 'test', etc. to ensure tree layout is challenging
tricky_keys := [
'team', 'test', 'technology', 'tea', // 'te' prefix cases
'car', 'carriage', 'carpet' // 'ca' prefix cases
'team',
'test',
'technology',
'tea', // 'te' prefix cases
'car',
'carriage',
'carpet', // 'ca' prefix cases
]
// Insert all keys
@@ -114,7 +128,7 @@ fn test_tricky_insertion_order() {
return
}
}
// Test 'te' prefix
te_results := tree.list('te') or {
assert false, 'Failed to list keys with prefix "te": ${err}'
@@ -125,7 +139,7 @@ fn test_tricky_insertion_order() {
assert 'test' in te_results, 'Expected "test" in results'
assert 'technology' in te_results, 'Expected "technology" in results'
assert 'tea' in te_results, 'Expected "tea" in results'
// Test 'ca' prefix
ca_results := tree.list('ca') or {
assert false, 'Failed to list keys with prefix "ca": ${err}'
@@ -135,6 +149,6 @@ fn test_tricky_insertion_order() {
assert 'car' in ca_results, 'Expected "car" in results'
assert 'carriage' in ca_results, 'Expected "carriage" in results'
assert 'carpet' in ca_results, 'Expected "carpet" in results'
println('All tricky insertion order tests passed successfully!')
}
}

View File

@@ -4,7 +4,7 @@ import os
// Define a struct for test cases
struct PrefixTestCase {
prefix string
prefix string
expected_count int
}
@@ -17,13 +17,31 @@ fn test_complex_prefix_search() {
// Insert a larger set of keys with various prefixes
keys := [
'a', 'ab', 'abc', 'abcd', 'abcde',
'b', 'bc', 'bcd', 'bcde',
'c', 'cd', 'cde',
'x', 'xy', 'xyz',
'test', 'testing', 'tested', 'tests',
'team', 'teammate', 'teams',
'tech', 'technology', 'technical'
'a',
'ab',
'abc',
'abcd',
'abcde',
'b',
'bc',
'bcd',
'bcde',
'c',
'cd',
'cde',
'x',
'xy',
'xyz',
'test',
'testing',
'tested',
'tests',
'team',
'teammate',
'teams',
'tech',
'technology',
'technical',
]
// Insert all keys
@@ -54,8 +72,8 @@ fn test_complex_prefix_search() {
PrefixTestCase{'x', 3},
PrefixTestCase{'xy', 2},
PrefixTestCase{'xyz', 1},
PrefixTestCase{'z', 0}, // No matches
PrefixTestCase{'', keys.len} // All keys
PrefixTestCase{'z', 0}, // No matches
PrefixTestCase{'', keys.len}, // All keys
]
for test_case in test_cases {
@@ -70,7 +88,7 @@ fn test_complex_prefix_search() {
}
assert result.len == expected_count, 'For prefix "${prefix}": expected ${expected_count} keys, got ${result.len}'
// Verify each result starts with the prefix
for key in result {
assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"'
@@ -87,13 +105,21 @@ fn test_special_prefix_search() {
// Insert keys with special characters and longer strings
special_keys := [
'user:1:profile', 'user:1:settings', 'user:1:posts',
'user:2:profile', 'user:2:settings',
'config:app:name', 'config:app:version', 'config:app:debug',
'config:db:host', 'config:db:port',
'data:2023:01:01', 'data:2023:01:02', 'data:2023:02:01',
'user:1:profile',
'user:1:settings',
'user:1:posts',
'user:2:profile',
'user:2:settings',
'config:app:name',
'config:app:version',
'config:app:debug',
'config:db:host',
'config:db:port',
'data:2023:01:01',
'data:2023:01:02',
'data:2023:02:01',
'very:long:key:with:multiple:segments:and:special:characters:!@#$%^&*()',
'another:very:long:key:with:different:segments'
'another:very:long:key:with:different:segments',
]
// Insert all keys
@@ -118,7 +144,7 @@ fn test_special_prefix_search() {
PrefixTestCase{'data:2023:01:', 2},
PrefixTestCase{'very:', 1},
PrefixTestCase{'another:', 1},
PrefixTestCase{'nonexistent:', 0}
PrefixTestCase{'nonexistent:', 0},
]
for test_case in special_test_cases {
@@ -133,7 +159,7 @@ fn test_special_prefix_search() {
}
assert result.len == expected_count, 'For prefix "${prefix}": expected ${expected_count} keys, got ${result.len}'
// Verify each result starts with the prefix
for key in result {
assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"'
@@ -151,9 +177,9 @@ fn test_prefix_search_performance() {
// Generate a larger dataset (1000 keys)
prefixes := ['user', 'config', 'data', 'app', 'service', 'api', 'test', 'dev', 'prod', 'staging']
mut large_keys := []string{}
for prefix in prefixes {
for i in 0..100 {
for i in 0 .. 100 {
large_keys << '${prefix}:${i}:name'
}
}
@@ -175,7 +201,7 @@ fn test_prefix_search_performance() {
}
assert result.len == 100, 'For prefix "${prefix}:": expected 100 keys, got ${result.len}'
// Verify each result starts with the prefix
for key in result {
assert key.starts_with(prefix + ':'), 'Key "${key}" does not start with prefix "${prefix}:"'
@@ -184,7 +210,7 @@ fn test_prefix_search_performance() {
// Test more specific prefixes
for prefix in prefixes {
for i in 0..10 {
for i in 0 .. 10 {
specific_prefix := '${prefix}:${i}'
result := tree.list(specific_prefix) or {
assert false, 'Failed to list keys with prefix "${specific_prefix}": ${err}'
@@ -195,4 +221,4 @@ fn test_prefix_search_performance() {
assert result[0] == '${specific_prefix}:name', 'Expected "${specific_prefix}:name", got "${result[0]}"'
}
}
}
}

View File

@@ -62,11 +62,11 @@ fn deserialize_node(data []u8) !Node {
right_id := d.get_u32()!
return Node{
character: character
character: character
is_end_of_string: is_end_of_string
value: value
left_id: left_id
middle_id: middle_id
right_id: right_id
value: value
left_id: left_id
middle_id: middle_id
right_id: right_id
}
}
}

View File

@@ -4,23 +4,23 @@ module tst
fn test_node_serialization() {
// Create a leaf node (end of string)
leaf_node := Node{
character: `a`
character: `a`
is_end_of_string: true
value: 'test value'.bytes()
left_id: 0
middle_id: 0
right_id: 0
value: 'test value'.bytes()
left_id: 0
middle_id: 0
right_id: 0
}
// Serialize the leaf node
leaf_data := serialize_node(leaf_node)
// Deserialize and verify
deserialized_leaf := deserialize_node(leaf_data) or {
assert false, 'Failed to deserialize leaf node: ${err}'
return
}
assert deserialized_leaf.character == leaf_node.character, 'Character mismatch'
assert deserialized_leaf.is_end_of_string == leaf_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_leaf.value.bytestr() == leaf_node.value.bytestr(), 'Value mismatch'
@@ -30,23 +30,23 @@ fn test_node_serialization() {
// Create an internal node (not end of string)
internal_node := Node{
character: `b`
character: `b`
is_end_of_string: false
value: []u8{}
left_id: 10
middle_id: 20
right_id: 30
value: []u8{}
left_id: 10
middle_id: 20
right_id: 30
}
// Serialize the internal node
internal_data := serialize_node(internal_node)
// Deserialize and verify
deserialized_internal := deserialize_node(internal_data) or {
assert false, 'Failed to deserialize internal node: ${err}'
return
}
assert deserialized_internal.character == internal_node.character, 'Character mismatch'
assert deserialized_internal.is_end_of_string == internal_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_internal.value.len == 0, 'Value should be empty'
@@ -56,23 +56,23 @@ fn test_node_serialization() {
// Create a root node
root_node := Node{
character: 0 // null character for root
character: 0 // null character for root
is_end_of_string: false
value: []u8{}
left_id: 5
middle_id: 15
right_id: 25
value: []u8{}
left_id: 5
middle_id: 15
right_id: 25
}
// Serialize the root node
root_data := serialize_node(root_node)
// Deserialize and verify
deserialized_root := deserialize_node(root_data) or {
assert false, 'Failed to deserialize root node: ${err}'
return
}
assert deserialized_root.character == root_node.character, 'Character mismatch'
assert deserialized_root.is_end_of_string == root_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_root.value.len == 0, 'Value should be empty'
@@ -85,23 +85,23 @@ fn test_node_serialization() {
fn test_special_serialization() {
// Create a node with special character
special_node := Node{
character: `!` // special character
character: `!` // special character
is_end_of_string: true
value: 'special value with spaces and symbols: !@#$%^&*()'.bytes()
left_id: 42
middle_id: 99
right_id: 123
value: 'special value with spaces and symbols: !@#$%^&*()'.bytes()
left_id: 42
middle_id: 99
right_id: 123
}
// Serialize the special node
special_data := serialize_node(special_node)
// Deserialize and verify
deserialized_special := deserialize_node(special_data) or {
assert false, 'Failed to deserialize special node: ${err}'
return
}
assert deserialized_special.character == special_node.character, 'Character mismatch'
assert deserialized_special.is_end_of_string == special_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_special.value.bytestr() == special_node.value.bytestr(), 'Value mismatch'
@@ -111,37 +111,37 @@ fn test_special_serialization() {
// Create a node with a large value
mut large_value := []u8{len: 1000}
for i in 0..1000 {
for i in 0 .. 1000 {
large_value[i] = u8(i % 256)
}
large_node := Node{
character: `z`
character: `z`
is_end_of_string: true
value: large_value
left_id: 1
middle_id: 2
right_id: 3
value: large_value
left_id: 1
middle_id: 2
right_id: 3
}
// Serialize the large node
large_data := serialize_node(large_node)
// Deserialize and verify
deserialized_large := deserialize_node(large_data) or {
assert false, 'Failed to deserialize large node: ${err}'
return
}
assert deserialized_large.character == large_node.character, 'Character mismatch'
assert deserialized_large.is_end_of_string == large_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_large.value.len == large_node.value.len, 'Value length mismatch'
// Check each byte of the large value
for i in 0..large_node.value.len {
for i in 0 .. large_node.value.len {
assert deserialized_large.value[i] == large_node.value[i], 'Value byte mismatch at index ${i}'
}
assert deserialized_large.left_id == large_node.left_id, 'left_id mismatch'
assert deserialized_large.middle_id == large_node.middle_id, 'middle_id mismatch'
assert deserialized_large.right_id == large_node.right_id, 'right_id mismatch'
@@ -151,24 +151,24 @@ fn test_special_serialization() {
fn test_version_handling() {
// Create a valid node
valid_node := Node{
character: `a`
character: `a`
is_end_of_string: true
value: 'test'.bytes()
left_id: 0
middle_id: 0
right_id: 0
value: 'test'.bytes()
left_id: 0
middle_id: 0
right_id: 0
}
// Serialize the node
mut valid_data := serialize_node(valid_node)
// Corrupt the version byte
valid_data[0] = 99 // Invalid version
// Attempt to deserialize with invalid version
deserialize_node(valid_data) or {
assert err.str().contains('Invalid version byte'), 'Expected version error, got: ${err}'
return
}
assert false, 'Expected error for invalid version byte'
}
}

View File

@@ -6,9 +6,9 @@ module tst
// - replaces special characters with standard ones
pub fn namefix(s string) string {
mut result := s.trim_space().to_lower()
// Replace any problematic characters or sequences if needed
// For this implementation, we'll keep it simple
return result
}
}
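Given the body above, normalization is just trim plus lowercase:

fn namefix_sketch() {
	assert namefix('  Hello World ') == 'hello world'
}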

View File

@@ -5,12 +5,12 @@ import freeflowuniverse.herolib.data.ourdb
// Represents a node in the ternary search tree
struct Node {
mut:
	character u8 // The character stored at this node
is_end_of_string bool // Flag indicating if this node represents the end of a key
value []u8 // The value associated with the key (if this node is the end of a key)
left_id u32 // Database ID for left child (character < node.character)
middle_id u32 // Database ID for middle child (character == node.character)
right_id u32 // Database ID for right child (character > node.character)
	character        u8   // The character stored at this node
is_end_of_string bool // Flag indicating if this node represents the end of a key
value []u8 // The value associated with the key (if this node is the end of a key)
left_id u32 // Database ID for left child (character < node.character)
middle_id u32 // Database ID for middle child (character == node.character)
right_id u32 // Database ID for right child (character > node.character)
}
// TST represents a ternary search tree data structure
@@ -39,18 +39,18 @@ pub fn new(args NewArgs) !TST {
)!
mut root_id := u32(1) // First ID in ourdb is now 1 instead of 0
if db.get_next_id()! == 1 {
// Create a new root node if the database is empty
// We'll use a null character (0) for the root node
println('Creating new root node')
root := Node{
character: 0
character: 0
is_end_of_string: false
value: []u8{}
left_id: 0
middle_id: 0
right_id: 0
value: []u8{}
left_id: 0
middle_id: 0
right_id: 0
}
root_id = db.set(data: serialize_node(root))!
println('Root node created with ID: ${root_id}')
@@ -74,7 +74,7 @@ pub fn new(args NewArgs) !TST {
pub fn (mut self TST) set(key string, value []u8) ! {
normalized_key := namefix(key)
println('Setting key: "${key}" (normalized: "${normalized_key}")')
if normalized_key.len == 0 {
return error('Empty key not allowed')
}
@@ -83,12 +83,12 @@ pub fn (mut self TST) set(key string, value []u8) ! {
if self.root_id == 0 {
println('Tree is empty, creating root node')
root := Node{
character: 0
character: 0
is_end_of_string: false
value: []u8{}
left_id: 0
middle_id: 0
right_id: 0
value: []u8{}
left_id: 0
middle_id: 0
right_id: 0
}
self.root_id = self.db.set(data: serialize_node(root))!
println('Root node created with ID: ${self.root_id}')
@@ -97,12 +97,12 @@ pub fn (mut self TST) set(key string, value []u8) ! {
// Insert the key-value pair
mut last_node_id := self.insert_recursive(self.root_id, normalized_key, 0, value)!
println('Key "${normalized_key}" inserted to node ${last_node_id}')
// Make sure the last node is marked as end of string with the value
if last_node_id != 0 {
node_data := self.db.get(last_node_id)!
mut node := deserialize_node(node_data)!
// Ensure this node is marked as the end of a string
if !node.is_end_of_string {
println('Setting node ${last_node_id} as end of string')
@@ -111,7 +111,7 @@ pub fn (mut self TST) set(key string, value []u8) ! {
self.db.set(id: last_node_id, data: serialize_node(node))!
}
}
println('Key "${normalized_key}" inserted successfully')
}
@@ -126,33 +126,33 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
// If we've reached the end of the tree, create a new node
if node_id == 0 {
println('Creating new node for character: ${key[pos]} (${key[pos].ascii_str()}) at position ${pos}')
// Create a node for this character
new_node := Node{
character: key[pos]
character: key[pos]
is_end_of_string: pos == key.len - 1
value: if pos == key.len - 1 { value.clone() } else { []u8{} }
left_id: 0
middle_id: 0
right_id: 0
value: if pos == key.len - 1 { value.clone() } else { []u8{} }
left_id: 0
middle_id: 0
right_id: 0
}
new_id := self.db.set(data: serialize_node(new_node))!
println('New node created with ID: ${new_id}, character: ${key[pos]} (${key[pos].ascii_str()}), is_end: ${pos == key.len - 1}')
// If this is the last character in the key, we're done
if pos == key.len - 1 {
return new_id
}
// Otherwise, create the next node in the sequence and link to it
next_id := self.insert_recursive(0, key, pos + 1, value)!
// Update the middle link
node_data := self.db.get(new_id)!
mut updated_node := deserialize_node(node_data)!
updated_node.middle_id = next_id
self.db.set(id: new_id, data: serialize_node(updated_node))!
return new_id
}
@@ -161,14 +161,14 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
println('Failed to get node data for ID ${node_id}')
return error('Node retrieval error: ${err}')
}
mut node := deserialize_node(node_data) or {
println('Failed to deserialize node with ID ${node_id}')
return error('Node deserialization error: ${err}')
}
println('Node ${node_id}: character=${node.character} (${node.character.ascii_str()}), is_end=${node.is_end_of_string}, left=${node.left_id}, middle=${node.middle_id}, right=${node.right_id}')
// Compare the current character with the node's character
if key[pos] < node.character {
println('Going left for character: ${key[pos]} (${key[pos].ascii_str()}) < ${node.character} (${node.character.ascii_str()})')
@@ -189,7 +189,7 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
node.value = value
self.db.set(id: node_id, data: serialize_node(node))!
} else {
println('Going middle for next character: ${key[pos+1]} (${key[pos+1].ascii_str()})')
println('Going middle for next character: ${key[pos + 1]} (${key[pos + 1].ascii_str()})')
// Move to the next character in the key
node.middle_id = self.insert_recursive(node.middle_id, key, pos + 1, value)!
self.db.set(id: node_id, data: serialize_node(node))!
@@ -203,7 +203,7 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
pub fn (mut self TST) get(key string) ![]u8 {
normalized_key := namefix(key)
println('Getting key: "${key}" (normalized: "${normalized_key}")')
if normalized_key.len == 0 {
return error('Empty key not allowed')
}
@@ -222,48 +222,44 @@ fn (mut self TST) search_recursive(node_id u32, key string, pos int) ![]u8 {
println('Node ID is 0, key not found')
return error('Key not found')
}
if pos >= key.len {
println('Position ${pos} out of bounds for key "${key}"')
return error('Key not found - position out of bounds')
}
// Get the node
node_data := self.db.get(node_id) or {
println('Failed to get node ${node_id}')
return error('Node not found in database')
}
node := deserialize_node(node_data) or {
println('Failed to deserialize node ${node_id}')
return error('Failed to deserialize node')
}
println('Searching node ${node_id}: char=${node.character}, pos=${pos}, key_char=${key[pos]}')
mut result := []u8{}
// Left branch
if key[pos] < node.character {
println('Going left')
result = self.search_recursive(node.left_id, key, pos) or {
return error(err.str())
}
result = self.search_recursive(node.left_id, key, pos) or { return error(err.str()) }
return result
}
// Right branch
if key[pos] > node.character {
println('Going right')
result = self.search_recursive(node.right_id, key, pos) or {
return error(err.str())
}
result = self.search_recursive(node.right_id, key, pos) or { return error(err.str()) }
return result
}
// Character matches
println('Character match')
// At end of key
if pos == key.len - 1 {
if node.is_end_of_string {
@@ -278,17 +274,15 @@ fn (mut self TST) search_recursive(node_id u32, key string, pos int) ![]u8 {
return error('Key not found - not marked as end of string')
}
}
// Not at end of key, go to middle
if node.middle_id == 0 {
println('No middle child')
return error('Key not found - no middle child')
}
println('Going to middle child')
result = self.search_recursive(node.middle_id, key, pos + 1) or {
return error(err.str())
}
result = self.search_recursive(node.middle_id, key, pos + 1) or { return error(err.str()) }
return result
}
@@ -296,7 +290,7 @@ fn (mut self TST) search_recursive(node_id u32, key string, pos int) ![]u8 {
pub fn (mut self TST) delete(key string) ! {
normalized_key := namefix(key)
println('Deleting key: "${key}" (normalized: "${normalized_key}")')
if normalized_key.len == 0 {
return error('Empty key not allowed')
}
@@ -315,7 +309,7 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
println('Node ID is 0, key not found')
return error('Key not found')
}
// Check for position out of bounds
if pos >= key.len {
println('Position ${pos} is out of bounds for key "${key}"')
@@ -327,12 +321,12 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
println('Failed to get node data for ID ${node_id}')
return error('Node retrieval error: ${err}')
}
mut node := deserialize_node(node_data) or {
println('Failed to deserialize node with ID ${node_id}')
return error('Node deserialization error: ${err}')
}
println('Deleting from node ${node_id}: character=${node.character} (${node.character.ascii_str()}), is_end=${node.is_end_of_string}, left=${node.left_id}, middle=${node.middle_id}, right=${node.right_id}, pos=${pos}')
mut deleted := false
@@ -343,7 +337,7 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
println('Left child is null, key not found')
return error('Key not found')
}
deleted = self.delete_recursive(node.left_id, key, pos)!
if deleted && node.left_id != 0 {
// Check if the left child has been deleted
@@ -364,7 +358,7 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
println('Right child is null, key not found')
return error('Key not found')
}
deleted = self.delete_recursive(node.right_id, key, pos)!
if deleted && node.right_id != 0 {
// Check if the right child has been deleted
@@ -405,12 +399,12 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
}
} else {
// Move to the next character in the key
println('Moving to next character: ${key[pos+1]} (${key[pos+1].ascii_str()})')
println('Moving to next character: ${key[pos + 1]} (${key[pos + 1].ascii_str()})')
if node.middle_id == 0 {
println('Middle child is null, key not found')
return error('Key not found')
}
deleted = self.delete_recursive(node.middle_id, key, pos + 1)!
if deleted && node.middle_id != 0 {
// Check if the middle child has been deleted

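Pulling together the public API touched in this file, a usage sketch; the `NewArgs` fields are not shown in this diff, so construction is schematic:

fn tst_usage_sketch() ! {
	mut tree := new()! // NewArgs fields are not visible in this hunk, left at defaults
	tree.set('Hello', 'world'.bytes())! // namefix lowercases keys, so this is stored as 'hello'
	value := tree.get('hello')!
	assert value.bytestr() == 'world'
	tree.delete('hello')!
}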
View File

@@ -18,17 +18,17 @@ pub fn (mut self TST) list(prefix string) ![]string {
// Find the prefix node first
result_info := self.navigate_to_prefix(self.root_id, normalized_prefix, 0)
if !result_info.found {
println('Prefix node not found for "${normalized_prefix}"')
return result // Empty result
}
println('Found node for prefix "${normalized_prefix}" at node ${result_info.node_id}, collecting keys')
// Collect all keys from the subtree rooted at the prefix node
self.collect_keys_with_prefix(result_info.node_id, result_info.prefix, mut result)!
println('Found ${result.len} keys with prefix "${normalized_prefix}": ${result}')
return result
}
@@ -45,23 +45,31 @@ fn (mut self TST) navigate_to_prefix(node_id u32, prefix string, pos int) Prefix
// Base case: no node or out of bounds
if node_id == 0 || pos >= prefix.len {
return PrefixSearchResult{
found: false
found: false
node_id: 0
prefix: ''
prefix: ''
}
}
// Get node
node_data := self.db.get(node_id) or {
return PrefixSearchResult{found: false, node_id: 0, prefix: ''}
return PrefixSearchResult{
found: false
node_id: 0
prefix: ''
}
}
node := deserialize_node(node_data) or {
return PrefixSearchResult{found: false, node_id: 0, prefix: ''}
return PrefixSearchResult{
found: false
node_id: 0
prefix: ''
}
}
println('Navigating node ${node_id}: char=${node.character} (${node.character.ascii_str()}), pos=${pos}, prefix_char=${prefix[pos]} (${prefix[pos].ascii_str()})')
// Character comparison
if prefix[pos] < node.character {
// Go left
@@ -74,24 +82,28 @@ fn (mut self TST) navigate_to_prefix(node_id u32, prefix string, pos int) Prefix
} else {
// Character match
println('Character match found')
// Check if we're at the end of the prefix
if pos == prefix.len - 1 {
println('Reached end of prefix at node ${node_id}')
// Return the exact prefix string that was passed in
return PrefixSearchResult{
found: true
found: true
node_id: node_id
prefix: prefix
prefix: prefix
}
}
// Not at end of prefix, check middle child
if node.middle_id == 0 {
println('No middle child, prefix not found')
return PrefixSearchResult{found: false, node_id: 0, prefix: ''}
return PrefixSearchResult{
found: false
node_id: 0
prefix: ''
}
}
// Continue to middle child with next character
return self.navigate_to_prefix(node.middle_id, prefix, pos + 1)
}
@@ -102,17 +114,17 @@ fn (mut self TST) collect_keys_with_prefix(node_id u32, prefix string, mut resul
if node_id == 0 {
return
}
// Get node
node_data := self.db.get(node_id) or { return }
node := deserialize_node(node_data) or { return }
println('Collecting from node ${node_id}, char=${node.character} (${node.character.ascii_str()}), prefix="${prefix}"')
// If this node is an end of string and it's not the root, we found a key
if node.is_end_of_string && node.character != 0 {
// The prefix may already contain this node's character
if prefix.len == 0 || prefix[prefix.len-1] != node.character {
if prefix.len == 0 || prefix[prefix.len - 1] != node.character {
println('Found complete key: "${prefix}${node.character.ascii_str()}"')
result << prefix + node.character.ascii_str()
} else {
@@ -120,24 +132,24 @@ fn (mut self TST) collect_keys_with_prefix(node_id u32, prefix string, mut resul
result << prefix
}
}
// Recursively search all children
if node.left_id != 0 {
self.collect_keys_with_prefix(node.left_id, prefix, mut result)!
}
// For middle child, we need to add this node's character to the prefix
if node.middle_id != 0 {
mut next_prefix := prefix
if node.character != 0 { // Skip root node
// Only add the character if it's not already at the end of the prefix
if prefix.len == 0 || prefix[prefix.len-1] != node.character {
if prefix.len == 0 || prefix[prefix.len - 1] != node.character {
next_prefix += node.character.ascii_str()
}
}
self.collect_keys_with_prefix(node.middle_id, next_prefix, mut result)!
}
if node.right_id != 0 {
self.collect_keys_with_prefix(node.right_id, prefix, mut result)!
}
@@ -148,19 +160,19 @@ fn (mut self TST) collect_all_keys(node_id u32, prefix string, mut result []stri
if node_id == 0 {
return
}
// Get node
node_data := self.db.get(node_id) or { return }
node := deserialize_node(node_data) or { return }
// Calculate current path
mut current_prefix := prefix
// If this is not the root, add the character
if node.character != 0 {
current_prefix += node.character.ascii_str()
}
// If this marks the end of a key, add it to the result
if node.is_end_of_string {
println('Found key: ${current_prefix}')
@@ -168,16 +180,16 @@ fn (mut self TST) collect_all_keys(node_id u32, prefix string, mut result []stri
result << current_prefix
}
}
// Visit all children
if node.left_id != 0 {
self.collect_all_keys(node.left_id, prefix, mut result)!
}
if node.middle_id != 0 {
self.collect_all_keys(node.middle_id, current_prefix, mut result)!
}
if node.right_id != 0 {
self.collect_all_keys(node.right_id, prefix, mut result)!
}
@@ -187,7 +199,7 @@ fn (mut self TST) collect_all_keys(node_id u32, prefix string, mut result []stri
pub fn (mut self TST) getall(prefix string) ![][]u8 {
normalized_prefix := namefix(prefix)
println('Getting all values with prefix: "${prefix}" (normalized: "${normalized_prefix}")')
// Get all matching keys
keys := self.list(normalized_prefix)!
@@ -201,4 +213,4 @@ pub fn (mut self TST) getall(prefix string) ![][]u8 {
println('Found ${values.len} values with prefix "${normalized_prefix}"')
return values
}
}

View File

@@ -182,13 +182,13 @@ fn test_getall() {
return
}
assert hel_values.len == 2, 'Expected 2 values with prefix "hel", got ${hel_values.len}'
// Convert byte arrays to strings for easier comparison
mut hel_strings := []string{}
for val in hel_values {
hel_strings << val.bytestr()
}
assert 'world' in hel_strings, 'Expected "world" in values with prefix "hel"'
assert 'me' in hel_strings, 'Expected "me" in values with prefix "hel"'
}
@@ -232,4 +232,4 @@ fn test_persistence() {
}
assert value2.bytestr() == 'value', 'Expected "value", got "${value2.bytestr()}"'
}
}
}

View File

@@ -116,11 +116,11 @@ fn (p CustomProperty) xml_str() string {
fn test_custom_property() {
// Test custom property
custom_prop := CustomProperty{
name: 'author'
value: 'Kristof'
name: 'author'
value: 'Kristof'
namespace: 'C'
}
assert custom_prop.xml_str() == '<C:author>Kristof</C:author>'
assert custom_prop.xml_name() == '<author/>'
}
@@ -131,16 +131,15 @@ fn test_propfind_response() {
props << DisplayName('test-file.txt')
props << GetLastModified('Mon, 01 Jan 2024 12:00:00 GMT')
props << GetContentLength('1024')
// Build a complete PROPFIND response with multistatus
xml_output := '<D:multistatus xmlns:D="DAV:">
<D:response>
<D:href>/test-file.txt</D:href>
${props.xml_str()}
</D:response>
</D:multistatus>'
// Verify the XML structure
</D:multistatus>' // Verify the XML structure
assert xml_output.contains('<D:multistatus')
assert xml_output.contains('<D:response>')
assert xml_output.contains('<D:href>')
@@ -157,7 +156,7 @@ fn test_propfind_with_missing_properties() {
</D:prop>
<D:status>HTTP/1.1 404 Not Found</D:status>
</D:propstat>'
// Simple verification of structure
assert missing_prop_response.contains('<D:propstat>')
assert missing_prop_response.contains('<D:nonexistent-property/>')
@@ -167,12 +166,12 @@ fn test_propfind_with_missing_properties() {
fn test_supported_lock_detailed() {
supported_lock := SupportedLock('')
xml_output := supported_lock.xml_str()
// Test SupportedLock provides a fully formed XML snippet for supportedlock
// Note: This test assumes the actual implementation returns a simplified version
// as indicated by the xml_str() method which returns '<D:supportedlock>...</D:supportedlock>'
assert xml_output.contains('<D:supportedlock>')
// Detailed testing would need proper parsing of the XML to verify elements
// For real implementation, test should check for:
// - lockentry elements
@@ -183,11 +182,11 @@ fn test_supported_lock_detailed() {
fn test_proppatch_request() {
// Create property to set
author_prop := CustomProperty{
name: 'author'
value: 'Kristof'
name: 'author'
value: 'Kristof'
namespace: 'C'
}
// Create XML for PROPPATCH request (set)
proppatch_set := '<D:propertyupdate xmlns:D="DAV:" xmlns:C="http://example.com/customns">
<D:set>
@@ -195,14 +194,13 @@ fn test_proppatch_request() {
${author_prop.xml_str()}
</D:prop>
</D:set>
</D:propertyupdate>'
// Check structure
</D:propertyupdate>' // Check structure
assert proppatch_set.contains('<D:propertyupdate')
assert proppatch_set.contains('<D:set>')
assert proppatch_set.contains('<D:prop>')
assert proppatch_set.contains('<C:author>Kristof</C:author>')
// Create XML for PROPPATCH request (remove)
proppatch_remove := '<D:propertyupdate xmlns:D="DAV:">
<D:remove>
@@ -211,7 +209,7 @@ fn test_proppatch_request() {
</D:prop>
</D:remove>
</D:propertyupdate>'
// Check structure
assert proppatch_remove.contains('<D:propertyupdate')
assert proppatch_remove.contains('<D:remove>')
@@ -224,7 +222,7 @@ fn test_prop_name_listing() {
mut props := []Property{}
props << DisplayName('file.txt')
props << GetContentType('text/plain')
// Generate propname response
// Note: In a complete implementation, there would be a function to generate this XML
// For testing purposes, we're manually creating the expected structure
@@ -240,7 +238,7 @@ fn test_prop_name_listing() {
</D:propstat>
</D:response>
</D:multistatus>'
// Verify structure
assert propname_response.contains('<D:multistatus')
assert propname_response.contains('<D:prop>')
@@ -262,7 +260,7 @@ fn test_namespace_declarations() {
</D:propstat>
</D:response>
</D:multistatus>'
// Verify key namespace elements
assert response_with_ns.contains('xmlns:D="DAV:"')
assert response_with_ns.contains('xmlns:C="http://example.com/customns"')
@@ -290,7 +288,7 @@ fn test_depth_header_responses() {
</D:propstat>
</D:response>
</D:multistatus>'
// Verify structure contains multiple responses
assert multi_response.contains('<D:response>')
assert multi_response.count('<D:response>') == 2

View File

@@ -303,22 +303,22 @@ fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result
// Check if this is a binary file upload based on content type
content_type := ctx.req.header.get(.content_type) or { '' }
is_binary := is_binary_content_type(content_type)
// Handle binary uploads directly
if is_binary {
log.info('[WebDAV] Processing binary upload for ${path} (${content_type})')
// Handle the binary upload directly
ctx.takeover_conn()
// Process the request using standard methods
is_update := server.vfs.exists(path)
// Return success response
ctx.res.set_status(if is_update { .ok } else { .created })
return veb.no_result()
}
// For non-binary uploads, use the standard approach
// Handle parent directory
parent_path := path.all_before_last('/')
@@ -345,13 +345,13 @@ fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result
ctx.res.set_status(.conflict)
return ctx.text('HTTP 409: Conflict - Cannot replace directory with file')
}
// Create the file after deleting the directory
server.vfs.file_create(path) or {
log.error('[WebDAV] Failed to create file ${path} after deleting directory: ${err.msg()}')
return ctx.server_error('Failed to create file: ${err.msg()}')
}
// Now it's not an update anymore
is_update = false
}
@@ -602,22 +602,15 @@ fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result
fn is_binary_content_type(content_type string) bool {
// Normalize the content type by converting to lowercase
normalized := content_type.to_lower()
// Check for common binary file types
return normalized.contains('application/octet-stream') ||
(normalized.contains('application/') && (
normalized.contains('msword') ||
normalized.contains('excel') ||
normalized.contains('powerpoint') ||
normalized.contains('pdf') ||
normalized.contains('zip') ||
normalized.contains('gzip') ||
normalized.contains('x-tar') ||
normalized.contains('x-7z') ||
normalized.contains('x-rar')
)) ||
(normalized.contains('image/') && !normalized.contains('svg')) ||
normalized.contains('audio/') ||
normalized.contains('video/') ||
normalized.contains('vnd.openxmlformats') // Office documents
return normalized.contains('application/octet-stream')
|| (normalized.contains('application/') && (normalized.contains('msword')
|| normalized.contains('excel') || normalized.contains('powerpoint')
|| normalized.contains('pdf') || normalized.contains('zip')
|| normalized.contains('gzip') || normalized.contains('x-tar')
|| normalized.contains('x-7z') || normalized.contains('x-rar')))
|| (normalized.contains('image/') && !normalized.contains('svg'))
|| normalized.contains('audio/') || normalized.contains('video/')
|| normalized.contains('vnd.openxmlformats') // Office documents
}
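
The predicate above decides whether a PUT body is routed through the binary fast path purely from the Content-Type header. A minimal standalone sketch of the same idea (hypothetical helper name, abbreviated type list — not the server's actual function):

module main

// is_binary_ct is a simplified, hypothetical re-implementation of
// is_binary_content_type above, kept short to illustrate the behavior.
fn is_binary_ct(content_type string) bool {
	normalized := content_type.to_lower()
	return normalized.contains('application/octet-stream')
		|| (normalized.contains('application/') && normalized.contains('pdf'))
		|| (normalized.contains('image/') && !normalized.contains('svg'))
}

fn main() {
	println(is_binary_ct('application/pdf; charset=binary')) // true
	println(is_binary_ct('image/svg+xml')) // false: SVG is XML text
	println(is_binary_ct('text/plain; charset=utf-8')) // false
}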

View File

@@ -66,19 +66,35 @@ fn (mut server Server) get_entry_property(entry &vfs.FSEntry, name string) !Prop
property_name := if name.contains(':') { name.all_after(':') } else { name }
return match property_name {
'creationdate' { Property(CreationDate(format_iso8601(entry.get_metadata().created_time()))) }
'getetag' { Property(GetETag(entry.get_metadata().id.str())) }
'resourcetype' { Property(ResourceType(entry.is_dir())) }
'getlastmodified', 'lastmodified_server' {
'creationdate' {
Property(CreationDate(format_iso8601(entry.get_metadata().created_time())))
}
'getetag' {
Property(GetETag(entry.get_metadata().id.str()))
}
'resourcetype' {
Property(ResourceType(entry.is_dir()))
}
'getlastmodified', 'lastmodified_server' {
// Both standard getlastmodified and custom lastmodified_server properties
// return the same information
Property(GetLastModified(texttools.format_rfc1123(entry.get_metadata().modified_time())))
}
'getcontentlength' { Property(GetContentLength(entry.get_metadata().size.str())) }
'quota-available-bytes' { Property(QuotaAvailableBytes(16184098816)) }
'quota-used-bytes' { Property(QuotaUsedBytes(16184098816)) }
'quotaused' { Property(QuotaUsed(16184098816)) }
'quota' { Property(Quota(16184098816)) }
'getcontentlength' {
Property(GetContentLength(entry.get_metadata().size.str()))
}
'quota-available-bytes' {
Property(QuotaAvailableBytes(16184098816))
}
'quota-used-bytes' {
Property(QuotaUsedBytes(16184098816))
}
'quotaused' {
Property(QuotaUsed(16184098816))
}
'quota' {
Property(Quota(16184098816))
}
'displayname' {
// RFC 4918, Section 15.2: displayname is a human-readable name for UI display
// For now, we use the filename as the displayname, but this could be enhanced
@@ -102,7 +118,7 @@ fn (mut server Server) get_entry_property(entry &vfs.FSEntry, name string) !Prop
// Always show as unlocked for now to ensure compatibility
Property(LockDiscovery(''))
}
else {
else {
// For any unimplemented property, return an empty string instead of panicking
// This improves compatibility with various WebDAV clients
log.info('[WebDAV] Unimplemented property requested: ${name}')
@@ -127,16 +143,24 @@ fn (mut server Server) get_responses(entry vfs.FSEntry, req PropfindRequest, pat
}
// main entry response
responses << PropfindResponse{
href: ensure_leading_slash(if entry.is_dir() { '${path.trim_string_right('/')}/' } else { path })
href: ensure_leading_slash(if entry.is_dir() {
'${path.trim_string_right('/')}/'
} else {
path
})
// not_found: entry.get_unfound_properties(req)
found_props: properties
}
} else {
responses << PropfindResponse{
href: ensure_leading_slash(if entry.is_dir() { '${path.trim_string_right('/')}/' } else { path })
// not_found: entry.get_unfound_properties(req)
found_props: server.get_properties(entry)
}
responses << PropfindResponse{
href: ensure_leading_slash(if entry.is_dir() {
'${path.trim_string_right('/')}/'
} else {
path
})
// not_found: entry.get_unfound_properties(req)
found_props: server.get_properties(entry)
}
}
if !entry.is_dir() || req.depth == .zero {
@@ -148,10 +172,10 @@ fn (mut server Server) get_responses(entry vfs.FSEntry, req PropfindRequest, pat
return responses
}
for e in entries {
child_path := if path.ends_with('/') {
path + e.get_metadata().name
} else {
path + '/' + e.get_metadata().name
child_path := if path.ends_with('/') {
path + e.get_metadata().name
} else {
path + '/' + e.get_metadata().name
}
responses << server.get_responses(e, PropfindRequest{
...req

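get_responses above recurses one level per PROPFIND: the entry itself is always reported (directories get a trailing slash on their href), and children are appended only when the Depth header is not 0. A simplified sketch of that traversal with hypothetical types, not the server's real API:

module main

enum Depth {
	zero
	one
}

// collect_hrefs mimics the traversal in get_responses: list the entry
// itself (directories get a trailing slash), then visit children only
// when depth is not .zero.
fn collect_hrefs(path string, is_dir bool, children []string, depth Depth) []string {
	mut hrefs := []string{}
	hrefs << if is_dir { '${path.trim_string_right('/')}/' } else { path }
	if !is_dir || depth == .zero {
		return hrefs
	}
	for name in children {
		hrefs << if path.ends_with('/') { path + name } else { path + '/' + name }
	}
	return hrefs
}

fn main() {
	println(collect_hrefs('/docs', true, ['a.txt', 'b.txt'], .one))
	// ['/docs/', '/docs/a.txt', '/docs/b.txt']
	println(collect_hrefs('/docs', true, ['a.txt'], .zero))
	// ['/docs/']
}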
View File

@@ -487,11 +487,12 @@ fn test_server_propfind() ! {
assert ctx.res.header.get(.content_type)! == 'application/xml'
assert ctx.res.body.contains('<D:multistatus')
assert ctx.res.body.contains('<D:response>')
// Now that we know the correct format, check for it - directories have both leading and trailing slashes
assert ctx.res.body.contains('<D:href>/${root_dir}/</D:href>')
// Should only include the requested resource
assert !ctx.res.body.contains('<D:href>/${file_in_root}</D:href>') && !ctx.res.body.contains('<D:href>/${file_in_root}')
assert !ctx.res.body.contains('<D:href>/${file_in_root}</D:href>')
&& !ctx.res.body.contains('<D:href>/${file_in_root}')
// Test PROPFIND with depth=1 (resource and immediate children)
mut ctx2 := Context{

View File

@@ -10,7 +10,7 @@ import freeflowuniverse.herolib.core.redisclient
__global (
circle_global map[string]&CircleCoordinator
circle_default string
action_queues map[string]&ActionQueue
action_queues map[string]&ActionQueue
)
// HeroRunner is the main factory for managing jobs, agents, services, circles and names
@@ -101,7 +101,7 @@ pub fn new(args_ CircleCoordinatorArgs) !&CircleCoordinator {
@[params]
pub struct ActionQueueArgs {
pub mut:
name string = 'default' // Name of the queue
name string = 'default' // Name of the queue
redis_addr string // Redis server address, defaults to 'localhost:6379'
}
@@ -109,48 +109,48 @@ pub mut:
pub fn new_action_queue(args ActionQueueArgs) !&ActionQueue {
// Normalize the queue name
queue_name := texttools.name_fix(args.name)
// Check if queue already exists in global map
if queue_name in action_queues {
mut q := action_queues[queue_name] or { panic('bug') }
return q
}
// Set default Redis address if not provided
mut redis_addr := args.redis_addr
if redis_addr == '' {
redis_addr = 'localhost:6379'
}
// Create Redis client
mut redis := redisclient.new(redis_addr)!
// Create Redis queue
queue_key := 'actionqueue:${queue_name}'
mut redis_queue := redis.queue_get(queue_key)
// Create ActionQueue
mut action_queue := &ActionQueue{
name: queue_name
name: queue_name
queue: &redis_queue
redis: redis
}
// Store in global map
action_queues[queue_name] = action_queue
return action_queue
}
// get_action_queue retrieves an existing ActionQueue or creates a new one
pub fn get_action_queue(name string) !&ActionQueue {
queue_name := texttools.name_fix(name)
if queue_name in action_queues {
mut q := action_queues[queue_name] or { panic('bug') }
return q
}
return new_action_queue(ActionQueueArgs{
name: queue_name
})!
@@ -159,17 +159,17 @@ pub fn get_action_queue(name string) !&ActionQueue {
// get_or_create_action_queue retrieves an existing ActionQueue for a CircleCoordinator or creates a new one
pub fn (mut cc CircleCoordinator) get_or_create_action_queue(name string) !&ActionQueue {
queue_name := texttools.name_fix(name)
if queue_name in cc.action_queues {
mut q := cc.action_queues[queue_name] or { panic('bug') }
return q
}
mut action_queue := new_action_queue(ActionQueueArgs{
name: queue_name
})!
cc.action_queues[queue_name] = action_queue
return action_queue
}
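
All three factory functions above share one pattern: normalize the name, return a cached instance from a module-level map if present, otherwise build and store one. Reduced to its essentials (hypothetical names, no Redis; note that module-level globals in V require building with -enable-globals):

module main

__global (
	queues map[string]&Queue
)

struct Queue {
	name string
}

// get_queue shows the memoization used by new_action_queue and
// get_action_queue: one instance per normalized name.
fn get_queue(name string) &Queue {
	key := name.to_lower().trim_space()
	if key in queues {
		return queues[key] or { panic('bug') }
	}
	q := &Queue{
		name: key
	}
	queues[key] = q
	return q
}

fn main() {
	a := get_queue('Jobs ')
	b := get_queue('jobs')
	println(voidptr(a) == voidptr(b)) // true: same cached instance
}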

View File

@@ -19,14 +19,14 @@ pub enum ActionJobStatus {
@[heap]
pub struct ActionJob {
pub mut:
guid string
guid string
heroscript string
created ourtime.OurTime
deadline ourtime.OurTime
status ActionJobStatus
error string // Error message if job failed
async bool // Whether the job should be processed asynchronously
circleid string // ID of the circle this job belongs to
created ourtime.OurTime
deadline ourtime.OurTime
status ActionJobStatus
error string // Error message if job failed
async bool // Whether the job should be processed asynchronously
circleid string // ID of the circle this job belongs to
}
// ActionQueue is a queue of actions to be processed, backed by a Redis queue

@@ -44,15 +44,15 @@ pub fn new_action_job(heroscript string) ActionJob {
// Default deadline is 1 hour from now
mut deadline := ourtime.now()
deadline.warp('+1h') or { panic('Failed to set deadline: ${err}') }
return ActionJob{
guid: time.now().unix_milli().str(),
heroscript: heroscript,
created: now,
deadline: deadline,
status: .pending,
async: false,
circleid: ''
guid: time.now().unix_milli().str()
heroscript: heroscript
created: now
deadline: deadline
status: .pending
async: false
circleid: ''
}
}
@@ -78,15 +78,15 @@ pub fn (job ActionJob) to_playbook() !&playbook.PlayBook {
if job.heroscript.trim_space() == '' {
return error('No heroscript content in job')
}
// Create a new PlayBook with the heroscript content
mut pb := playbook.new(text: job.heroscript)!
// Check if any actions were found
if pb.actions.len == 0 {
return error('No actions found in heroscript')
}
return &pb
}
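
to_playbook re-parses the stored heroscript on demand, erroring on empty content or a script that yields no actions. A usage sketch, assuming it runs inside this module with the API exactly as defined above:

// Sketch only: new_action_job and to_playbook as defined above.
job := new_action_job('!!action.test name:demo')
pb := job.to_playbook() or { panic(err) }
println('parsed ${pb.actions.len} action(s)') // empty scripts error instead of returning 0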
@@ -104,7 +104,7 @@ pub fn (mut q ActionQueue) add_job(job ActionJob) ! {
if job.error != '' {
q.redis.hset(job_key, 'error', job.error)!
}
// Add the job reference to the queue
q.queue.add(job.guid)!
}
@@ -112,32 +112,32 @@ pub fn (mut q ActionQueue) add_job(job ActionJob) ! {
// get_job retrieves a job from Redis by its GUID
pub fn (mut q ActionQueue) get_job(guid string) !ActionJob {
job_key := 'heroactionjobs:${guid}'
// Check if the job exists
if !q.redis.exists(job_key)! {
return error('Job with GUID ${guid} not found')
}
// Retrieve job fields
mut job := ActionJob{
guid: guid,
heroscript: q.redis.hget(job_key, 'heroscript')!,
status: ActionJobStatus.pending, // Default value, will be overwritten
error: '', // Default empty error message
async: false, // Default to synchronous
circleid: '' // Default to empty circle ID
guid: guid
heroscript: q.redis.hget(job_key, 'heroscript')!
status: ActionJobStatus.pending // Default value, will be overwritten
error: '' // Default empty error message
async: false // Default to synchronous
circleid: '' // Default to empty circle ID
}
// Parse created time
created_str := q.redis.hget(job_key, 'created')!
created_unix := created_str.i64()
job.created = ourtime.new_from_epoch(u64(created_unix))
// Parse deadline
deadline_str := q.redis.hget(job_key, 'deadline')!
deadline_unix := deadline_str.i64()
job.deadline = ourtime.new_from_epoch(u64(deadline_unix))
// Parse status
status_str := q.redis.hget(job_key, 'status')!
match status_str {
@@ -148,29 +148,29 @@ pub fn (mut q ActionQueue) get_job(guid string) !ActionJob {
'cancelled' { job.status = .cancelled }
else { job.status = .pending } // Default to pending if unknown
}
// Get error message if exists
job.error = q.redis.hget(job_key, 'error') or { '' }
// Get async flag
async_str := q.redis.hget(job_key, 'async') or { 'false' }
job.async = async_str == 'true'
// Get circle ID
job.circleid = q.redis.hget(job_key, 'circleid') or { '' }
return job
}
// update_job_status updates the status of a job in Redis
pub fn (mut q ActionQueue) update_job_status(guid string, status ActionJobStatus) ! {
job_key := 'heroactionjobs:${guid}'
// Check if the job exists
if !q.redis.exists(job_key)! {
return error('Job with GUID ${guid} not found')
}
// Update status
q.redis.hset(job_key, 'status', status.str())!
}
@@ -178,12 +178,12 @@ pub fn (mut q ActionQueue) update_job_status(guid string, status ActionJobStatus
// set_job_failed marks a job as failed with an error message
pub fn (mut q ActionQueue) set_job_failed(guid string, error_msg string) ! {
job_key := 'heroactionjobs:${guid}'
// Check if the job exists
if !q.redis.exists(job_key)! {
return error('Job with GUID ${guid} not found')
}
// Update status and error message
q.redis.hset(job_key, 'status', ActionJobStatus.failed.str())!
q.redis.hset(job_key, 'error', error_msg)!
@@ -202,32 +202,32 @@ pub fn (mut q ActionQueue) find_failed_jobs() ![]ActionJob {
// and replaced with a more efficient implementation using SCAN
keys := q.redis.keys('heroactionjobs:*')!
mut failed_jobs := []ActionJob{}
for key in keys {
// Check if job is failed
status := q.redis.hget(key, 'status') or { continue }
if status == ActionJobStatus.failed.str() {
// Get the job GUID from the key
guid := key.all_after('heroactionjobs:')
// Get the full job
job := q.get_job(guid) or { continue }
failed_jobs << job
}
}
return failed_jobs
}
// delete_job deletes a job from Redis
pub fn (mut q ActionQueue) delete_job(guid string) ! {
job_key := 'heroactionjobs:${guid}'
// Check if the job exists
if !q.redis.exists(job_key)! {
return error('Job with GUID ${guid} not found')
}
// Delete the job
q.redis.del(job_key)!
}
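
Taken together, the methods above keep each job in a Redis hash under 'heroactionjobs:<guid>' while the queue itself carries only GUIDs. A lifecycle sketch using only the API shown in this diff, assuming a Redis server on localhost:6379 and execution inside this module:

// Lifecycle sketch: create, store, round-trip, fail, delete.
mut q := new_action_queue(name: 'demo') or { panic(err) }

job := new_action_job('!!action.test name:demo1')
q.add_job(job) or { panic(err) } // hash write + GUID pushed to the queue

mut stored := q.get_job(job.guid) or { panic(err) }
assert stored.heroscript == job.heroscript // fields round-trip via hget

q.update_job_status(job.guid, .processing) or { panic(err) }
q.set_job_failed(job.guid, 'demo error') or { panic(err) }
stored = q.get_job(job.guid) or { panic(err) }
assert stored.status == .failed && stored.error == 'demo error'

// delete_job removes only the hash; a GUID still sitting in the queue
// now dangles, so queue consumers must tolerate missing jobs.
q.delete_job(job.guid) or { panic(err) }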

View File

@@ -7,26 +7,26 @@ fn test_action_job() {
// Create a new action job
heroscript := '!!action.test name:test1'
job := new_action_job(heroscript)
// Verify job properties
assert job.guid != ''
assert job.heroscript == heroscript
assert job.status == ActionJobStatus.pending
assert !job.created.empty()
assert !job.deadline.empty()
// Test JSON serialization
json_str := job.to_json()
job2 := action_job_from_json(json_str) or {
assert false, 'Failed to decode job from JSON: ${err}'
return
}
// Verify deserialized job
assert job2.guid == job.guid
assert job2.heroscript == job.heroscript
assert job2.status == job.status
// Test creating job with custom deadline
job3 := new_action_job_with_deadline(heroscript, '+2h') or {
assert false, 'Failed to create job with deadline: ${err}'
@@ -41,7 +41,7 @@ fn test_action_queue() {
println('Skipping Redis test (use -d test_with_redis to run)')
return
}
// Create a new action queue
queue_name := 'test_queue_${time.now().unix_milli()}'
mut queue := new_action_queue(ActionQueueArgs{
@@ -50,13 +50,13 @@ fn test_action_queue() {
assert false, 'Failed to create action queue: ${err}'
return
}
// Create test jobs
mut job1 := new_action_job('!!action.test1 name:test1')
mut job2 := new_action_job('!!action.test2 name:test2')
mut job3 := new_action_job('!!action.test3 name:test3')
mut job4 := new_action_job('!!action.test4 name:test4')
// Add jobs to the queue
queue.add_job(job1) or {
assert false, 'Failed to add job1: ${err}'
@@ -70,14 +70,14 @@ fn test_action_queue() {
assert false, 'Failed to add job3: ${err}'
return
}
// Test count_waiting_jobs
wait_count := queue.count_waiting_jobs() or {
assert false, 'Failed to count waiting jobs: ${err}'
return
}
assert wait_count == 3, 'Expected 3 waiting jobs, got ${wait_count}'
// Fetch jobs from the queue
fetched_job1 := queue.pop_job() or {
assert false, 'Failed to pop job1: ${err}'
@@ -85,20 +85,20 @@ fn test_action_queue() {
}
assert fetched_job1.guid == job1.guid
assert fetched_job1.heroscript == job1.heroscript
fetched_job2 := queue.pop_job() or {
assert false, 'Failed to pop job2: ${err}'
return
}
assert fetched_job2.guid == job2.guid
assert fetched_job2.heroscript == job2.heroscript
// Update job status
queue.update_job_status(job3.guid, .processing) or {
assert false, 'Failed to update job status: ${err}'
return
}
// Fetch job with updated status
fetched_job3 := queue.pop_job() or {
assert false, 'Failed to pop job3: ${err}'
@@ -106,19 +106,19 @@ fn test_action_queue() {
}
assert fetched_job3.guid == job3.guid
assert fetched_job3.status == .processing
// Test setting a job as failed with error message
queue.add_job(job4) or {
assert false, 'Failed to add job4: ${err}'
return
}
// Set job as failed
queue.set_job_failed(job4.guid, 'Test error message') or {
assert false, 'Failed to set job as failed: ${err}'
return
}
// Get the failed job and verify error message
failed_job := queue.get_job(job4.guid) or {
assert false, 'Failed to get failed job: ${err}'
@@ -126,7 +126,7 @@ fn test_action_queue() {
}
assert failed_job.status == .failed
assert failed_job.error == 'Test error message'
// Test finding failed jobs
failed_jobs := queue.find_failed_jobs() or {
assert false, 'Failed to find failed jobs: ${err}'
@@ -135,39 +135,39 @@ fn test_action_queue() {
assert failed_jobs.len > 0, 'Expected at least one failed job'
assert failed_jobs[0].guid == job4.guid
assert failed_jobs[0].error == 'Test error message'
// Delete a job
queue.delete_job(job3.guid) or {
assert false, 'Failed to delete job: ${err}'
return
}
// Try to get deleted job (should fail)
queue.get_job(job3.guid) or {
// Expected error
assert err.str().contains('not found')
return
}
// Test direct put and fetch to verify heroscript preservation
test_heroscript := '!!action.special name:direct_test param1:value1 param2:value2'
mut direct_job := new_action_job(test_heroscript)
// Add the job
queue.add_job(direct_job) or {
assert false, 'Failed to add direct job: ${err}'
return
}
// Fetch the job by GUID
fetched_direct_job := queue.get_job(direct_job.guid) or {
assert false, 'Failed to get direct job: ${err}'
return
}
// Verify the heroscript is preserved exactly
assert fetched_direct_job.heroscript == test_heroscript, 'Heroscript was not preserved correctly'
// Clean up
queue.delete() or {
assert false, 'Failed to delete queue: ${err}'

View File

@@ -40,6 +40,7 @@ pub fn (mut m DBHandler[T]) get_data(id u32) ![]u8 {
}
return item_data
}
pub fn (mut m DBHandler[T]) exists(id u32) !bool {
item_data := m.session_state.dbs.db_data_core.get(id) or { return false }
return item_data != []u8{}

View File

@@ -1,7 +1,8 @@
module circle
import freeflowuniverse.herolib.hero.db.core { DBHandler, SessionState, new_dbhandler }
import freeflowuniverse.herolib.hero.db.models.circle { User, Role }
import freeflowuniverse.herolib.hero.db.models.circle { Role, User }
type UserObj = User
@[heap]
@@ -55,7 +56,7 @@ pub fn (mut m UserDB) delete(obj UserObj) ! {
// get_by_name retrieves a user by its name
pub fn (mut m UserDB) get_by_name(name string) !UserObj {
data := m.db.get_data_by_key('name', name)!
return loads_user(data)!
return loads_user(data)!
}
// delete_by_name removes a user by its name
@@ -80,4 +81,4 @@ pub fn (mut m UserDB) update_user_role(name string, new_role Role) !UserObj {
// Save the updated user
return m.set(user)!
}
}

View File

@@ -1,83 +1,80 @@
module circle
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.hero.db.models.circle { User, Role }
import freeflowuniverse.herolib.hero.db.models.circle { Role, User }
// dumps serializes a User struct to binary data
pub fn (user UserObj) dumps() ![]u8 {
mut e := encoder.new()
// Add version byte (v1)
e.add_u8(1)
// Encode Base struct fields
e.add_u32(user.Base.id)
e.add_ourtime(user.Base.creation_time)
e.add_ourtime(user.Base.mod_time)
// Encode comments array from Base
e.add_u16(u16(user.Base.comments.len))
for id in user.Base.comments {
e.add_u32(id)
}
// Encode User-specific fields
e.add_string(user.name)
e.add_string(user.description)
e.add_u8(u8(user.role)) // Encode enum as u8
// Encode contact_ids array
e.add_u16(u16(user.contact_ids.len))
for id in user.contact_ids {
e.add_u32(id)
}
// Encode wallet_ids array
e.add_u16(u16(user.wallet_ids.len))
for id in user.wallet_ids {
e.add_u32(id)
}
// Encode pubkey
e.add_string(user.pubkey)
return e.data
}
// loads deserializes binary data to a User struct
pub fn loads_user(data []u8) !User {
mut d := encoder.decoder_new(data)
// Read version byte
version := d.get_u8()!
if version != 1 {
return error('Unsupported version: ${version}')
}
// Create a new User instance
mut user := User{}
// Decode Base struct fields
user.id = d.get_u32()!
user.creation_time = d.get_ourtime()!
user.mod_time = d.get_ourtime()!
// Decode comments array from Base
comments_count := d.get_u16()!
user.comments = []u32{cap: int(comments_count)}
for _ in 0 .. comments_count {
user.comments << d.get_u32()!
}
// Decode User-specific fields
user.name = d.get_string()!
user.description = d.get_string()!
// Get the u8 value first
role_value := d.get_u8()!
// Validate and convert to Role enum
if role_value <= u8(Role.external) {
// Use unsafe block for casting number to enum as required by V
@@ -87,23 +84,23 @@ pub fn loads_user(data []u8) !User {
} else {
return error('Invalid role value: ${role_value}')
}
// Decode contact_ids array
contact_count := d.get_u16()!
user.contact_ids = []u32{cap: int(contact_count)}
for _ in 0 .. contact_count {
user.contact_ids << d.get_u32()!
}
// Decode wallet_ids array
wallet_count := d.get_u16()!
user.wallet_ids = []u32{cap: int(wallet_count)}
for _ in 0 .. wallet_count {
user.wallet_ids << d.get_u32()!
}
// Decode pubkey
user.pubkey = d.get_string()!
return user
}
}
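
The pair above follows a simple versioned binary layout: one version byte, the fixed fields, then each array as a u16 count followed by its elements. The same discipline applied to a toy struct, using only the encoder calls visible in this diff:

module main

import freeflowuniverse.herolib.data.encoder

struct Tag {
mut:
	id    u32
	names []string
}

// dumps_tag writes: version byte, id, then a u16-counted list of names --
// the same layout discipline as UserObj.dumps() above.
fn dumps_tag(t Tag) []u8 {
	mut e := encoder.new()
	e.add_u8(1) // version
	e.add_u32(t.id)
	e.add_u16(u16(t.names.len))
	for n in t.names {
		e.add_string(n)
	}
	return e.data
}

fn loads_tag(data []u8) !Tag {
	mut d := encoder.decoder_new(data)
	version := d.get_u8()!
	if version != 1 {
		return error('unsupported version: ${version}')
	}
	mut t := Tag{}
	t.id = d.get_u32()!
	count := d.get_u16()!
	for _ in 0 .. count {
		t.names << d.get_string()!
	}
	return t
}

fn main() {
	t := Tag{
		id:    7
		names: ['a', 'b']
	}
	t2 := loads_tag(dumps_tag(t)) or { panic(err) }
	assert t2.id == 7 && t2.names == ['a', 'b']
}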

View File

@@ -1,6 +1,6 @@
module circle
import freeflowuniverse.herolib.hero.db.core { SessionState, new_session }
import freeflowuniverse.herolib.hero.db.core { new_session }
import freeflowuniverse.herolib.hero.db.models.circle { Role }
import freeflowuniverse.herolib.data.ourtime
import os
@@ -8,7 +8,7 @@ import os
// test_user_db tests the functionality of the UserDB
pub fn test_user_db() ! {
println('Starting User DB Test')
// Create a temporary directory for the test
test_dir := os.join_path(os.temp_dir(), 'hero_user_test')
os.mkdir_all(test_dir) or { return error('Failed to create test directory: ${err}') }
@@ -16,20 +16,20 @@ pub fn test_user_db() ! {
// Clean up after test
os.rmdir_all(test_dir) or { eprintln('Failed to remove test directory: ${err}') }
}
// Create a new session state
mut session := new_session(
name: 'test_session'
path: test_dir
)!
println('Session created: ${session.name}')
// Initialize the UserDB
mut user_db := new_userdb(session)!
println('UserDB initialized')
// Create and add users
mut admin_user := user_db.new()
admin_user.name = 'admin_user'
@@ -41,11 +41,11 @@ pub fn test_user_db() ! {
// println(admin_user)
// if true{panic("sss")}
// Save the admin user
admin_user = user_db.set(admin_user)!
println('Admin user created with ID: ${admin_user.Base.id}')
// Create a regular member
mut member_user := user_db.new()
member_user.name = 'member_user'
@@ -54,11 +54,11 @@ pub fn test_user_db() ! {
member_user.pubkey = 'member_pubkey_456'
member_user.creation_time = ourtime.now()
member_user.mod_time = ourtime.now()
// Save the member user
member_user = user_db.set(member_user)!
println('Member user created with ID: ${member_user.Base.id}')
// Create a guest user
mut guest_user := user_db.new()
guest_user.name = 'guest_user'
@@ -67,48 +67,47 @@ pub fn test_user_db() ! {
guest_user.pubkey = 'guest_pubkey_789'
guest_user.creation_time = ourtime.now()
guest_user.mod_time = ourtime.now()
// Save the guest user
guest_user = user_db.set(guest_user)!
println('Guest user created with ID: ${guest_user.Base.id}')
// Retrieve users by ID
retrieved_admin := user_db.get(admin_user.Base.id)!
println('Retrieved admin user by ID: ${retrieved_admin.name} (Role: ${retrieved_admin.role})')
// Retrieve users by name
retrieved_member := user_db.get_by_name('member_user')!
println('Retrieved member user by name: ${retrieved_member.name} (Role: ${retrieved_member.role})')
// Update a user's role
updated_guest := user_db.update_user_role('guest_user', Role.contributor)!
println('Updated guest user role to contributor: ${updated_guest.name} (Role: ${updated_guest.role})')
// List all users
user_ids := user_db.list()!
println('Total users: ${user_ids.len}')
println('User IDs: ${user_ids}')
// Get all users
all_users := user_db.getall()!
println('All users:')
for user in all_users {
println(' - ${user.name} (ID: ${user.Base.id}, Role: ${user.role})')
}
// Delete a user
user_db.delete(member_user)!
println('Deleted member user with ID: ${member_user.Base.id}')
// Delete a user by name
user_db.delete_by_name('guest_user')!
println('Deleted guest user by name')
// List remaining users
remaining_user_ids := user_db.list()!
println('Remaining users: ${remaining_user_ids.len}')
println('Remaining user IDs: ${remaining_user_ids}')
println('User DB Test completed successfully')
}

View File

@@ -5,9 +5,8 @@ import freeflowuniverse.herolib.data.ourtime
// Our attempt to make a message object that can be used for email as well as chat
pub struct Base {
pub mut:
id u32
creation_time ourtime.OurTime
mod_time ourtime.OurTime // Last modified time
comments []u32
id u32
creation_time ourtime.OurTime
mod_time ourtime.OurTime // Last modified time
comments []u32
}
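
Base is embedded in the model structs (the test code above reads user.Base.id), so every record shares the same id, timestamps, and comment list. A minimal embedding sketch with simplified fields:

module main

struct Base {
mut:
	id u32
}

struct User {
	Base // struct embedding: User inherits Base's fields
mut:
	name string
}

fn main() {
	mut u := User{
		name: 'admin'
	}
	u.id = 42 // promoted field, equivalent to u.Base.id = 42
	println('${u.name}: ${u.Base.id}')
}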
