commit 46e1c6706c
parent d8a59d0726
2025-05-04 08:19:47 +03:00
177 changed files with 5708 additions and 5512 deletions

View File

@@ -5,25 +5,24 @@ module main
 import freeflowuniverse.herolib.clients.openai
 import os
 
-fn test1(mut client openai.OpenAI)!{
-	instruction:='
+fn test1(mut client openai.OpenAI) ! {
+	instruction := '
 You are a template language converter. You convert Pug templates to Jet templates.
 The target template language, Jet, is defined as follows:
 '
 	// Create a chat completion request
-	res := client.chat_completion(msgs:openai.Messages{
-		messages: [
-			openai.Message{
-				role: .user
-				content: 'What are the key differences between Groq and other AI inference providers?'
-			},
-		]
-	})!
+	res := client.chat_completion(
+		msgs: openai.Messages{
+			messages: [
+				openai.Message{
+					role: .user
+					content: 'What are the key differences between Groq and other AI inference providers?'
+				},
+			]
+		}
+	)!
 
 	// Print the response
 	println('\nGroq AI Response:')
@@ -33,23 +32,21 @@ fn test1(mut client openai.OpenAI)!{
 	println('Prompt tokens: ${res.usage.prompt_tokens}')
 	println('Completion tokens: ${res.usage.completion_tokens}')
 	println('Total tokens: ${res.usage.total_tokens}')
 }
 
-fn test2(mut client openai.OpenAI)!{
+fn test2(mut client openai.OpenAI) ! {
 	// Create a chat completion request
 	res := client.chat_completion(
-		model:"deepseek-r1-distill-llama-70b",
-		msgs:openai.Messages{
+		model: 'deepseek-r1-distill-llama-70b'
+		msgs: openai.Messages{
 			messages: [
 				openai.Message{
 					role: .user
 					content: 'A story of 10 lines?'
 				},
 			]
-	})!
+		}
+	)!
 
 	println('\nGroq AI Response:')
 	println('==================')
@@ -57,21 +54,18 @@ fn test2(mut client openai.OpenAI)!{
 	println('\nUsage Statistics:')
 	println('Prompt tokens: ${res.usage.prompt_tokens}')
 	println('Completion tokens: ${res.usage.completion_tokens}')
 	println('Total tokens: ${res.usage.total_tokens}')
 }
 
-println('
+println("
 TO USE:
-export AIKEY=\'gsk_...\'
-export AIURL=\'https://api.groq.com/openai/v1\'
-export AIMODEL=\'llama-3.3-70b-versatile\'
-')
+export AIKEY='gsk_...'
+export AIURL='https://api.groq.com/openai/v1'
+export AIMODEL='llama-3.3-70b-versatile'
+")
 
-mut client:=openai.get(name:"test")!
+mut client := openai.get(name: 'test')!
 println(client)
 
 // test1(mut client)!
 test2(mut client)!
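Most of what changed in this file is mechanical `v fmt` output: `:=` and the result operator `!` gain surrounding spaces, double-quoted strings become single-quoted (and strings containing single quotes flip to double quotes to drop the escapes), and multi-argument calls are broken onto one named argument per line. A minimal sketch of the resulting style; the `openai.get`/`chat_completion` signatures are assumed from the diff above, not verified against herolib:

```v
module main

import freeflowuniverse.herolib.clients.openai

fn main() {
    // v fmt style: spaces around `:=`, single-quoted strings,
    // one named argument per line in config-style calls
    mut client := openai.get(name: 'test') or { panic(err) }
    res := client.chat_completion(
        msgs: openai.Messages{
            messages: [
                openai.Message{
                    role: .user
                    content: 'Say hello in one line.'
                },
            ]
        }
    ) or { panic(err) }
    println(res.choices[0].message.content)
}
```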

View File

@@ -4,4 +4,4 @@ import freeflowuniverse.herolib.mcp.aitools
 
 // aitools.convert_pug("/root/code/github/freeflowuniverse/herolauncher/pkg/herolauncher/web/templates/admin")!
-aitools.convert_pug("/root/code/github/freeflowuniverse/herolauncher/pkg/zaz/webui/templates")!
+aitools.convert_pug('/root/code/github/freeflowuniverse/herolauncher/pkg/zaz/webui/templates')!

View File

@@ -12,7 +12,7 @@ println('Starting Qdrant example script')
 println('Current directory: ${os.getwd()}')
 println('Home directory: ${os.home_dir()}')
 
-mut i:=qdrant_installer.get()!
+mut i := qdrant_installer.get()!
 i.install()!
 
 // 1. Get the qdrant client

View File

@@ -6,5 +6,3 @@ import freeflowuniverse.herolib.web.docusaurus
 mut docs := docusaurus.new(
 	build_path: '/tmp/docusaurus_build'
 )!

View File

@@ -90,14 +90,13 @@ fn main() {
 '
 	mut docs := docusaurus.new(
 		build_path: os.join_path(os.home_dir(), 'hero/var/docusaurus_demo1')
 		update: true // Update the templates
 		heroscript: hero_script
 	) or {
 		eprintln('Error creating docusaurus factory with inline script: ${err}')
 		exit(1)
 	}
 
 	// Create a site directory if it doesn't exist
 	site_path := os.join_path(os.home_dir(), 'hero/var/docusaurus_demo_src')
@@ -204,19 +203,19 @@ console.log(result);
 		eprintln('Error generating site: ${err}')
 		exit(1)
 	}
 	println('Site generated successfully!')
 
 	// Choose which operation to perform:
 	// Option 1: Run in development mode
 	// This will start a development server in a screen session
 	println('Starting development server...')
 	site.dev() or {
 		eprintln('Error starting development server: ${err}')
 		exit(1)
 	}
 
 	// Option 2: Build for production (uncomment to use)
 	/*
 	println('Building site for production...')
@@ -236,4 +235,4 @@ console.log(result);
 	}
 	println('Site published successfully!')
 	*/
 }
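The docusaurus example leans on V's `or` blocks to turn any error into a message plus `exit(1)`. A self-contained sketch of that pattern; the file path is illustrative:

```v
module main

import os

fn main() {
    // `or { ... exit(1) }` handles the error branch of a `!`-returning call,
    // the same shape as the docusaurus.new(...) or { ... } calls above
    content := os.read_file('/tmp/does_not_exist.txt') or {
        eprintln('Error reading file: ${err}')
        exit(1)
    }
    println(content)
}
```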

View File

@@ -6,35 +6,35 @@ import freeflowuniverse.herolib.clients.openai
 @[params]
 pub struct TaskParams {
 pub:
 	name string
 	description string
 }
 
 // Create a new task
 pub fn new_task(params TaskParams) &Task {
 	return &Task{
 		name: params.name
 		description: params.description
 		unit_tasks: []
 		current_result: ''
 	}
 }
 
 // Default model configurations
 pub fn default_base_model() ModelConfig {
 	return ModelConfig{
 		name: 'qwen2.5-7b-instruct'
 		provider: 'openai'
 		temperature: 0.7
 		max_tokens: 2000
 	}
 }
 
 pub fn default_retry_model() ModelConfig {
 	return ModelConfig{
 		name: 'gpt-4'
 		provider: 'openai'
 		temperature: 0.7
 		max_tokens: 4000
 	}
 }

View File

@@ -5,59 +5,58 @@ import freeflowuniverse.herolib.clients.openai
 // ModelConfig defines the configuration for an AI model
 pub struct ModelConfig {
 pub mut:
 	name string
 	provider string
 	temperature f32
 	max_tokens int
 }
 
 // Create model configs
-const claude_3_sonnet = escalayer.ModelConfig{
+const claude_3_sonnet = ModelConfig{
 	name: 'anthropic/claude-3.7-sonnet'
 	provider: 'anthropic'
 	temperature: 0.7
 	max_tokens: 25000
 }
 
-const gpt4 = escalayer.ModelConfig{
+const gpt4 = ModelConfig{
 	name: 'gpt-4'
 	provider: 'openai'
 	temperature: 0.7
 	max_tokens: 25000
 }
 
 // Call an AI model using OpenRouter
-fn call_ai_model(prompt string, model ModelConfig)! string {
+fn call_ai_model(prompt string, model ModelConfig) !string {
 	// Get OpenAI client (configured for OpenRouter)
 	mut client := get_openrouter_client()!
 
 	// Create the message for the AI
 	mut m := openai.Messages{
 		messages: [
 			openai.Message{
 				role: .system
 				content: 'You are a helpful assistant.'
 			},
 			openai.Message{
 				role: .user
 				content: prompt
-			}
+			},
 		]
 	}
 
 	// Call the AI model
 	res := client.chat_completion(
-		msgs: m,
-		model: model.name,
-		temperature: model.temperature,
+		msgs: m
+		model: model.name
+		temperature: model.temperature
 		max_completion_tokens: model.max_tokens
 	)!
 
 	// Extract the response content
 	if res.choices.len > 0 {
 		return res.choices[0].message.content
 	}
 
 	return error('No response from AI model')
 }

View File

@@ -5,19 +5,18 @@ import freeflowuniverse.herolib.osal
 import os
 
 // Get an OpenAI client configured for OpenRouter
-fn get_openrouter_client()! &openai.OpenAI {
+fn get_openrouter_client() !&openai.OpenAI {
 	osal.env_set(key: 'OPENROUTER_API_KEY', value: '')
 
 	// Get API key from environment variable
 	api_key := os.getenv('OPENROUTER_API_KEY')
 	if api_key == '' {
 		return error('OPENROUTER_API_KEY environment variable not set')
 	}
 
 	// Create OpenAI client with OpenRouter base URL
 	mut client := openai.get(
 		name: 'openrouter'
 	)!
 
 	return client
 }
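`fn get_openrouter_client()! &openai.OpenAI` became `fn get_openrouter_client() !&openai.OpenAI`: the result operator belongs to the return type, so `v fmt` writes `!&T` as one token. A standalone sketch with a hypothetical struct:

```v
module main

struct Client {
    name string
}

// `!&Client` means: returns a reference to a Client, or an error
fn get_client(name string) !&Client {
    if name == '' {
        return error('name required')
    }
    return &Client{
        name: name
    }
}

fn main() {
    client := get_client('openrouter') or { panic(err) }
    println(client.name)
}
```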

View File

@@ -5,53 +5,61 @@ import log
 // Task represents a complete AI task composed of multiple sequential unit tasks
 pub struct Task {
 pub mut:
 	name string
 	description string
 	unit_tasks []UnitTask
 	current_result string
 }
 
 // UnitTaskParams defines the parameters for creating a new unit task
 @[params]
 pub struct UnitTaskParams {
 pub:
 	name string
-	prompt_function fn(string) string
-	callback_function fn(string)! string
+	prompt_function fn (string) string
+	callback_function fn (string) !string
 	base_model ?ModelConfig
 	retry_model ?ModelConfig
 	retry_count ?int
 }
 
 // Add a new unit task to the task
 pub fn (mut t Task) new_unit_task(params UnitTaskParams) &UnitTask {
 	mut unit_task := UnitTask{
 		name: params.name
 		prompt_function: params.prompt_function
 		callback_function: params.callback_function
-		base_model: if base_model := params.base_model { base_model } else { default_base_model() }
-		retry_model: if retry_model := params.retry_model { retry_model } else { default_retry_model() }
+		base_model: if base_model := params.base_model {
+			base_model
+		} else {
+			default_base_model()
+		}
+		retry_model: if retry_model := params.retry_model {
+			retry_model
+		} else {
+			default_retry_model()
+		}
 		retry_count: if retry_count := params.retry_count { retry_count } else { 3 }
 	}
 
 	t.unit_tasks << unit_task
 	return &t.unit_tasks[t.unit_tasks.len - 1]
 }
 
 // Initiate the task execution
-pub fn (mut t Task) initiate(input string)! string {
+pub fn (mut t Task) initiate(input string) !string {
 	mut current_input := input
 
 	for i, mut unit_task in t.unit_tasks {
-		log.error('Executing unit task ${i+1}/${t.unit_tasks.len}: ${unit_task.name}')
+		log.error('Executing unit task ${i + 1}/${t.unit_tasks.len}: ${unit_task.name}')
 
 		// Execute the unit task with the current input
 		result := unit_task.execute(current_input)!
 
 		// Update the current input for the next unit task
 		current_input = result
 		t.current_result = result
 	}
 
 	return t.current_result
 }
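The `base_model:` / `retry_model:` initializers use V's option-unwrapping `if`: `if x := some_option { x } else { fallback }` evaluates to the unwrapped value when the option is set. A minimal sketch of the same default-fallback pattern; the field names here are illustrative:

```v
module main

@[params]
struct Params {
    retry_count ?int // optional: the caller may omit it
}

fn resolve_retry_count(params Params) int {
    // unwraps the option, falling back to a default, as in new_unit_task above
    return if retry_count := params.retry_count { retry_count } else { 3 }
}

fn main() {
    println(resolve_retry_count(Params{})) // 3 (default)
    println(resolve_retry_count(Params{ retry_count: 5 })) // 5
}
```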

View File

@@ -6,66 +6,66 @@ import freeflowuniverse.herolib.clients.openai
 // UnitTask represents a single step in the task
 pub struct UnitTask {
 pub mut:
 	name string
-	prompt_function fn(string) string
-	callback_function fn(string)! string
+	prompt_function fn (string) string
+	callback_function fn (string) !string
 	base_model ModelConfig
 	retry_model ModelConfig
 	retry_count int
 }
 
 // Execute the unit task
-pub fn (mut ut UnitTask) execute(input string)! string {
+pub fn (mut ut UnitTask) execute(input string) !string {
 	// Generate the prompt using the prompt function
 	prompt := ut.prompt_function(input)
 
 	// Try with the base model first
 	mut current_model := ut.base_model
 	mut attempts := 0
 	mut max_attempts := ut.retry_count + 1 // +1 for the initial attempt
 	mut absolute_max_attempts := 1 // Hard limit on total attempts
 	mut last_error := ''
 
 	for attempts < max_attempts && attempts < absolute_max_attempts {
 		attempts++
 
 		// If we've exhausted retries with the base model, switch to the retry model
 		if attempts > ut.retry_count {
 			log.error('Escalating to more powerful model: ${ut.retry_model.name}')
 			current_model = ut.retry_model
 
 			// Calculate remaining attempts but don't exceed absolute max
 			max_attempts = attempts + ut.retry_count
 			if max_attempts > absolute_max_attempts {
 				max_attempts = absolute_max_attempts
 			}
 		}
 
 		log.error('Attempt ${attempts} with model ${current_model.name}')
 
 		// Prepare the prompt with error feedback if this is a retry
 		mut current_prompt := prompt
 		if last_error != '' {
 			current_prompt = 'Previous attempt failed with error: ${last_error}\n\n${prompt}'
 		}
 
 		// Call the AI model
 		response := call_ai_model(current_prompt, current_model) or {
 			log.error('AI call failed: ${err}')
 			last_error = err.str()
 			continue // Try again
 		}
 
 		// Process the response with the callback function
 		result := ut.callback_function(response) or {
 			// If callback returns an error, retry with the error message
 			log.error('Callback returned error: ${err}')
 			last_error = err.str()
 			continue // Try again
 		}
 
 		// If we get here, the callback was successful
 		return result
 	}
 
 	return error('Failed to execute unit task after ${attempts} attempts. Last error: ${last_error}')
 }
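`execute` retries by catching errors with `or { ... continue }`, feeding the last error back into the next prompt. A compact sketch of that control flow; the `flaky` function is hypothetical:

```v
module main

fn flaky(attempt int) !string {
    if attempt < 3 {
        return error('transient failure on attempt ${attempt}')
    }
    return 'ok'
}

fn main() {
    mut last_error := ''
    for attempt in 1 .. 5 {
        // `or { ... continue }` captures `err` and moves to the next attempt,
        // mirroring the call_ai_model / callback_function loop above
        result := flaky(attempt) or {
            last_error = err.str()
            continue
        }
        println('succeeded on attempt ${attempt}: ${result}')
        return
    }
    println('gave up, last error: ${last_error}')
}
```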

View File

@@ -23,7 +23,7 @@ interface Backend {
 	tool_get(name string) !Tool
 	tool_list() ![]Tool
 	tool_call(name string, arguments map[string]json2.Any) !ToolCallResult
 
 	// Sampling methods
 	sampling_create_message(params map[string]json2.Any) !SamplingCreateMessageResult
 mut:

View File

@@ -114,16 +114,14 @@ fn (b &MemoryBackend) prompt_messages_get(name string, arguments map[string]stri
 	return messages
 }
 
 fn (b &MemoryBackend) prompt_call(name string, arguments []string) ![]PromptMessage {
 	// Get the tool handler
 	handler := b.prompt_handlers[name] or { return error('tool handler not found') }
 
 	// Call the handler with the provided arguments
-	return handler(arguments) or {panic(err)}
+	return handler(arguments) or { panic(err) }
 }
 
 // Tool related methods
 fn (b &MemoryBackend) tool_exists(name string) !bool {
@@ -165,11 +163,11 @@ fn (b &MemoryBackend) sampling_create_message(params map[string]json2.Any) !Samp
 	// Return a default implementation that just echoes back a message
 	// indicating that no sampling handler is registered
 	return SamplingCreateMessageResult{
 		model: 'default'
 		stop_reason: 'endTurn'
 		role: 'assistant'
 		content: MessageContent{
 			typ: 'text'
 			text: 'Sampling is not configured on this server. Please register a sampling handler.'
 		}
 	}

View File

@@ -8,160 +8,165 @@ import freeflowuniverse.herolib.baobab.generator
 import freeflowuniverse.herolib.baobab.specification
 
 // generate_methods_file MCP Tool
 //
 const generate_methods_file_tool = mcp.Tool{
 	name: 'generate_methods_file'
 	description: 'Generates a methods file with methods for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
 	input_schema: jsonschema.Schema{
 		typ: 'object'
-		properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
-			typ: 'object'
-			properties: {
-				'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
-					typ: 'string'
-				})
-				'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
-					typ: 'string'
-				})
-			}
-		})}
+		properties: {
+			'source': jsonschema.SchemaRef(jsonschema.Schema{
+				typ: 'object'
+				properties: {
+					'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+						typ: 'string'
+					})
+					'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+						typ: 'string'
+					})
+				}
+			})
+		}
 		required: ['source']
 	}
 }
 
 pub fn (d &Baobab) generate_methods_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
-	source := json.decode[generator.Source](arguments["source"].str())!
-	result := generator.generate_methods_file_str(source)
-	or {
+	source := json.decode[generator.Source](arguments['source'].str())!
+	result := generator.generate_methods_file_str(source) or {
 		return mcp.error_tool_call_result(err)
 	}
 	return mcp.ToolCallResult{
 		is_error: false
 		content: mcp.result_to_mcp_tool_contents[string](result)
 	}
 }
 
 // generate_module_from_openapi MCP Tool
 const generate_module_from_openapi_tool = mcp.Tool{
 	name: 'generate_module_from_openapi'
 	description: ''
 	input_schema: jsonschema.Schema{
 		typ: 'object'
-		properties: {'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
-			typ: 'string'
-		})}
+		properties: {
+			'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+				typ: 'string'
+			})
+		}
 		required: ['openapi_path']
 	}
 }
 
 pub fn (d &Baobab) generate_module_from_openapi_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
-	openapi_path := arguments["openapi_path"].str()
-	result := generator.generate_module_from_openapi(openapi_path)
-	or {
+	openapi_path := arguments['openapi_path'].str()
+	result := generator.generate_module_from_openapi(openapi_path) or {
 		return mcp.error_tool_call_result(err)
 	}
 	return mcp.ToolCallResult{
 		is_error: false
 		content: mcp.result_to_mcp_tool_contents[string](result)
 	}
 }
 
 // generate_methods_interface_file MCP Tool
 const generate_methods_interface_file_tool = mcp.Tool{
 	name: 'generate_methods_interface_file'
 	description: 'Generates a methods interface file with method interfaces for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
 	input_schema: jsonschema.Schema{
 		typ: 'object'
-		properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
-			typ: 'object'
-			properties: {
-				'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
-					typ: 'string'
-				})
-				'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
-					typ: 'string'
-				})
-			}
-		})}
+		properties: {
+			'source': jsonschema.SchemaRef(jsonschema.Schema{
+				typ: 'object'
+				properties: {
+					'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+						typ: 'string'
+					})
+					'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+						typ: 'string'
+					})
+				}
+			})
+		}
 		required: ['source']
 	}
 }
 
 pub fn (d &Baobab) generate_methods_interface_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
-	source := json.decode[generator.Source](arguments["source"].str())!
-	result := generator.generate_methods_interface_file_str(source)
-	or {
+	source := json.decode[generator.Source](arguments['source'].str())!
+	result := generator.generate_methods_interface_file_str(source) or {
 		return mcp.error_tool_call_result(err)
 	}
 	return mcp.ToolCallResult{
 		is_error: false
 		content: mcp.result_to_mcp_tool_contents[string](result)
 	}
 }
 
 // generate_model_file MCP Tool
 const generate_model_file_tool = mcp.Tool{
 	name: 'generate_model_file'
 	description: 'Generates a model file with data structures for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
 	input_schema: jsonschema.Schema{
 		typ: 'object'
-		properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
-			typ: 'object'
-			properties: {
-				'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
-					typ: 'string'
-				})
-				'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
-					typ: 'string'
-				})
-			}
-		})}
+		properties: {
+			'source': jsonschema.SchemaRef(jsonschema.Schema{
+				typ: 'object'
+				properties: {
+					'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+						typ: 'string'
+					})
+					'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+						typ: 'string'
+					})
+				}
+			})
+		}
 		required: ['source']
 	}
 }
 
 pub fn (d &Baobab) generate_model_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
-	source := json.decode[generator.Source](arguments["source"].str())!
-	result := generator.generate_model_file_str(source)
-	or {
+	source := json.decode[generator.Source](arguments['source'].str())!
+	result := generator.generate_model_file_str(source) or {
 		return mcp.error_tool_call_result(err)
 	}
 	return mcp.ToolCallResult{
 		is_error: false
 		content: mcp.result_to_mcp_tool_contents[string](result)
 	}
 }
 
 // generate_methods_example_file MCP Tool
 const generate_methods_example_file_tool = mcp.Tool{
 	name: 'generate_methods_example_file'
 	description: 'Generates a methods example file with example implementations for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
 	input_schema: jsonschema.Schema{
 		typ: 'object'
-		properties: {'source': jsonschema.SchemaRef(jsonschema.Schema{
-			typ: 'object'
-			properties: {
-				'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
-					typ: 'string'
-				})
-				'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
-					typ: 'string'
-				})
-			}
-		})}
+		properties: {
+			'source': jsonschema.SchemaRef(jsonschema.Schema{
+				typ: 'object'
+				properties: {
+					'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
+						typ: 'string'
+					})
+					'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
+						typ: 'string'
+					})
+				}
+			})
+		}
 		required: ['source']
 	}
 }
 
 pub fn (d &Baobab) generate_methods_example_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
-	source := json.decode[generator.Source](arguments["source"].str())!
-	result := generator.generate_methods_example_file_str(source)
-	or {
+	source := json.decode[generator.Source](arguments['source'].str())!
+	result := generator.generate_methods_example_file_str(source) or {
 		return mcp.error_tool_call_result(err)
 	}
 	return mcp.ToolCallResult{
 		is_error: false
 		content: mcp.result_to_mcp_tool_contents[string](result)
 	}
 }
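Every schema change in this file is the same `v fmt` reflow: a map literal opened inline on the `properties:` line is expanded to one key per line. A small standalone sketch of that normalization:

```v
module main

fn main() {
    // v fmt expands inline map literals: each key on its own line
    properties := {
        'openapi_path': 'string'
        'openrpc_path': 'string'
    }
    for name, typ in properties {
        println('${name}: ${typ}')
    }
}
```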

View File

@@ -13,7 +13,7 @@ import os
 fn test_generate_module_from_openapi_tool() {
 	// Verify the tool definition
 	assert generate_module_from_openapi_tool.name == 'generate_module_from_openapi', 'Tool name should be "generate_module_from_openapi"'
 
 	// Verify the input schema
 	assert generate_module_from_openapi_tool.input_schema.typ == 'object', 'Input schema type should be "object"'
 	assert 'openapi_path' in generate_module_from_openapi_tool.input_schema.properties, 'Input schema should have "openapi_path" property'
@@ -26,14 +26,14 @@ fn test_generate_module_from_openapi_tool_handler_error() {
 	// Create arguments with a non-existent file path
 	mut arguments := map[string]json2.Any{}
 	arguments['openapi_path'] = json2.Any('non_existent_file.yaml')
 
 	// Call the handler
 	result := generate_module_from_openapi_tool_handler(arguments) or {
 		// If the handler returns an error, that's expected
 		assert err.msg().contains(''), 'Error message should not be empty'
 		return
 	}
 
 	// If we get here, the handler should have returned an error result
 	assert result.is_error, 'Result should indicate an error'
 	assert result.content.len > 0, 'Error content should not be empty'
@@ -48,7 +48,7 @@ fn test_mcp_tool_call_integration() {
 		assert false, 'Failed to create MCP server: ${err}'
 		return
 	}
 
 	// Create a temporary OpenAPI file for testing
 	temp_dir := os.temp_dir()
 	temp_file := os.join_path(temp_dir, 'test_openapi.yaml')
@@ -56,30 +56,30 @@ fn test_mcp_tool_call_integration() {
 		assert false, 'Failed to create temporary file: ${err}'
 		return
 	}
 
 	// Sample tool call request
 	tool_call_request := '{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"generate_module_from_openapi","arguments":{"openapi_path":"${temp_file}"}}}'
 
 	// Process the request through the handler
 	response := server.handler.handle(tool_call_request) or {
 		// Clean up the temporary file
 		os.rm(temp_file) or {}
 		// If the handler returns an error, that's expected in this test environment
 		// since we might not have all dependencies set up
 		return
 	}
 
 	// Clean up the temporary file
 	os.rm(temp_file) or {}
 
 	// Decode the response to verify its structure
 	decoded_response := jsonrpc.decode_response(response) or {
 		// In a test environment, we might get an error due to missing dependencies
 		// This is acceptable for this test
 		return
 	}
 
 	// If we got a successful response, verify it
 	if !decoded_response.is_error() {
 		// Parse the result to verify its contents
@@ -87,15 +87,15 @@ fn test_mcp_tool_call_integration() {
 			assert false, 'Failed to get result: ${err}'
 			return
 		}
 
 		// Decode the result to check the content
 		result_map := json2.raw_decode(result_json) or {
 			assert false, 'Failed to decode result: ${err}'
 			return
 		}.as_map()
 
 		// Verify the result structure
 		assert 'isError' in result_map, 'Result should have isError field'
 		assert 'content' in result_map, 'Result should have content field'
 	}
 }

View File

@@ -2,22 +2,21 @@ module baobab
 
 import cli
 
-pub const command := cli.Command{
+pub const command = cli.Command{
 	sort_flags: true
 	name: 'baobab'
 	// execute: cmd_mcpgen
 	description: 'baobab command'
 	commands: [
 		cli.Command{
 			name: 'start'
			execute: cmd_start
 			description: 'start the Baobab server'
-		}
+		},
 	]
 }
 
 fn cmd_start(cmd cli.Command) ! {
 	mut server := new_mcp_server(&Baobab{})!
 	server.start()!
 }
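`pub const command := ...` is corrected to `pub const command = ...`: V declares constants with `=`, while `:=` is reserved for local variables. The same fix appears again in the mcpgen command file below. A minimal sketch:

```v
module main

const greeting = 'hello' // consts are declared with `=`

fn main() {
    name := 'baobab' // locals are declared with `:=`
    println('${greeting}, ${name}')
}
```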

View File

@@ -67,7 +67,7 @@ fn test_mcp_server_initialize() {
 	// Verify the protocol version matches what was requested
 	assert result.protocol_version == '2024-11-05', 'Protocol version should match the request'
 
 	// Verify server info
 	assert result.server_info.name == 'developer', 'Server name should be "developer"'
 }
@@ -113,7 +113,7 @@ fn test_tools_list() {
 	// Verify that the tools array exists and contains the expected tool
 	tools := result_map['tools'].arr()
 	assert tools.len > 0, 'Tools list should not be empty'
 
 	// Find the generate_module_from_openapi tool
 	mut found_tool := false
 	for tool in tools {
@@ -123,6 +123,6 @@ fn test_tools_list() {
 			break
 		}
 	}
 
 	assert found_tool, 'generate_module_from_openapi tool should be registered'
 }

View File

@@ -13,18 +13,18 @@ pub fn new_mcp_server(v &Baobab) !&mcp.Server {
 	// Initialize the server with the empty handlers map
 	mut server := mcp.new_server(mcp.MemoryBackend{
 		tools: {
 			'generate_module_from_openapi': generate_module_from_openapi_tool
 			'generate_methods_file': generate_methods_file_tool
 			'generate_methods_interface_file': generate_methods_interface_file_tool
 			'generate_model_file': generate_model_file_tool
 			'generate_methods_example_file': generate_methods_example_file_tool
 		}
 		tool_handlers: {
 			'generate_module_from_openapi': v.generate_module_from_openapi_tool_handler
 			'generate_methods_file': v.generate_methods_file_tool_handler
 			'generate_methods_interface_file': v.generate_methods_interface_file_tool_handler
 			'generate_model_file': v.generate_model_file_tool_handler
 			'generate_methods_example_file': v.generate_methods_example_file_tool_handler
 		}
 	}, mcp.ServerParams{
 		config: mcp.ServerConfiguration{
@@ -35,4 +35,4 @@ pub fn new_mcp_server(v &Baobab) !&mcp.Server {
 	}
 	})!
 	return server
 }

View File

@@ -13,20 +13,20 @@ prod_mode := fp.bool('prod', `p`, false, 'Build production version (optimized)')
 help_requested := fp.bool('help', `h`, false, 'Show help message')
 
 if help_requested {
 	println(fp.usage())
 	exit(0)
 }
 
 additional_args := fp.finalize() or {
 	eprintln(err)
 	println(fp.usage())
 	exit(1)
 }
 
 if additional_args.len > 0 {
 	eprintln('Unexpected arguments: ${additional_args.join(' ')}')
 	println(fp.usage())
 	exit(1)
 }
 
 // Change to the mcp directory
@@ -36,20 +36,20 @@ os.chdir(mcp_dir) or { panic('Failed to change directory to ${mcp_dir}: ${err}')
 // Set MCPPATH based on OS
 mut mcppath := '/usr/local/bin/mcp'
 if os.user_os() == 'macos' {
 	mcppath = os.join_path(os.home_dir(), 'hero/bin/mcp')
 }
 
 // Set compilation command based on OS and mode
 compile_cmd := if prod_mode {
 	'v -enable-globals -w -n -prod mcp.v'
 } else {
 	'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals mcp.v'
 }
 
 println('Building MCP in ${if prod_mode { 'production' } else { 'debug' }} mode...')
 if os.system(compile_cmd) != 0 {
 	panic('Failed to compile mcp.v with command: ${compile_cmd}')
 }
 
 // Make executable
View File

@@ -45,11 +45,11 @@ mcp
 	description: 'show verbose output'
 })
 
-mut cmd_inspector := cli.Command{
+mut cmd_inspector := Command{
 	sort_flags: true
 	name: 'inspector'
 	execute: cmd_inspector_execute
 	description: 'will list existing mdbooks'
 }
 
 cmd_inspector.add_flag(Flag{
@@ -68,7 +68,6 @@ mcp
 	description: 'open inspector'
 })
 
 cmd_mcp.add_command(rhai_mcp.command)
 cmd_mcp.add_command(rust.command)
 // cmd_mcp.add_command(baobab.command)
@@ -79,7 +78,7 @@ mcp
 cmd_mcp.parse(os.args)
 }
 
-fn cmd_inspector_execute(cmd cli.Command) ! {
+fn cmd_inspector_execute(cmd Command) ! {
 	open := cmd.flags.get_bool('open') or { false }
 	if open {
 		osal.exec(cmd: 'open http://localhost:5173')!
@@ -91,4 +90,4 @@ fn cmd_inspector_execute(cmd cli.Command) ! {
 	} else {
 		osal.exec(cmd: 'npx @modelcontextprotocol/inspector')!
 	}
 }

View File

@@ -1,6 +1,5 @@
module mcp module mcp
pub fn result_to_mcp_tool_contents[T](result T) []ToolContent { pub fn result_to_mcp_tool_contents[T](result T) []ToolContent {
return [result_to_mcp_tool_content[T](result)] return [result_to_mcp_tool_content[T](result)]
} }
@@ -50,4 +49,4 @@ pub fn array_to_mcp_tool_contents[U](array []U) []ToolContent {
contents << result_to_mcp_tool_content(item) contents << result_to_mcp_tool_content(item)
} }
return contents return contents
} }

View File

@@ -110,7 +110,8 @@ fn (mut s Server) prompts_get_handler(data string) !string {
 	// messages := s.backend.prompt_messages_get(request.params.name, request.params.arguments)!
 
 	// Create a success response with the result
-	response := jsonrpc.new_response_generic[PromptGetResult](request_map['id'].int(), PromptGetResult{
+	response := jsonrpc.new_response_generic[PromptGetResult](request_map['id'].int(),
+		PromptGetResult{
 		description: prompt.description
 		messages: messages
 	})

View File

@@ -30,9 +30,9 @@ pub:
 pub struct ModelPreferences {
 pub:
 	hints []ModelHint
 	cost_priority f32 @[json: 'costPriority']
 	speed_priority f32 @[json: 'speedPriority']
 	intelligence_priority f32 @[json: 'intelligencePriority']
 }
@@ -43,8 +43,8 @@ pub:
 	system_prompt string @[json: 'systemPrompt']
 	include_context string @[json: 'includeContext']
 	temperature f32
 	max_tokens int @[json: 'maxTokens']
 	stop_sequences []string @[json: 'stopSequences']
 	metadata map[string]json2.Any
 }
@@ -63,21 +63,21 @@ fn (mut s Server) sampling_create_message_handler(data string) !string {
 	request_map := json2.raw_decode(data)!.as_map()
 	id := request_map['id'].int()
 	params_map := request_map['params'].as_map()
 
 	// Validate required parameters
 	if 'messages' !in params_map {
 		return jsonrpc.new_error_response(id, missing_required_argument('messages')).encode()
 	}
 
 	if 'maxTokens' !in params_map {
 		return jsonrpc.new_error_response(id, missing_required_argument('maxTokens')).encode()
 	}
 
 	// Call the backend to handle the sampling request
 	result := s.backend.sampling_create_message(params_map) or {
 		return jsonrpc.new_error_response(id, sampling_error(err.msg())).encode()
 	}
 
 	// Create a success response with the result
 	response := jsonrpc.new_response(id, json.encode(result))
 	return response.encode()
@@ -87,30 +87,30 @@ fn (mut s Server) sampling_create_message_handler(data string) !string {
 fn parse_messages(messages_json json2.Any) ![]Message {
 	messages_arr := messages_json.arr()
 	mut result := []Message{cap: messages_arr.len}
 
 	for msg_json in messages_arr {
 		msg_map := msg_json.as_map()
 
 		if 'role' !in msg_map {
 			return error('Missing role in message')
 		}
 
 		if 'content' !in msg_map {
 			return error('Missing content in message')
 		}
 
 		role := msg_map['role'].str()
 		content_map := msg_map['content'].as_map()
 
 		if 'type' !in content_map {
 			return error('Missing type in message content')
 		}
 
 		typ := content_map['type'].str()
 		mut text := ''
 		mut data := ''
 		mut mimetype := ''
 
 		if typ == 'text' {
 			if 'text' !in content_map {
 				return error('Missing text in text content')
@@ -121,7 +121,7 @@ fn parse_messages(messages_json json2.Any) ![]Message {
 				return error('Missing data in image content')
 			}
 			data = content_map['data'].str()
 
 			if 'mimeType' !in content_map {
 				return error('Missing mimeType in image content')
 			}
@@ -129,17 +129,17 @@ fn parse_messages(messages_json json2.Any) ![]Message {
 		} else {
 			return error('Unsupported content type: ${typ}')
 		}
 
 		result << Message{
 			role: role
 			content: MessageContent{
 				typ: typ
 				text: text
 				data: data
 				mimetype: mimetype
 			}
 		}
 	}
 
 	return result
 }
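`parse_messages` walks raw `json2` values by hand: decode, `as_map()`, check membership with `!in`, then pull typed values out. A self-contained sketch of the same pattern using only `x.json2`; the JSON payload is illustrative:

```v
module main

import x.json2

fn main() {
    raw := '{"role":"user","content":{"type":"text","text":"hi"}}'
    // chaining .as_map() after an `or` block, as in the tests above
    msg := json2.raw_decode(raw) or { panic(err) }.as_map()
    // membership checks before access, as in parse_messages
    if 'role' !in msg {
        panic('missing role in message')
    }
    role := msg['role'].str()
    content := msg['content'].as_map()
    text := content['text'].str()
    println('${role}: ${text}')
}
```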

View File

@@ -26,8 +26,8 @@ pub:
 pub struct ToolItems {
 pub:
 	typ string @[json: 'type']
 	enum []string
 	properties map[string]ToolProperty
 }
@@ -63,7 +63,7 @@ fn (mut s Server) tools_list_handler(data string) !string {
 	// TODO: Implement pagination logic using the cursor
 	// For now, return all tools
 	encoded := json.encode(ToolListResult{
 		tools: s.backend.tool_list()!
 		next_cursor: '' // Empty if no more pages
 	})
@@ -148,4 +148,4 @@ pub fn error_tool_call_result(err IError) ToolCallResult {
 			text: err.msg()
 		}]
 	}
 }

View File

@@ -2,22 +2,21 @@ module mcpgen
 
 import cli
 
-pub const command := cli.Command{
+pub const command = cli.Command{
 	sort_flags: true
 	name: 'mcpgen'
 	// execute: cmd_mcpgen
 	description: 'will list existing mdbooks'
 	commands: [
 		cli.Command{
 			name: 'start'
 			execute: cmd_start
 			description: 'start the MCP server'
-		}
+		},
 	]
 }
 
 fn cmd_start(cmd cli.Command) ! {
 	mut server := new_mcp_server(&MCPGen{})!
 	server.start()!
 }

View File

@@ -7,7 +7,7 @@ import freeflowuniverse.herolib.schemas.jsonschema.codegen
import os import os
pub struct FunctionPointer { pub struct FunctionPointer {
name string // name of function name string // name of function
module_path string // path to module module_path string // path to module
} }
@@ -15,14 +15,14 @@ pub struct FunctionPointer {
// returns an MCP Tool code in v for attaching the function to the mcp server // returns an MCP Tool code in v for attaching the function to the mcp server
// function_pointers: A list of function pointers to generate tools for // function_pointers: A list of function pointers to generate tools for
pub fn (d &MCPGen) create_mcp_tools_code(function_pointers []FunctionPointer) !string { pub fn (d &MCPGen) create_mcp_tools_code(function_pointers []FunctionPointer) !string {
mut str := "" mut str := ''
for function_pointer in function_pointers { for function_pointer in function_pointers {
str += d.create_mcp_tool_code(function_pointer.name, function_pointer.module_path)! str += d.create_mcp_tool_code(function_pointer.name, function_pointer.module_path)!
} }
return str return str
} }
// create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists. // create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
// returns an MCP Tool code in v for attaching the function to the mcp server // returns an MCP Tool code in v for attaching the function to the mcp server
@@ -30,11 +30,10 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
if !os.exists(module_path) { if !os.exists(module_path) {
return error('Module path does not exist: ${module_path}') return error('Module path does not exist: ${module_path}')
} }
function := code.get_function_from_module(module_path, function_name) or { function := code.get_function_from_module(module_path, function_name) or {
return error('Failed to get function ${function_name} from module ${module_path}\n${err}') return error('Failed to get function ${function_name} from module ${module_path}\n${err}')
} }
mut types := map[string]string{} mut types := map[string]string{}
for param in function.params { for param in function.params {
@@ -43,9 +42,9 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
types[param.typ.symbol()] = code.get_type_from_module(module_path, param.typ.symbol())! types[param.typ.symbol()] = code.get_type_from_module(module_path, param.typ.symbol())!
} }
} }
// Get the result type if it's a struct // Get the result type if it's a struct
mut result_ := "" mut result_ := ''
if function.result.typ is code.Result { if function.result.typ is code.Result {
result_type := (function.result.typ as code.Result).typ result_type := (function.result.typ as code.Result).typ
if result_type is code.Object { if result_type is code.Object {
@@ -60,7 +59,7 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
handler := d.create_mcp_tool_handler(function, types, result_)! handler := d.create_mcp_tool_handler(function, types, result_)!
str := $tmpl('./templates/tool_code.v.template') str := $tmpl('./templates/tool_code.v.template')
return str return str
} }
// create_mcp_tool parses a V language function string and returns an MCP Tool struct // create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments // function: The V function string including preceding comments
@@ -68,7 +67,7 @@ pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string
// result: The type of result of the create_mcp_tool function. Could be simply string, or struct {...} // result: The type of result of the create_mcp_tool function. Could be simply string, or struct {...}
pub fn (d &MCPGen) create_mcp_tool_handler(function code.Function, types map[string]string, result_ string) !string { pub fn (d &MCPGen) create_mcp_tool_handler(function code.Function, types map[string]string, result_ string) !string {
decode_stmts := function.params.map(argument_decode_stmt(it)).join_lines() decode_stmts := function.params.map(argument_decode_stmt(it)).join_lines()
function_call := 'd.${function.name}(${function.params.map(it.name).join(',')})' function_call := 'd.${function.name}(${function.params.map(it.name).join(',')})'
result := code.parse_type(result_) result := code.parse_type(result_)
str := $tmpl('./templates/tool_handler.v.template') str := $tmpl('./templates/tool_handler.v.template')
@@ -92,6 +91,7 @@ pub fn argument_decode_stmt(param code.Param) string {
panic('Unsupported type: ${param.typ}') panic('Unsupported type: ${param.typ}')
} }
} }
/* /*
in @generate_mcp.v , implement a create_mpc_tool_handler function that given a vlang function string and the types that map to their corresponding type definitions (for instance struct some_type: SomeType{...}), generates a vlang function such as the following: in @generate_mcp.v , implement a create_mpc_tool_handler function that given a vlang function string and the types that map to their corresponding type definitions (for instance struct some_type: SomeType{...}), generates a vlang function such as the following:
@@ -103,7 +103,6 @@ pub fn (d &MCPGen) create_mcp_tool_tool_handler(arguments map[string]Any) !mcp.T
} }
*/ */
// create_mcp_tool parses a V language function string and returns an MCP Tool struct // create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments // function: The V function string including preceding comments
// types: A map of struct names to their definitions for complex parameter types // types: A map of struct names to their definitions for complex parameter types
@@ -111,14 +110,14 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// Create input schema for parameters // Create input schema for parameters
mut properties := map[string]jsonschema.SchemaRef{} mut properties := map[string]jsonschema.SchemaRef{}
mut required := []string{} mut required := []string{}
for param in function.params { for param in function.params {
// Add to required parameters // Add to required parameters
required << param.name required << param.name
// Create property for this parameter // Create property for this parameter
mut property := jsonschema.SchemaRef{} mut property := jsonschema.SchemaRef{}
// Check if this is a complex type defined in the types map // Check if this is a complex type defined in the types map
if param.typ.symbol() in types { if param.typ.symbol() in types {
// Parse the struct definition to create a nested schema // Parse the struct definition to create a nested schema
@@ -133,21 +132,21 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// Handle primitive types // Handle primitive types
property = codegen.typesymbol_to_schema(param.typ.symbol()) property = codegen.typesymbol_to_schema(param.typ.symbol())
} }
properties[param.name] = property properties[param.name] = property
} }
// Create the input schema // Create the input schema
input_schema := jsonschema.Schema{ input_schema := jsonschema.Schema{
typ: 'object', typ: 'object'
properties: properties, properties: properties
required: required required: required
} }
// Create and return the Tool // Create and return the Tool
return mcp.Tool{ return mcp.Tool{
name: function.name, name: function.name
description: function.description, description: function.description
input_schema: input_schema input_schema: input_schema
} }
} }
@@ -157,7 +156,7 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// // returns: A jsonschema.Schema for the given input type
// // errors: Returns an error if the input type is not supported
// pub fn (d MCPGen) create_mcp_tool_input_schema(input string) !jsonschema.Schema {
// 	// if input is a primitive type, return a mcp jsonschema.Schema with that type
// 	if input == 'string' {
// 		return jsonschema.Schema{
@@ -176,30 +175,30 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// 			typ: 'boolean'
// 		}
// 	}
// 	// if input is a struct, return a mcp jsonschema.Schema with typ 'object' and properties for each field in the struct
// 	if input.starts_with('pub struct ') {
// 		struct_name := input[11..].split(' ')[0]
// 		fields := parse_struct_fields(input)
// 		mut properties := map[string]jsonschema.Schema{}
// 		for field_name, field_type in fields {
// 			property := jsonschema.Schema{
// 				typ: d.create_mcp_tool_input_schema(field_type)!.typ
// 			}
// 			properties[field_name] = property
// 		}
// 		return jsonschema.Schema{
// 			typ: 'object',
// 			properties: properties
// 		}
// 	}
// 	// if input is an array, return a mcp jsonschema.Schema with typ 'array' and items of the item type
// 	if input.starts_with('[]') {
// 		item_type := input[2..]
// 		// For array types, we create a schema with type 'array'
// 		// The actual item type is determined by the primitive type
// 		mut item_type_str := 'string' // default
@@ -210,74 +209,73 @@ pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]strin
// 	} else if item_type == 'bool' {
// 		item_type_str = 'boolean'
// 	}
// 		// Create a property for the array items
// 		mut property := jsonschema.Schema{
// 			typ: 'array'
// 		}
// 		// Add the property to the schema
// 		mut properties := map[string]jsonschema.Schema{}
// 		properties['items'] = property
// 		return jsonschema.Schema{
// 			typ: 'array',
// 			properties: properties
// 		}
// 	}
// 	// Default to string type for unknown types
// 	return jsonschema.Schema{
// 		typ: 'string'
// 	}
// }
// parse_struct_fields parses a V language struct definition string and returns a map of field names to their types
fn parse_struct_fields(struct_def string) map[string]string {
	mut fields := map[string]string{}
	// Find the opening and closing braces of the struct definition
	start_idx := struct_def.index('{') or { return fields }
	end_idx := struct_def.last_index('}') or { return fields }
	// Extract the content between the braces
	struct_content := struct_def[start_idx + 1..end_idx].trim_space()
	// Split the content by newlines to get individual field definitions
	field_lines := struct_content.split('\n')
	for line in field_lines {
		trimmed_line := line.trim_space()
		// Skip empty lines and comments
		if trimmed_line == '' || trimmed_line.starts_with('//') {
			continue
		}
		// Handle pub: or mut: prefixes
		mut field_def := trimmed_line
		if field_def.starts_with('pub:') || field_def.starts_with('mut:') {
			field_def = field_def.all_after(':').trim_space()
		}
		// Split by whitespace to separate field name and type
		parts := field_def.split_any(' ')
		if parts.len < 2 {
			continue
		}
		field_name := parts[0]
		field_type := parts[1..].join(' ')
		// Handle attributes like @[json: 'name']
		if field_name.contains('@[') {
			continue
		}
		fields[field_name] = field_type
	}
	return fields
}
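// Hedged usage sketch (illustration only, not part of this commit); the struct
// definition and expected output below are hypothetical.
fn parse_struct_fields_example() {
	struct_def := 'pub struct Point {\npub:\n\tx int\n\ty int\n}'
	fields := parse_struct_fields(struct_def)
	// fields is now {'x': 'int', 'y': 'int'}
	println(fields)
}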


@@ -12,42 +12,41 @@ import x.json2 as json { Any }
// function_pointers: A list of function pointers to generate tools for
const create_mcp_tools_code_tool = mcp.Tool{
	name: 'create_mcp_tools_code'
	description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
returns an MCP Tool code in v for attaching the function to the mcp server
function_pointers: A list of function pointers to generate tools for'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'function_pointers': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'array'
				items: jsonschema.Items(jsonschema.SchemaRef(jsonschema.Schema{
					typ: 'object'
					properties: {
						'name': jsonschema.SchemaRef(jsonschema.Schema{
							typ: 'string'
						})
						'module_path': jsonschema.SchemaRef(jsonschema.Schema{
							typ: 'string'
						})
					}
					required: ['name', 'module_path']
				}))
			})
		}
		required: ['function_pointers']
	}
}
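// Hedged note (not part of the commit): the nested object schema above mirrors
// the FunctionPointer shape (name + module_path) that the handler below decodes.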
pub fn (d &MCPGen) create_mcp_tools_code_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
	function_pointers := json.decode[[]FunctionPointer](arguments['function_pointers'].str())!
	result := d.create_mcp_tools_code(function_pointers) or {
		return mcp.error_tool_call_result(err)
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](result)
	}
}
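// Hedged note (not part of the commit): over the wire the handler above expects
// arguments like {"function_pointers": [{"name": "my_fn", "module_path": "my/mod"}]};
// the names shown are hypothetical.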
@@ -59,10 +58,10 @@ returns an MCP Tool code in v for attaching the function to the mcp server'
		typ: 'object'
		properties: {
			'function_name': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
			'module_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
		}
		required: ['function_name', 'module_path']


@@ -12,16 +12,16 @@ pub fn new_mcp_server(v &MCPGen) !&mcp.Server {
	// Initialize the server with the empty handlers map
	mut server := mcp.new_server(mcp.MemoryBackend{
		tools: {
			'create_mcp_tool_code':    create_mcp_tool_code_tool
			'create_mcp_tool_const':   create_mcp_tool_const_tool
			'create_mcp_tool_handler': create_mcp_tool_handler_tool
			'create_mcp_tools_code':   create_mcp_tools_code_tool
		}
		tool_handlers: {
			'create_mcp_tool_code':    v.create_mcp_tool_code_tool_handler
			'create_mcp_tool_const':   v.create_mcp_tool_const_tool_handler
			'create_mcp_tool_handler': v.create_mcp_tool_handler_tool_handler
			'create_mcp_tools_code':   v.create_mcp_tools_code_tool_handler
		}
	}, mcp.ServerParams{
		config: mcp.ServerConfiguration{
@@ -32,4 +32,4 @@ pub fn new_mcp_server(v &MCPGen) !&mcp.Server {
		}
	})!
	return server
}


@@ -8,7 +8,7 @@ fn main() {
		eprintln('Failed to create MCP server: ${err}')
		return
	}
	// Start the server
	server.start() or {
		eprintln('Failed to start MCP server: ${err}')


@@ -5,8 +5,7 @@ import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import json
pub fn convert_pug(mydir string) ! {
	mut d := pathlib.get_dir(path: mydir, create: false)!
	list := d.list(regex: [r'.*\.pug$'], include_links: false, files_only: true)!
	for item in list.paths {
@@ -17,12 +16,12 @@ pub fn convert_pug(mydir string)! {
// extract_template parses AI response content to extract just the template
fn extract_template(raw_content string) string {
	mut content := raw_content
	// First check for </think> tag
	if content.contains('</think>') {
		content = content.split('</think>')[1].trim_space()
	}
	// Look for ```jet code block
	if content.contains('```jet') {
		parts := content.split('```jet')
@@ -39,7 +38,7 @@ fn extract_template(raw_content string) string {
			// Take the content between the first set of ```
			// This handles both ```content``` and cases where there's only an opening ```
			content = parts[1].trim_space()
			// If we only see an opening ``` but no closing, cleanup any remaining backticks
			// to avoid incomplete formatting markers
			if !content.contains('```') {
@@ -47,16 +46,16 @@ fn extract_template(raw_content string) string {
			}
		}
	}
	return content
}
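// Hedged illustration (not part of the commit): for a model response such as
// '<reasoning>...</think> ```jet <template body> ```', extract_template first
// drops everything up to </think>, then unwraps the ```jet fence, so only the
// template body is returned.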
pub fn convert_pug_file(myfile string) ! {
	println(myfile)
	// Create new file path by replacing .pug extension with .jet
	jet_file := myfile.replace('.pug', '.jet')
	// Check if jet file already exists, if so skip processing
	mut jet_path_exist := pathlib.get_file(path: jet_file, create: false)!
	if jet_path_exist.exists() {
@@ -69,7 +68,7 @@ pub fn convert_pug_file(myfile string)! {
	mut l := loader()
	mut client := openai.get()!
	base_instruction := '
You are a template language converter. You convert Pug templates to Jet templates.
@@ -82,25 +81,24 @@ pub fn convert_pug_file(myfile string)! {
only output the resulting template, no explanation, no steps, just the jet template
'
	// We'll retry up to 5 times if validation fails
	max_attempts := 5
	mut attempts := 0
	mut is_valid := false
	mut error_message := ''
	mut template := ''
	for attempts < max_attempts && !is_valid {
		attempts++
		mut system_content := texttools.dedent(base_instruction) + '\n' + l.jet()
		mut user_prompt := ''
		// Create different prompts for first attempt vs retries
		if attempts == 1 {
			// First attempt - convert from PUG
			user_prompt = texttools.dedent(base_user_prompt) + '\n' + content
			// Print what we're sending to the AI service
			println('Sending to OpenAI for conversion:')
			println('--------------------------------')
@@ -127,53 +125,57 @@ Please fix the template and try again. Learn from feedback and check which jet t
Return only the corrected Jet template.
Don't send back more information than the fixed template, make sure it's in jet format.
' // Print what we're sending for the retry
			println('Sending to OpenAI for correction:')
			println('--------------------------------')
			println(user_prompt)
			println('--------------------------------')
		}
		mut m := openai.Messages{
			messages: [
				openai.Message{
					role: .system
					content: system_content
				},
				openai.Message{
					role: .user
					content: user_prompt
				},
			]
		}
		// Create a chat completion request
		res := client.chat_completion(
			msgs: m
			model: 'deepseek-r1-distill-llama-70b'
			max_completion_tokens: 64000
		)!
		println('-----')
		// Print AI response before extraction
		println('Response received from AI:')
		println('--------------------------------')
		println(res.choices[0].message.content)
		println('--------------------------------')
		// Extract the template from the AI response
		template = extract_template(res.choices[0].message.content)
		println('Extracted template for ${myfile}:')
		println('--------------------------------')
		println(template)
		println('--------------------------------')
		// Validate the template
		validation_result := jetvaliditycheck(template) or {
			// If validation service is unavailable, we'll just proceed with the template
			println('Warning: Template validation service unavailable: ${err}')
			break
		}
		// Check if template is valid
		if validation_result.is_valid {
			is_valid = true
@@ -183,19 +185,19 @@ Dont send back more information than the fixed template, make sure its in jet fo
			println('Template validation failed: ${error_message}')
		}
	}
	// Report the validation outcome
	if is_valid {
		println('Successfully converted template after ${attempts} attempt(s)')
		// Create the file and write the processed content
		println('Converted to: ${jet_file}')
		mut jet_path := pathlib.get_file(path: jet_file, create: true)!
		jet_path.write(template)!
	} else if attempts >= max_attempts {
		println('Warning: Could not validate template after ${max_attempts} attempts')
		println('Using best attempt despite validation errors: ${error_message}')
		jet_file2 := jet_file.replace('.jet', '_error.jet')
		mut jet_path2 := pathlib.get_file(path: jet_file2, create: true)!
		jet_path2.write(template)!
	}
}
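// Hedged note (not part of the commit): on success the converted template lands
// next to the source as foo.jet; after 5 failed validation attempts the best
// attempt is still written, but to foo_error.jet, so no conversion work is lost.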


@@ -5,9 +5,9 @@ import json
// JetTemplateResponse is the expected response structure from the validation service
struct JetTemplateResponse {
	valid   bool
	message string
	error   string
}
// ValidationResult represents the result of a template validation
@@ -30,7 +30,7 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
	template_data := json.encode({
		'template': jetcontent
	})
	// Print what we're sending to the AI service
	// println('Sending to JET validation service:')
	// println('--------------------------------')
@@ -39,8 +39,8 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
	// Send the POST request to the validation endpoint
	req := httpconnection.Request{
		prefix: 'checkjet'
		data: template_data
		dataformat: .json
	}
@@ -49,7 +49,7 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
		// Handle connection errors
		return ValidationResult{
			is_valid: false
			error: 'Connection error: ${err}'
		}
	}
@@ -58,12 +58,12 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
		// If we can't parse JSON using our struct, the server didn't return the expected format
		return ValidationResult{
			is_valid: false
			error: 'Server returned unexpected format: ${err.msg()}'
		}
	}
	// Use the structured response data
	if response.valid == false {
		error_msg := if response.error != '' {
			response.error
		} else if response.message != '' {
@@ -74,12 +74,12 @@ pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
		return ValidationResult{
			is_valid: false
			error: error_msg
		}
	}
	return ValidationResult{
		is_valid: true
		error: ''
	}
}
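// Hedged usage sketch (illustration only, not part of this commit):
fn jetvaliditycheck_example() {
	result := jetvaliditycheck('{{ title }}') or {
		println('validation service unreachable: ${err}')
		return
	}
	if !result.is_valid {
		println('invalid template: ${result.error}')
	}
}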


@@ -10,12 +10,11 @@ pub mut:
}
fn (mut loader FileLoader) load() {
	loader.embedded_files['jet'] = $embed_file('templates/jet_instructions.md')
}
fn (mut loader FileLoader) jet() string {
	c := loader.embedded_files['jet'] or { panic('bug embed') }
	return c.to_string()
}
@@ -23,4 +22,4 @@ fn loader() FileLoader {
	mut loader := FileLoader{}
	loader.load()
	return loader
}


@@ -8,47 +8,47 @@ import os
pub fn handler(arguments map[string]Any) !mcp.ToolCallResult {
	path := arguments['path'].str()
	// Check if path exists
	if !os.exists(path) {
		return mcp.ToolCallResult{
			is_error: true
			content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
		}
	}
	// Determine if path is a file or directory
	is_directory := os.is_dir(path)
	mut message := ''
	if is_directory {
		// Convert all pug files in the directory
		pugconvert.convert_pug(path) or {
			return mcp.ToolCallResult{
				is_error: true
				content: mcp.result_to_mcp_tool_contents[string]('Error converting pug files in directory: ${err}')
			}
		}
		message = "Successfully converted all pug files in directory '${path}'"
	} else if path.ends_with('.pug') {
		// Convert a single pug file
		pugconvert.convert_pug_file(path) or {
			return mcp.ToolCallResult{
				is_error: true
				content: mcp.result_to_mcp_tool_contents[string]('Error converting pug file: ${err}')
			}
		}
		message = "Successfully converted pug file '${path}'"
	} else {
		return mcp.ToolCallResult{
			is_error: true
			content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
		}
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](message)
	}
}
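// Hedged note (not part of the commit): the handler expects arguments shaped as
// {"path": "<string>"}; a directory converts every .pug file inside it, a single
// path must end in .pug, and anything else yields an is_error result.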


@@ -1,18 +1,18 @@
module mcp
import freeflowuniverse.herolib.ai.mcp
import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.ai.mcp.logger
const specs = mcp.Tool{
	name: 'pugconvert'
	description: 'Convert Pug template files to Jet template files'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to a .pug file or directory containing .pug files to convert'
			})
		}


@@ -9,7 +9,7 @@ fn main() {
		log.error('Failed to create MCP server: ${err}')
		return
	}
	// Start the server
	server.start() or {
		log.error('Failed to start MCP server: ${err}')


@@ -4,163 +4,175 @@ import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
import os
fn main() {
	// Get the current directory
	current_dir := os.dir(@FILE)
	// Check if a source code path was provided as an argument
	if os.args.len < 2 {
		println('Please provide the path to the source code directory as an argument')
		println('Example: ./example.vsh /path/to/source/code/directory')
		return
	}

	// Get the source code path from the command line arguments
	source_code_path := os.args[1]

	// Check if the path exists and is a directory
	if !os.exists(source_code_path) {
		println('Source code path does not exist: ${source_code_path}')
		return
	}

	if !os.is_dir(source_code_path) {
		println('Source code path is not a directory: ${source_code_path}')
		return
	}

	// Get all Rust files in the directory
	files := os.ls(source_code_path) or {
		println('Failed to list files in directory: ${err}')
		return
	}

	// Combine all Rust files into a single source code string
	mut source_code := ''
	for file in files {
		file_path := os.join_path(source_code_path, file)
		// Skip directories and non-Rust files
		if os.is_dir(file_path) || !file.ends_with('.rs') {
			continue
		}
		// Read the file content
		file_content := os.read_file(file_path) or {
			println('Failed to read file ${file_path}: ${err}')
			continue
		}
		// Add file content to the combined source code
		source_code += '// File: ${file}\n${file_content}\n\n'
	}

	if source_code == '' {
		println('No Rust files found in directory: ${source_code_path}')
		return
	}

	// Read the rhaiwrapping.md file
	rhai_wrapping_md := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping.md') or {
		println('Failed to read rhaiwrapping.md: ${err}')
		return
	}

	// Determine the crate path from the source code path
	// Extract the path relative to the src directory
	src_index := source_code_path.index('src/') or {
		println('Could not determine crate path: src/ not found in path')
		return
	}
	mut path_parts := source_code_path[src_index + 4..].split('/')
	// Remove the last part (the file name)
	if path_parts.len > 0 {
		path_parts.delete_last()
	}
	rel_path := path_parts.join('::')
	crate_path := 'sal::${rel_path}'

	// Create a new task
	mut task := escalayer.new_task(
		name: 'rhai_wrapper_creator.escalayer'
		description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
	)

	// Create model configs
	sonnet_model := escalayer.ModelConfig{
		name: 'anthropic/claude-3.7-sonnet'
		provider: 'anthropic'
		temperature: 0.7
		max_tokens: 25000
	}

	gpt4_model := escalayer.ModelConfig{
		name: 'gpt-4'
		provider: 'openai'
		temperature: 0.7
		max_tokens: 25000
	}

	// Extract the module name from the directory path (last component)
	dir_parts := source_code_path.split('/')
	name := dir_parts[dir_parts.len - 1]

	// Create the prompt with source code, wrapper example, and rhai_wrapping_md
	prompt_content := create_rhai_wrappers(name, source_code, os.read_file('${current_dir}/prompts/example_script.md') or {
		''
	}, os.read_file('${current_dir}/prompts/wrapper.md') or { '' }, os.read_file('${current_dir}/prompts/errors.md') or {
		''
	}, crate_path)

	// Create a prompt function that returns the prepared content
	prompt_function := fn [prompt_content] (input string) string {
		return prompt_content
	}

	gen := RhaiGen{
		name: name
		dir: source_code_path
	}

	// Define a single unit task that handles everything
	task.new_unit_task(
		name: 'create_rhai_wrappers'
		prompt_function: prompt_function
		callback_function: gen.process_rhai_wrappers
		base_model: sonnet_model
		retry_model: gpt4_model
		retry_count: 1
	)

	// Initiate the task
	result := task.initiate('') or {
		println('Task failed: ${err}')
		return
	}

	println('Task completed successfully')
	println('The wrapper files have been generated and compiled in the target directory.')
	println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
}
// Define the prompt functions
fn separate_functions(input string) string {
	return 'Read the following Rust code and separate it into functions. Identify all the methods in the Container implementation and their purposes.\n\n${input}'
}
fn create_wrappers(input string) string {
	return 'Create Rhai wrappers for the Rust functions identified in the previous step. The wrappers should follow the builder pattern and provide a clean API for use in Rhai scripts. Include error handling and type conversion.\n\n${input}'
}
fn create_example(input string) string {
	return 'Create a Rhai example script that demonstrates how to use the wrapper functions. The example should be based on the provided example.rs file but adapted for Rhai syntax. Create a web server example that uses the container functions.\n\n${input}'
}
// Define a Rhai wrapper generator function for Container functions
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
	guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md') or {
		panic('Failed to read guides')
	}
	engine := $tmpl('./prompts/engine.md')
	vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md') or {
		panic('Failed to read guides')
	}
	rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md') or {
		panic('Failed to read guides')
	}
	rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md') or {
		panic('Failed to read guides')
	}
	generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
	return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
${guides}
${vector_vs_array}
${example_rhai}
@@ -267,263 +279,254 @@ your engine create function is called `create_rhai_engine`
@[params]
pub struct WrapperModule {
pub:
	lib_rs             string
	example_rs         string
	engine_rs          string
	cargo_toml         string
	example_rhai       string
	generic_wrapper_rs string
	wrapper_rs         string
}
// functions is a list of function names that AI should extract and pass in
fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
	// Define project directory paths
	name := name_
	project_dir := '${base_dir}/rhai'
	// Create the project using cargo new --lib
	if os.exists(project_dir) {
		os.rmdir_all(project_dir) or {
			return error('Failed to clean existing project directory: ${err}')
		}
	}

	// Run cargo new --lib to create the project
	os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }

	cargo_new_result := os.execute('cargo new --lib rhai')
	if cargo_new_result.exit_code != 0 {
		return error('Failed to create new library project: ${cargo_new_result.output}')
	}

	// Create examples directory
	examples_dir := '${project_dir}/examples'
	os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }

	// Write the lib.rs file
	if wrapper.lib_rs != '' {
		os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
			return error('Failed to write lib.rs: ${err}')
		}
	}

	// Write the wrapper.rs file
	if wrapper.wrapper_rs != '' {
		os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
			return error('Failed to write wrapper.rs: ${err}')
		}
	}

	// Write the generic wrapper.rs file
	if wrapper.generic_wrapper_rs != '' {
		os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
			return error('Failed to write generic wrapper.rs: ${err}')
		}
	}

	// Write the example.rs file
	if wrapper.example_rs != '' {
		os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
			return error('Failed to write example.rs: ${err}')
		}
	}

	// Write the engine.rs file if provided
	if wrapper.engine_rs != '' {
		os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
			return error('Failed to write engine.rs: ${err}')
		}
	}

	// Write the Cargo.toml file
	if wrapper.cargo_toml != '' {
		os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
			return error('Failed to write Cargo.toml: ${err}')
		}
	}

	// Write the example.rhai file if provided
	if wrapper.example_rhai != '' {
		os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
			return error('Failed to write example.rhai: ${err}')
		}
	}

	return project_dir
}
// Helper function to extract code blocks from the response
fn extract_code_block(response string, identifier string, language string) string {
	// Find the start marker for the code block
	mut start_marker := '```${language}\n// ${identifier}'
	if language == '' {
		start_marker = '```\n// ${identifier}'
	}
	start_index := response.index(start_marker) or {
		// Try alternative format
		mut alt_marker := '```${language}\n${identifier}'
		if language == '' {
			alt_marker = '```\n${identifier}'
		}
		response.index(alt_marker) or { return '' }
	}
	// Find the end marker
	end_marker := '```'
	end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
	// Extract the content between the markers
	content_start := start_index + start_marker.len
	content := response[content_start..end_index].trim_space()
	return content
}
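// Hedged usage sketch (illustration only, not part of this commit); the response
// text below is hypothetical.
fn extract_code_block_example() {
	response := 'Here is the file:\n```rust\n// wrapper.rs\npub fn demo() {}\n```'
	code := extract_code_block(response, 'wrapper.rs', 'rust')
	println(code) // prints: pub fn demo() {}
}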
// Extract module name from wrapper code
fn extract_module_name(code string) string {
	lines := code.split('\n')
	for line in lines {
		// Look for pub mod or mod declarations
		if line.contains('pub mod ') || line.contains('mod ') {
			// Extract module name
			mut parts := []string{}
			if line.contains('pub mod ') {
				parts = line.split('pub mod ')
			} else {
				parts = line.split('mod ')
			}
			if parts.len > 1 {
				// Extract the module name and remove any trailing characters
				mut name := parts[1].trim_space()
				// Remove any trailing { or ; or whitespace
				name = name.trim_right('{').trim_right(';').trim_space()
				if name != '' {
					return name
				}
			}
		}
	}
	return ''
}
struct RhaiGen {
	name string
	dir  string
}
// Define the callback function that processes the response and compiles the code
fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
	// Extract wrapper.rs content
	wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
	if wrapper_rs_content == '' {
		return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
	}

	// Extract engine.rs content
	mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
	if engine_rs_content == '' {
		// Try to extract from the response without explicit language marker
		engine_rs_content = extract_code_block(response, 'engine.rs', '')
		// if engine_rs_content == '' {
		// 	// Use the template engine.rs
		// 	engine_rs_content = $tmpl('./templates/engine.rs')
		// }
	}

	// Extract example.rhai content
	mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
	if example_rhai_content == '' {
		// Try to extract from the response without explicit language marker
		example_rhai_content = extract_code_block(response, 'example.rhai', '')
		if example_rhai_content == '' {
			// Use the example from the template
			example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
				return error('Failed to read example.rhai template: ${err}')
			}
			// Extract the code block from the markdown file
			example_rhai_content = extract_code_block(example_script_md, 'example.rhai',
				'rhai')
			if example_rhai_content == '' {
				return error('Failed to extract example.rhai from template file')
			}
		}
	}

	// Extract function names from the wrapper.rs content
	functions := extract_functions_from_code(wrapper_rs_content)

	println('Using module name: ${gen.name}_rhai')
	println('Extracted functions: ${functions.join(', ')}')

	name := gen.name
	// Create a WrapperModule struct with the extracted content
	wrapper := WrapperModule{
		lib_rs: $tmpl('./templates/lib.rs')
		wrapper_rs: wrapper_rs_content
		example_rs: $tmpl('./templates/example.rs')
		engine_rs: engine_rs_content
		generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
		cargo_toml: $tmpl('./templates/cargo.toml')
		example_rhai: example_rhai_content
	}

	// Create the wrapper module
	base_target_dir := gen.dir
	project_dir := create_wrapper_module(wrapper, functions, gen.name, base_target_dir) or {
		return error('Failed to create wrapper module: ${err}')
	}

	// Run the example
	os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }

	// Run cargo build first
	build_result := os.execute('cargo build')
	if build_result.exit_code != 0 {
		return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
	}

	// Run the example
	run_result := os.execute('cargo run --example example')

	return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_result.output}\n\nRun output:\n${run_result.output}'
}
// Extract function names from wrapper code
fn extract_functions_from_code(code string) []string {
	mut functions := []string{}
	lines := code.split('\n')
	for line in lines {
		if line.contains('pub fn ') && !line.contains('//') {
			// Extract function name
			parts := line.split('pub fn ')
			if parts.len > 1 {
				name_parts := parts[1].split('(')
				if name_parts.len > 0 {
					fn_name := name_parts[0].trim_space()
					if fn_name != '' {
						functions << fn_name
					}
				}
			}
		}
	}
	return functions
}
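// Hedged usage sketch (illustration only, not part of this commit):
fn extract_functions_example() {
	code := 'pub fn alpha(x int) {}\nfn private_helper() {}\npub fn beta() {}'
	println(extract_functions_from_code(code)) // ['alpha', 'beta']
}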


@@ -4,209 +4,204 @@ import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
import os
fn main() {
	// Get the current directory where this script is located
	current_dir := os.dir(@FILE)
	// Validate command line arguments
	source_code_path := validate_command_args() or {
		println(err)
		return
	}
	// Read and combine all Rust files in the source directory
	source_code := read_source_code(source_code_path) or {
		println(err)
		return
	}
	// Determine the crate path from the source code path
	crate_path := determine_crate_path(source_code_path) or {
		println(err)
		return
	}
	// Extract the module name from the directory path (last component)
	name := extract_module_name_from_path(source_code_path)
	// Create the prompt content for the AI
	prompt_content := create_rhai_wrappers(name, source_code, read_file_safely('${current_dir}/prompts/example_script.md'),
		read_file_safely('${current_dir}/prompts/wrapper.md'), read_file_safely('${current_dir}/prompts/errors.md'),
		crate_path)
	// Create the generator instance
	gen := RhaiGen{
		name: name
		dir: source_code_path
	}
	// Run the task to generate Rhai wrappers
	run_wrapper_generation_task(prompt_content, gen) or {
		println('Task failed: ${err}')
		return
	}
	println('Task completed successfully')
	println('The wrapper files have been generated and compiled in the target directory.')
	println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
}
// Validates command line arguments and returns the source code path
fn validate_command_args() !string {
	if os.args.len < 2 {
		return error('Please provide the path to the source code directory as an argument\nExample: ./example.vsh /path/to/source/code/directory')
	}
	source_code_path := os.args[1]
	if !os.exists(source_code_path) {
		return error('Source code path does not exist: ${source_code_path}')
	}
	if !os.is_dir(source_code_path) {
		return error('Source code path is not a directory: ${source_code_path}')
	}
	return source_code_path
}
// Reads and combines all Rust files in the given directory
fn read_source_code(source_code_path string) !string {
	// Get all files in the directory
	files := os.ls(source_code_path) or {
		return error('Failed to list files in directory: ${err}')
	}

	// Combine all Rust files into a single source code string
	mut source_code := ''
	for file in files {
		file_path := os.join_path(source_code_path, file)

		// Skip directories and non-Rust files
		if os.is_dir(file_path) || !file.ends_with('.rs') {
			continue
		}

		// Read the file content
		file_content := os.read_file(file_path) or {
			println('Failed to read file ${file_path}: ${err}')
			continue
		}

		// Add file content to the combined source code
		source_code += '// File: ${file}\n${file_content}\n\n'
	}
	if source_code == '' {
		return error('No Rust files found in directory: ${source_code_path}')
	}
	return source_code
}
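// Usage sketch showing the combined output shape; the directory and file
// names here are hypothetical, not from this repo.
source := read_source_code('/tmp/mycrate/src') or { panic(err) }
// Each file body is preceded by a '// File: <name>' header, e.g.:
//   // File: lib.rs
//   pub fn hello() {}
println(source)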
// Determines the crate path from the source code path
fn determine_crate_path(source_code_path string) !string {
	// Extract the path relative to the src directory
	src_index := source_code_path.index('src/') or {
		return error('Could not determine crate path: src/ not found in path')
	}
	mut path_parts := source_code_path[src_index + 4..].split('/')
	// Remove the last part (the file name)
	if path_parts.len > 0 {
		path_parts.delete_last()
	}
	rel_path := path_parts.join('::')
	return 'sal::${rel_path}'
}
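// A sketch of the mapping with an invented path; note that the last path
// component is always dropped and the result is unconditionally rooted at sal::.
crate_path := determine_crate_path('/tmp/sal/src/virt/nerdctl/mod.rs') or { panic(err) }
assert crate_path == 'sal::virt::nerdctl'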
// Extracts the module name from a directory path
fn extract_module_name_from_path(path string) string {
	dir_parts := path.split('/')
	return dir_parts[dir_parts.len - 1]
}
// Helper function to read a file or return empty string if file doesn't exist
fn read_file_safely(file_path string) string {
	return os.read_file(file_path) or { '' }
}
// Runs the task to generate Rhai wrappers
fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
	// Create a new task
	mut task := escalayer.new_task(
		name:        'rhai_wrapper_creator.escalayer'
		description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
	)

	// Create model configs
	sonnet_model := escalayer.ModelConfig{
		name:        'anthropic/claude-3.7-sonnet'
		provider:    'anthropic'
		temperature: 0.7
		max_tokens:  25000
	}
	gpt4_model := escalayer.ModelConfig{
		name:        'gpt-4'
		provider:    'openai'
		temperature: 0.7
		max_tokens:  25000
	}

	// Create a prompt function that returns the prepared content
	prompt_function := fn [prompt_content] (input string) string {
		return prompt_content
	}

	// Define a single unit task that handles everything
	task.new_unit_task(
		name:              'create_rhai_wrappers'
		prompt_function:   prompt_function
		callback_function: gen.process_rhai_wrappers
		base_model:        sonnet_model
		retry_model:       gpt4_model
		retry_count:       1
	)

	// Initiate the task
	return task.initiate('')
}
// Define a Rhai wrapper generator function for Container functions
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
	// Load all required template and guide files
	guides := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')
	engine := $tmpl('./prompts/engine.md')
	vector_vs_array := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')
	rhai_integration_fixes := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')
	rhai_syntax_guide := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')
	generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')

	// Build the prompt content
	return build_prompt_content(name, source_code, example_rhai, wrapper_md, errors_md,
		guides, vector_vs_array, rhai_integration_fixes, rhai_syntax_guide, generic_wrapper_rs,
		engine)
}
// Helper function to load guide files with error handling
fn load_guide_file(path string) string {
	return os.read_file(path) or {
		eprintln('Warning: Failed to read guide file: ${path}')
		return ''
	}
}
// Builds the prompt content for the AI
fn build_prompt_content(name string, source_code string, example_rhai string, wrapper_md string,
	errors_md string, guides string, vector_vs_array string,
	rhai_integration_fixes string, rhai_syntax_guide string,
	generic_wrapper_rs string, engine string) string {
	return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
${guides}
${vector_vs_array}
${example_rhai}
@@ -313,305 +308,289 @@ your engine create function is called `create_rhai_engine`
@[params]
pub struct WrapperModule {
pub:
	lib_rs             string
	example_rs         string
	engine_rs          string
	cargo_toml         string
	example_rhai       string
	generic_wrapper_rs string
	wrapper_rs         string
}
// functions is a list of function names that AI should extract and pass in
fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
	// Define project directory paths
	name := name_
	project_dir := '${base_dir}/rhai'

	// Create the project using cargo new --lib
	if os.exists(project_dir) {
		os.rmdir_all(project_dir) or {
			return error('Failed to clean existing project directory: ${err}')
		}
	}

	// Run cargo new --lib to create the project
	os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }

	cargo_new_result := os.execute('cargo new --lib rhai')
	if cargo_new_result.exit_code != 0 {
		return error('Failed to create new library project: ${cargo_new_result.output}')
	}

	// Create examples directory
	examples_dir := '${project_dir}/examples'
	os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }

	// Write the lib.rs file
	if wrapper.lib_rs != '' {
		os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
			return error('Failed to write lib.rs: ${err}')
		}
	}

	// Write the wrapper.rs file
	if wrapper.wrapper_rs != '' {
		os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
			return error('Failed to write wrapper.rs: ${err}')
		}
	}

	// Write the generic wrapper.rs file
	if wrapper.generic_wrapper_rs != '' {
		os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
			return error('Failed to write generic wrapper.rs: ${err}')
		}
	}

	// Write the example.rs file
	if wrapper.example_rs != '' {
		os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
			return error('Failed to write example.rs: ${err}')
		}
	}

	// Write the engine.rs file if provided
	if wrapper.engine_rs != '' {
		os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
			return error('Failed to write engine.rs: ${err}')
		}
	}

	// Write the Cargo.toml file
	if wrapper.cargo_toml != '' {
		os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
			return error('Failed to write Cargo.toml: ${err}')
		}
	}

	// Write the example.rhai file if provided
	if wrapper.example_rhai != '' {
		os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
			return error('Failed to write example.rhai: ${err}')
		}
	}

	return project_dir
}
// Helper function to extract code blocks from the response
fn extract_code_block(response string, identifier string, language string) string {
	// Find the start marker for the code block
	mut start_marker := '```${language}\n// ${identifier}'
	if language == '' {
		start_marker = '```\n// ${identifier}'
	}
	start_index := response.index(start_marker) or {
		// Try alternative format
		mut alt_marker := '```${language}\n${identifier}'
		if language == '' {
			alt_marker = '```\n${identifier}'
		}
		response.index(alt_marker) or { return '' }
	}

	// Find the end marker
	end_marker := '```'
	end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }

	// Extract the content between the markers
	content_start := start_index + start_marker.len
	content := response[content_start..end_index].trim_space()
	return content
}
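// A minimal illustration of the marker convention this parser expects; the
// response text is invented for the sketch.
response := '```rust\n// wrapper.rs\npub fn demo() {}\n```'
assert extract_code_block(response, 'wrapper.rs', 'rust') == 'pub fn demo() {}'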
// Extract module name from wrapper code
fn extract_module_name(code string) string {
	lines := code.split('\n')
	for line in lines {
		// Look for pub mod or mod declarations
		if line.contains('pub mod ') || line.contains('mod ') {
			// Extract module name
			mut parts := []string{}
			if line.contains('pub mod ') {
				parts = line.split('pub mod ')
			} else {
				parts = line.split('mod ')
			}
			if parts.len > 1 {
				// Extract the module name and remove any trailing characters
				mut name := parts[1].trim_space()
				// Remove any trailing { or ; or whitespace
				name = name.trim_right('{').trim_right(';').trim_space()
				if name != '' {
					return name
				}
			}
		}
	}
	return ''
}
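// Two invented inputs showing what the declaration scanner returns.
assert extract_module_name('pub mod nerdctl_rhai {') == 'nerdctl_rhai'
assert extract_module_name('mod helpers;') == 'helpers'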
// RhaiGen struct for generating Rhai wrappers
struct RhaiGen {
	name string
	dir  string
}
// Process the AI response and compile the generated code
fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
	// Extract code blocks from the response
	code_blocks := extract_code_blocks(response) or { return err }

	// Extract function names from the wrapper.rs content
	functions := extract_functions_from_code(code_blocks.wrapper_rs)

	println('Using module name: ${gen.name}_rhai')
	println('Extracted functions: ${functions.join(', ')}')

	name := gen.name

	// Create a WrapperModule struct with the extracted content
	wrapper := WrapperModule{
		lib_rs:             $tmpl('./templates/lib.rs')
		wrapper_rs:         code_blocks.wrapper_rs
		example_rs:         $tmpl('./templates/example.rs')
		engine_rs:          code_blocks.engine_rs
		generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
		cargo_toml:         $tmpl('./templates/cargo.toml')
		example_rhai:       code_blocks.example_rhai
	}

	// Create the wrapper module
	project_dir := create_wrapper_module(wrapper, functions, gen.name, gen.dir) or {
		return error('Failed to create wrapper module: ${err}')
	}

	// Build and run the project
	build_output, run_output := build_and_run_project(project_dir) or { return err }

	return format_success_message(project_dir, build_output, run_output)
}
// CodeBlocks struct to hold extracted code blocks
struct CodeBlocks {
	wrapper_rs   string
	engine_rs    string
	example_rhai string
}
// Extract code blocks from the AI response
fn extract_code_blocks(response string) !CodeBlocks {
	// Extract wrapper.rs content
	wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
	if wrapper_rs_content == '' {
		return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
	}

	// Extract engine.rs content
	mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
	if engine_rs_content == '' {
		// Try to extract from the response without explicit language marker
		engine_rs_content = extract_code_block(response, 'engine.rs', '')
	}

	// Extract example.rhai content
	mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
	if example_rhai_content == '' {
		// Try to extract from the response without explicit language marker
		example_rhai_content = extract_code_block(response, 'example.rhai', '')
		if example_rhai_content == '' {
			// Use the example from the template
			example_rhai_content = load_example_from_template() or { return err }
		}
	}

	return CodeBlocks{
		wrapper_rs:   wrapper_rs_content
		engine_rs:    engine_rs_content
		example_rhai: example_rhai_content
	}
}
// Load example.rhai from template file
fn load_example_from_template() !string {
	example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
		return error('Failed to read example.rhai template: ${err}')
	}

	// Extract the code block from the markdown file
	example_rhai_content := extract_code_block(example_script_md, 'example.rhai', 'rhai')
	if example_rhai_content == '' {
		return error('Failed to extract example.rhai from template file')
	}
	return example_rhai_content
}
// Build and run the project
fn build_and_run_project(project_dir string) !(string, string) {
	// Change to the project directory
	os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }

	// Run cargo build first
	build_result := os.execute('cargo build')
	if build_result.exit_code != 0 {
		return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
	}

	// Run the example
	run_result := os.execute('cargo run --example example')

	return build_result.output, run_result.output
}
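// Usage sketch with a hypothetical project path; a failing cargo build
// surfaces as the error value, which the escalayer retry loop then feeds
// back to the model as the next prompt input.
build_out, run_out := build_and_run_project('/tmp/mycrate/rhai') or { panic(err) }
println('build:\n${build_out}')
println('run:\n${run_out}')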
// Format success message
fn format_success_message(project_dir string, build_output string, run_output string) string {
	return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
}
// Extract function names from wrapper code
fn extract_functions_from_code(code string) []string {
	mut functions := []string{}
	lines := code.split('\n')
	for line in lines {
		if line.contains('pub fn ') && !line.contains('//') {
			// Extract function name
			parts := line.split('pub fn ')
			if parts.len > 1 {
				name_parts := parts[1].split('(')
				if name_parts.len > 0 {
					fn_name := name_parts[0].trim_space()
					if fn_name != '' {
						functions << fn_name
					}
				}
			}
		}
	}
	return functions
}

View File

@@ -6,285 +6,278 @@ import freeflowuniverse.herolib.ai.utils
import os

pub fn generate_rhai_wrapper(name string, source_path string) !string {
	// Detect source package and module information
	source_pkg_info := rust.detect_source_package(source_path)!
	source_code := rust.read_source_code(source_path)!
	prompt := rhai_wrapper_generation_prompt(name, source_code, source_pkg_info)!
	return run_wrapper_generation_task(prompt, RhaiGen{
		name:            name
		dir:             source_path
		source_pkg_info: source_pkg_info
	})!
}
// Runs the task to generate Rhai wrappers
pub fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
	// Create a new task
	mut task := escalayer.new_task(
		name:        'rhai_wrapper_creator.escalayer'
		description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
	)

	// Create model configs
	sonnet_model := escalayer.ModelConfig{
		name:        'anthropic/claude-3.7-sonnet'
		provider:    'anthropic'
		temperature: 0.7
		max_tokens:  25000
	}
	gpt4_model := escalayer.ModelConfig{
		name:        'gpt-4'
		provider:    'openai'
		temperature: 0.7
		max_tokens:  25000
	}

	// Create a prompt function that returns the prepared content
	prompt_function := fn [prompt_content] (input string) string {
		return prompt_content
	}

	// Define a single unit task that handles everything
	task.new_unit_task(
		name:              'create_rhai_wrappers'
		prompt_function:   prompt_function
		callback_function: gen.process_rhai_wrappers
		base_model:        sonnet_model
		retry_model:       gpt4_model
		retry_count:       1
	)

	// Initiate the task
	return task.initiate('')
}
// Define a Rhai wrapper generator function for Container functions
pub fn rhai_wrapper_generation_prompt(name string, source_code string, source_pkg_info rust.SourcePackageInfo) !string {
	current_dir := os.dir(@FILE)
	example_rhai := os.read_file('${current_dir}/prompts/example_script.md')!
	wrapper_md := os.read_file('${current_dir}/prompts/wrapper.md')!
	errors_md := os.read_file('${current_dir}/prompts/errors.md')!

	// Load all required template and guide files
	guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')!
	engine := $tmpl('./prompts/engine.md')
	vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')!
	rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')!
	rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')!
	generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')

	prompt := $tmpl('./prompts/main.md')
	return prompt
}
@[params]
pub struct WrapperModule {
pub:
	lib_rs             string
	example_rs         string
	engine_rs          string
	cargo_toml         string
	example_rhai       string
	generic_wrapper_rs string
	wrapper_rs         string
}
// functions is a list of function names that AI should extract and pass in
pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string) !string {
	// Define project directory paths
	project_dir := '${path}/rhai'

	// Create the project using cargo new --lib
	if os.exists(project_dir) {
		os.rmdir_all(project_dir) or {
			return error('Failed to clean existing project directory: ${err}')
		}
	}

	// Run cargo new --lib to create the project
	os.chdir(path) or { return error('Failed to change directory to base directory: ${err}') }

	cargo_new_result := os.execute('cargo new --lib rhai')
	if cargo_new_result.exit_code != 0 {
		return error('Failed to create new library project: ${cargo_new_result.output}')
	}

	// Create examples directory
	examples_dir := '${project_dir}/examples'
	os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }

	// Write the lib.rs file
	if wrapper.lib_rs != '' {
		os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
			return error('Failed to write lib.rs: ${err}')
		}
	} else {
		// Use default lib.rs template if none provided
		lib_rs_content := $tmpl('./templates/lib.rs')
		os.write_file('${project_dir}/src/lib.rs', lib_rs_content) or {
			return error('Failed to write lib.rs: ${err}')
		}
	}

	// Write the wrapper.rs file
	if wrapper.wrapper_rs != '' {
		os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
			return error('Failed to write wrapper.rs: ${err}')
		}
	}

	// Write the generic wrapper.rs file
	if wrapper.generic_wrapper_rs != '' {
		os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
			return error('Failed to write generic wrapper.rs: ${err}')
		}
	}

	// Write the example.rs file
	if wrapper.example_rs != '' {
		os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
			return error('Failed to write example.rs: ${err}')
		}
	} else {
		// Use default example.rs template if none provided
		example_rs_content := $tmpl('./templates/example.rs')
		os.write_file('${examples_dir}/example.rs', example_rs_content) or {
			return error('Failed to write example.rs: ${err}')
		}
	}

	// Write the engine.rs file if provided
	if wrapper.engine_rs != '' {
		os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
			return error('Failed to write engine.rs: ${err}')
		}
	}

	// Write the Cargo.toml file
	os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
		return error('Failed to write Cargo.toml: ${err}')
	}

	// Write the example.rhai file
	os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
		return error('Failed to write example.rhai: ${err}')
	}

	return project_dir
}
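// A minimal usage sketch (all contents and paths invented); lib.rs and
// example.rs fall back to the bundled templates when left empty, while the
// other fields are written as-is.
wrapper := WrapperModule{
	wrapper_rs:   '// generated wrapper code'
	cargo_toml:   '[package]\nname = "demo_rhai"\nversion = "0.1.0"'
	example_rhai: 'print("hello");'
}
project_dir := write_rhai_wrapper_module(wrapper, 'demo', '/tmp/demo') or { panic(err) }
assert project_dir == '/tmp/demo/rhai'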
// Extract module name from wrapper code
fn extract_module_name(code string) string {
	lines := code.split('\n')
	for line in lines {
		// Look for pub mod or mod declarations
		if line.contains('pub mod ') || line.contains('mod ') {
			// Extract module name
			mut parts := []string{}
			if line.contains('pub mod ') {
				parts = line.split('pub mod ')
			} else {
				parts = line.split('mod ')
			}
			if parts.len > 1 {
				// Extract the module name and remove any trailing characters
				mut name := parts[1].trim_space()
				// Remove any trailing { or ; or whitespace
				name = name.trim_right('{').trim_right(';').trim_space()
				if name != '' {
					return name
				}
			}
		}
	}
	return ''
}
// RhaiGen struct for generating Rhai wrappers
struct RhaiGen {
	name            string
	dir             string
	source_pkg_info rust.SourcePackageInfo
}
// Process the AI response and compile the generated code
pub fn (gen RhaiGen) process_rhai_wrappers(input string) !string {
	blocks := extract_code_blocks(input)!
	source_pkg_info := gen.source_pkg_info

	// Create the module structure
	mod := WrapperModule{
		lib_rs:             blocks.lib_rs
		engine_rs:          blocks.engine_rs
		example_rhai:       blocks.example_rhai
		generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
		wrapper_rs:         blocks.wrapper_rs
	}

	// Write the module files
	project_dir := write_rhai_wrapper_module(mod, gen.name, gen.dir)!
	return project_dir
}
// CodeBlocks struct to hold extracted code blocks
struct CodeBlocks {
	wrapper_rs   string
	engine_rs    string
	example_rhai string
	lib_rs       string
}
// Extract code blocks from the AI response
fn extract_code_blocks(response string) !CodeBlocks {
	// Extract wrapper.rs content
	wrapper_rs_content := utils.extract_code_block(response, 'wrapper.rs', 'rust')
	if wrapper_rs_content == '' {
		return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
	}

	// Extract engine.rs content
	mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
	if engine_rs_content == '' {
		// Try to extract from the response without explicit language marker
		engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
	}

	// Extract example.rhai content
	mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
	if example_rhai_content == '' {
		// Try to extract from the response without explicit language marker
		example_rhai_content = utils.extract_code_block(response, 'example.rhai', '')
		if example_rhai_content == '' {
			return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
		}
	}

	// Extract lib.rs content
	lib_rs_content := utils.extract_code_block(response, 'lib.rs', 'rust')
	if lib_rs_content == '' {
		return error('Failed to extract lib.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// lib.rs and ends with ```')
	}
	return CodeBlocks{
		wrapper_rs:   wrapper_rs_content
		engine_rs:    engine_rs_content
		example_rhai: example_rhai_content
		lib_rs:       lib_rs_content
	}
}
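// For clarity, a sketch of the response layout this extractor accepts; the
// block bodies are placeholders, not real generated code.
mut response := '```rust\n// wrapper.rs\npub fn demo() {}\n```\n'
response += '```rust\n// engine.rs\nfn create_rhai_engine() {}\n```\n'
response += '```rhai\n// example.rhai\ndemo();\n```\n'
response += '```rust\n// lib.rs\npub mod wrapper;\n```'
blocks := extract_code_blocks(response) or { panic(err) }
assert blocks.wrapper_rs == 'pub fn demo() {}'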
// Format success message
fn format_success_message(project_dir string, build_output string, run_output string) string {
	return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
}

View File

@@ -20,7 +20,7 @@ import os
// name: 'rhai_wrapper_creator.escalayer'
// description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
// )
// // Create model configs
// sonnet_model := escalayer.ModelConfig{
// name: 'anthropic/claude-3.7-sonnet'
@@ -28,19 +28,19 @@ import os
// temperature: 0.7
// max_tokens: 25000
// }
// gpt4_model := escalayer.ModelConfig{
// name: 'gpt-4'
// provider: 'openai'
// temperature: 0.7
// max_tokens: 25000
// }
// // Create a prompt function that returns the prepared content
// prompt_function := fn [prompt_content] (input string) string {
// return prompt_content
// }
// // Define a single unit task that handles everything
// task.new_unit_task(
// name: 'create_rhai_wrappers'
@@ -50,7 +50,7 @@ import os
// retry_model: gpt4_model
// retry_count: 1
// )
// // Initiate the task
// return task.initiate('')
// }
@@ -69,33 +69,33 @@ import os
// // functions is a list of function names that AI should extract and pass in
// pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string)! string {
// // Define project directory paths
// project_dir := '${path}/rhai'
// // Create the project using cargo new --lib
// if os.exists(project_dir) {
// os.rmdir_all(project_dir) or {
// return error('Failed to clean existing project directory: ${err}')
// }
// }
// // Run cargo new --lib to create the project
// os.chdir(path) or {
// return error('Failed to change directory to base directory: ${err}')
// }
// cargo_new_result := os.execute('cargo new --lib rhai')
// if cargo_new_result.exit_code != 0 {
// return error('Failed to create new library project: ${cargo_new_result.output}')
// }
// // Create examples directory
// examples_dir := '${project_dir}/examples'
// os.mkdir_all(examples_dir) or {
// return error('Failed to create examples directory: ${err}')
// }
// // Write the lib.rs file
// if wrapper.lib_rs != '' {
// os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
@@ -109,47 +109,45 @@ import os
// return error('Failed to write wrapper.rs: ${err}')
// }
// }
// // Write the generic wrapper.rs file
// if wrapper.generic_wrapper_rs != '' {
// os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
// return error('Failed to write generic wrapper.rs: ${err}')
// }
// }
// // Write the example.rs file
// if wrapper.example_rs != '' {
// os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
// return error('Failed to write example.rs: ${err}')
// }
// }
// // Write the engine.rs file if provided
// if wrapper.engine_rs != '' {
// os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
// return error('Failed to write engine.rs: ${err}')
// }
// }
// // Write the Cargo.toml file
// os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
// return error('Failed to write Cargo.toml: ${err}')
// }
// // Write the example.rhai file
// os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
// return error('Failed to write example.rhai: ${err}')
// }
// return project_dir
// }
// // Extract module name from wrapper code
// fn extract_module_name(code string) string {
// lines := code.split('\n')
// for line in lines {
// // Look for pub mod or mod declarations
// if line.contains('pub mod ') || line.contains('mod ') {
@@ -160,7 +158,7 @@ import os
// } else {
// parts = line.split('mod ')
// }
// if parts.len > 1 {
// // Extract the module name and remove any trailing characters
// mut name := parts[1].trim_space()
@@ -172,7 +170,7 @@ import os
// }
// }
// }
// return ''
// }
@@ -188,9 +186,9 @@ import os
// code_blocks := extract_code_blocks(response) or {
// return err
// }
// name := gen.name
// // Create a WrapperModule struct with the extracted content
// wrapper := WrapperModule{
// lib_rs: $tmpl('./templates/lib.rs')
@@ -201,17 +199,17 @@ import os
// cargo_toml: $tmpl('./templates/cargo.toml')
// example_rhai: code_blocks.example_rhai
// }
// // Create the wrapper module
// project_dir := write_rhai_wrapper_module(wrapper, gen.name, gen.dir) or {
// return error('Failed to create wrapper module: ${err}')
// }
// // Build and run the project
// build_output, run_output := rust.run_example(project_dir, 'example') or {
// return err
// }
// return format_success_message(project_dir, build_output, run_output)
// }
@@ -229,14 +227,14 @@ import os
// if wrapper_rs_content == '' {
// return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
// }
// // Extract engine.rs content
// mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
// if engine_rs_content == '' {
// // Try to extract from the response without explicit language marker
// engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
// }
// // Extract example.rhai content
// mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
// if example_rhai_content == '' {
@@ -246,7 +244,7 @@ import os
// return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
// }
// }
// return CodeBlocks{
// wrapper_rs: wrapper_rs_content
// engine_rs: engine_rs_content

View File

@@ -2,17 +2,17 @@ module mcp
import cli

pub const command = cli.Command{
	sort_flags:  true
	name:        'rhai'
	// execute: cmd_mcpgen
	description: 'rhai command'
	commands:    [
		cli.Command{
			name:        'start'
			execute:     cmd_start
			description: 'start the Rhai server'
		},
	]
}
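// A sketch of mounting this subcommand under a parent CLI; the parent name
// 'hero' and the os import are assumptions for illustration, not from this
// commit.
mut app := cli.Command{
	name:     'hero'
	commands: [command]
}
app.setup()
app.parse(os.args)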
@@ -20,4 +20,3 @@ fn cmd_start(cmd cli.Command) ! {
	mut server := new_mcp_server()!
	server.start()!
}

View File

@@ -9,10 +9,10 @@ pub fn new_mcp_server() !&mcp.Server {
	// Initialize the server with the empty handlers map
	mut server := mcp.new_server(mcp.MemoryBackend{
		tools:         {
			'generate_rhai_wrapper': generate_rhai_wrapper_spec
		}
		tool_handlers: {
			'generate_rhai_wrapper': generate_rhai_wrapper_handler
		}
		prompts:       {
@@ -30,4 +30,4 @@ pub fn new_mcp_server() !&mcp.Server {
	}
	})!
	return server
}

View File

@@ -5,39 +5,41 @@ import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.ai.mcp.rhai.logic
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.lang.rust
import x.json2 as json

// Tool definition for the create_rhai_wrapper function
const rhai_wrapper_prompt_spec = mcp.Prompt{
	name: 'rhai_wrapper'
	description: 'provides a prompt for creating Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
	arguments: [
		mcp.PromptArgument{
			name: 'source_path'
			description: 'Path to the source directory'
			required: true
		},
	]
}

// Tool handler for the create_rhai_wrapper function
pub fn rhai_wrapper_prompt_handler(arguments []string) ![]mcp.PromptMessage {
	source_path := arguments[0]

	// Read and combine all Rust files in the source directory
	source_code := rust.read_source_code(source_path)!

	// Extract the module name from the directory path (last component)
	name := rust.extract_module_name_from_path(source_path)

	source_pkg_info := rust.detect_source_package(source_path)!

	result := logic.rhai_wrapper_generation_prompt(name, source_code, source_pkg_info)!
	return [
		mcp.PromptMessage{
			role: 'assistant'
			content: mcp.PromptContent{
				typ: 'text'
				text: result
			}
		},
	]
}
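Since the handler takes positional arguments, a minimal sketch of invoking it directly (the source path is illustrative; in practice the MCP server dispatches this):

fn demo() ! {
	// the source path is illustrative
	messages := rhai_wrapper_prompt_handler(['/path/to/rust/module'])!
	println(messages[0].content.text)
}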

View File

@@ -1,19 +1,19 @@
module mcp

import freeflowuniverse.herolib.ai.mcp
import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import log

const specs = mcp.Tool{
	name: 'rhai_interface'
	description: 'Add Rhai Interface to Rust Code Files'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to a .rs file or directory containing .rs files to make rhai interface for'
			})
		}
		required: ['path']

View File

@@ -8,32 +8,31 @@ import x.json2 as json { Any }
// Tool definition for the generate_rhai_wrapper function
const generate_rhai_wrapper_spec = mcp.Tool{
	name: 'generate_rhai_wrapper'
	description: 'generate_rhai_wrapper receives the name of a V language function string, and the path to the module in which it exists.'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'name': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
			'source_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
		}
		required: ['name', 'source_path']
	}
}

// Tool handler for the generate_rhai_wrapper function
pub fn generate_rhai_wrapper_handler(arguments map[string]Any) !mcp.ToolCallResult {
	name := arguments['name'].str()
	source_path := arguments['source_path'].str()
	result := logic.generate_rhai_wrapper(name, source_path) or {
		return mcp.error_tool_call_result(err)
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](result)
	}
}
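A hedged sketch of calling this handler the way the server would, with a json2 argument map (the function name and path are illustrative):

import x.json2 as json

fn demo() ! {
	args := {
		'name':        json.Any('new')
		'source_path': json.Any('/path/to/v/module')
	}
	res := generate_rhai_wrapper_handler(args)!
	if !res.is_error {
		println(res.content)
	}
}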

View File

@@ -1 +1 @@
module rhai

View File

@@ -2,16 +2,16 @@ module rust
import cli

pub const command = cli.Command{
	sort_flags: true
	name: 'rust'
	description: 'Rust language tools command'
	commands: [
		cli.Command{
			name: 'start'
			execute: cmd_start
			description: 'start the Rust MCP server'
		},
	]
}

View File

@@ -1,6 +1,6 @@
module rust

import freeflowuniverse.herolib.ai.mcp { ToolContent }

pub fn result_to_mcp_tool_contents[T](result T) []ToolContent {
	return [result_to_mcp_tool_content[T](result)]
@@ -51,4 +51,4 @@ pub fn array_to_mcp_tool_contents[U](array []U) []ToolContent {
		contents << result_to_mcp_tool_content(item)
	}
	return contents
}
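Usage of both generic helpers is straightforward; a small sketch with illustrative values:

fn demo() {
	contents := result_to_mcp_tool_contents[string]('hello')
	many := array_to_mcp_tool_contents[string](['a', 'b', 'c'])
	assert contents.len == 1
	assert many.len == 3
}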

View File

@@ -9,40 +9,40 @@ pub fn new_mcp_server() !&mcp.Server {
	// Initialize the server with tools and prompts
	mut server := mcp.new_server(mcp.MemoryBackend{
		tools: {
			'list_functions_in_file': list_functions_in_file_spec
			'list_structs_in_file': list_structs_in_file_spec
			'list_modules_in_dir': list_modules_in_dir_spec
			'get_import_statement': get_import_statement_spec
			// 'get_module_dependency': get_module_dependency_spec
		}
		tool_handlers: {
			'list_functions_in_file': list_functions_in_file_handler
			'list_structs_in_file': list_structs_in_file_handler
			'list_modules_in_dir': list_modules_in_dir_handler
			'get_import_statement': get_import_statement_handler
			// 'get_module_dependency': get_module_dependency_handler
		}
		prompts: {
			'rust_functions': rust_functions_prompt_spec
			'rust_structs': rust_structs_prompt_spec
			'rust_modules': rust_modules_prompt_spec
			'rust_imports': rust_imports_prompt_spec
			'rust_dependencies': rust_dependencies_prompt_spec
			'rust_tools_guide': rust_tools_guide_prompt_spec
		}
		prompt_handlers: {
			'rust_functions': rust_functions_prompt_handler
			'rust_structs': rust_structs_prompt_handler
			'rust_modules': rust_modules_prompt_handler
			'rust_imports': rust_imports_prompt_handler
			'rust_dependencies': rust_dependencies_prompt_handler
			'rust_tools_guide': rust_tools_guide_prompt_handler
		}
	}, mcp.ServerParams{
		config: mcp.ServerConfiguration{
			server_info: mcp.ServerInfo{
				name: 'rust'
				version: '1.0.0'
			}
		}

View File

@@ -2,113 +2,123 @@ module rust
import freeflowuniverse.herolib.ai.mcp
import os
import x.json2 as json

// Prompt specification for Rust functions
const rust_functions_prompt_spec = mcp.Prompt{
	name: 'rust_functions'
	description: 'Provides guidance on working with Rust functions and using the list_functions_in_file tool'
	arguments: []
}

// Handler for rust_functions prompt
pub fn rust_functions_prompt_handler(arguments []string) ![]mcp.PromptMessage {
	content := os.read_file('${os.dir(@FILE)}/prompts/functions.md')!
	return [
		mcp.PromptMessage{
			role: 'assistant'
			content: mcp.PromptContent{
				typ: 'text'
				text: content
			}
		},
	]
}

// Prompt specification for Rust structs
const rust_structs_prompt_spec = mcp.Prompt{
	name: 'rust_structs'
	description: 'Provides guidance on working with Rust structs and using the list_structs_in_file tool'
	arguments: []
}

// Handler for rust_structs prompt
pub fn rust_structs_prompt_handler(arguments []string) ![]mcp.PromptMessage {
	content := os.read_file('${os.dir(@FILE)}/prompts/structs.md')!
	return [
		mcp.PromptMessage{
			role: 'assistant'
			content: mcp.PromptContent{
				typ: 'text'
				text: content
			}
		},
	]
}

// Prompt specification for Rust modules
const rust_modules_prompt_spec = mcp.Prompt{
	name: 'rust_modules'
	description: 'Provides guidance on working with Rust modules and using the list_modules_in_dir tool'
	arguments: []
}

// Handler for rust_modules prompt
pub fn rust_modules_prompt_handler(arguments []string) ![]mcp.PromptMessage {
	content := os.read_file('${os.dir(@FILE)}/prompts/modules.md')!
	return [
		mcp.PromptMessage{
			role: 'assistant'
			content: mcp.PromptContent{
				typ: 'text'
				text: content
			}
		},
	]
}

// Prompt specification for Rust imports
const rust_imports_prompt_spec = mcp.Prompt{
	name: 'rust_imports'
	description: 'Provides guidance on working with Rust imports and using the get_import_statement tool'
	arguments: []
}

// Handler for rust_imports prompt
pub fn rust_imports_prompt_handler(arguments []string) ![]mcp.PromptMessage {
	content := os.read_file('${os.dir(@FILE)}/prompts/imports.md')!
	return [
		mcp.PromptMessage{
			role: 'assistant'
			content: mcp.PromptContent{
				typ: 'text'
				text: content
			}
		},
	]
}

// Prompt specification for Rust dependencies
const rust_dependencies_prompt_spec = mcp.Prompt{
	name: 'rust_dependencies'
	description: 'Provides guidance on working with Rust dependencies and using the get_module_dependency tool'
	arguments: []
}

// Handler for rust_dependencies prompt
pub fn rust_dependencies_prompt_handler(arguments []string) ![]mcp.PromptMessage {
	content := os.read_file('${os.dir(@FILE)}/prompts/dependencies.md')!
	return [
		mcp.PromptMessage{
			role: 'assistant'
			content: mcp.PromptContent{
				typ: 'text'
				text: content
			}
		},
	]
}

// Prompt specification for general Rust tools guide
const rust_tools_guide_prompt_spec = mcp.Prompt{
	name: 'rust_tools_guide'
	description: 'Provides a comprehensive guide on all available Rust tools and how to use them'
	arguments: []
}

// Handler for rust_tools_guide prompt
@@ -119,26 +129,23 @@ pub fn rust_tools_guide_prompt_handler(arguments []string) ![]mcp.PromptMessage
	modules_content := os.read_file('${os.dir(@FILE)}/prompts/modules.md')!
	imports_content := os.read_file('${os.dir(@FILE)}/prompts/imports.md')!
	dependencies_content := os.read_file('${os.dir(@FILE)}/prompts/dependencies.md')!

	combined_content := '# Rust Language Tools Guide\n\n' +
		'This guide provides comprehensive information on working with Rust code using the available tools.\n\n' +
		'## Table of Contents\n\n' + '1. [Functions](#functions)\n' + '2. [Structs](#structs)\n' +
		'3. [Modules](#modules)\n' + '4. [Imports](#imports)\n' +
		'5. [Dependencies](#dependencies)\n\n' + '<a name="functions"></a>\n' + functions_content +
		'\n\n' + '<a name="structs"></a>\n' + structs_content + '\n\n' +
		'<a name="modules"></a>\n' + modules_content + '\n\n' + '<a name="imports"></a>\n' +
		imports_content + '\n\n' + '<a name="dependencies"></a>\n' + dependencies_content

	return [
		mcp.PromptMessage{
			role: 'assistant'
			content: mcp.PromptContent{
				typ: 'text'
				text: combined_content
			}
		},
	]
}

View File

@@ -1,111 +1,105 @@
module rust

import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.lang.rust
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2 as json { Any }

// Tool specification for listing functions in a Rust file
const list_functions_in_file_spec = mcp.Tool{
	name: 'list_functions_in_file'
	description: 'Lists all function definitions in a Rust file'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'file_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the Rust file'
			})
		}
		required: ['file_path']
	}
}

// Handler for list_functions_in_file
pub fn list_functions_in_file_handler(arguments map[string]Any) !mcp.ToolCallResult {
	file_path := arguments['file_path'].str()
	result := rust.list_functions_in_file(file_path) or { return mcp.error_tool_call_result(err) }
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.array_to_mcp_tool_contents[string](result)
	}
}

// Tool specification for listing structs in a Rust file
const list_structs_in_file_spec = mcp.Tool{
	name: 'list_structs_in_file'
	description: 'Lists all struct definitions in a Rust file'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'file_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the Rust file'
			})
		}
		required: ['file_path']
	}
}

// Handler for list_structs_in_file
pub fn list_structs_in_file_handler(arguments map[string]Any) !mcp.ToolCallResult {
	file_path := arguments['file_path'].str()
	result := rust.list_structs_in_file(file_path) or { return mcp.error_tool_call_result(err) }
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.array_to_mcp_tool_contents[string](result)
	}
}

// Tool specification for listing modules in a directory
const list_modules_in_dir_spec = mcp.Tool{
	name: 'list_modules_in_dir'
	description: 'Lists all Rust modules in a directory'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'dir_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the directory'
			})
		}
		required: ['dir_path']
	}
}

// Handler for list_modules_in_dir
pub fn list_modules_in_dir_handler(arguments map[string]Any) !mcp.ToolCallResult {
	dir_path := arguments['dir_path'].str()
	result := rust.list_modules_in_directory(dir_path) or { return mcp.error_tool_call_result(err) }
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.array_to_mcp_tool_contents[string](result)
	}
}

// Tool specification for getting an import statement
const get_import_statement_spec = mcp.Tool{
	name: 'get_import_statement'
	description: 'Generates appropriate Rust import statement for a module based on file paths'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'current_file': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the file where the import will be added'
			})
			'target_module': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the target module to be imported'
			})
		}
		required: ['current_file', 'target_module']
	}
}
@@ -118,33 +112,33 @@ pub fn get_import_statement_handler(arguments map[string]Any) !mcp.ToolCallResul
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](result)
	}
}

// Tool specification for getting module dependency information
const get_module_dependency_spec = mcp.Tool{
	name: 'get_module_dependency'
	description: 'Gets dependency information for adding a Rust module to a project'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'importer_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the file that will import the module'
			})
			'module_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the module that will be imported'
			})
		}
		required: ['importer_path', 'module_path']
	}
}

struct Tester {
	import_statement string
	module_path string
}

// Handler for get_module_dependency
@@ -157,9 +151,9 @@ pub fn get_module_dependency_handler(arguments map[string]Any) !mcp.ToolCallResu
	return mcp.ToolCallResult{
		is_error: false
		content: result_to_mcp_tool_contents[Tester](Tester{
			import_statement: dependency.import_statement
			module_path: dependency.module_path
		}) // Return JSON string
	}
}
@@ -168,21 +162,21 @@ pub fn get_module_dependency_handler(arguments map[string]Any) !mcp.ToolCallResu
// Specification for get_function_from_file tool
const get_function_from_file_spec = mcp.Tool{
	name: 'get_function_from_file'
	description: 'Get the declaration of a Rust function from a specified file path.'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'file_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the Rust file.'
			})
			'function_name': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: "Name of the function to retrieve (e.g., 'my_function' or 'MyStruct::my_method')."
			})
		}
		required: ['file_path', 'function_name']
	}
}
@@ -195,7 +189,7 @@ pub fn get_function_from_file_handler(arguments map[string]Any) !mcp.ToolCallRes
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](result)
	}
}
@@ -203,21 +197,21 @@ pub fn get_function_from_file_handler(arguments map[string]Any) !mcp.ToolCallRes
// Specification for get_function_from_module tool
const get_function_from_module_spec = mcp.Tool{
	name: 'get_function_from_module'
	description: 'Get the declaration of a Rust function from a specified module path (directory or file).'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'module_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the Rust module directory or file.'
			})
			'function_name': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: "Name of the function to retrieve (e.g., 'my_function' or 'MyStruct::my_method')."
			})
		}
		required: ['module_path', 'function_name']
	}
}
@@ -230,7 +224,7 @@ pub fn get_function_from_module_handler(arguments map[string]Any) !mcp.ToolCallR
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](result)
	}
}
@@ -238,21 +232,21 @@ pub fn get_function_from_module_handler(arguments map[string]Any) !mcp.ToolCallR
// Specification for get_struct_from_file tool
const get_struct_from_file_spec = mcp.Tool{
	name: 'get_struct_from_file'
	description: 'Get the declaration of a Rust struct from a specified file path.'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'file_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the Rust file.'
			})
			'struct_name': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: "Name of the struct to retrieve (e.g., 'MyStruct')."
			})
		}
		required: ['file_path', 'struct_name']
	}
}
@@ -265,7 +259,7 @@ pub fn get_struct_from_file_handler(arguments map[string]Any) !mcp.ToolCallResul
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](result)
	}
}
@@ -273,21 +267,21 @@ pub fn get_struct_from_file_handler(arguments map[string]Any) !mcp.ToolCallResul
// Specification for get_struct_from_module tool
const get_struct_from_module_spec = mcp.Tool{
	name: 'get_struct_from_module'
	description: 'Get the declaration of a Rust struct from a specified module path (directory or file).'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'module_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to the Rust module directory or file.'
			})
			'struct_name': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: "Name of the struct to retrieve (e.g., 'MyStruct')."
			})
		}
		required: ['module_path', 'struct_name']
	}
}
@@ -300,6 +294,6 @@ pub fn get_struct_from_module_handler(arguments map[string]Any) !mcp.ToolCallRes
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](result)
	}
}

View File

@@ -8,7 +8,7 @@ fn main() {
		eprintln('Failed to create MCP server: ${err}')
		return
	}

	// Start the server
	server.start() or {
		eprintln('Failed to start MCP server: ${err}')

View File

@@ -15,11 +15,11 @@ pub fn new_mcp_server(v &VCode) !&mcp.Server {
	mut server := mcp.new_server(mcp.MemoryBackend{
		tools: {
			'get_function_from_file': get_function_from_file_tool
			'write_vfile': write_vfile_tool
		}
		tool_handlers: {
			'get_function_from_file': v.get_function_from_file_tool_handler
			'write_vfile': v.write_vfile_tool_handler
		}
	}, mcp.ServerParams{
		config: mcp.ServerConfiguration{
@@ -30,4 +30,4 @@ pub fn new_mcp_server(v &VCode) !&mcp.Server {
		}
	})!
	return server
}

View File

@@ -3,7 +3,7 @@ module vcode
import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2 { Any }

const get_function_from_file_tool = mcp.Tool{
	name: 'get_function_from_file'
@@ -16,10 +16,10 @@ RETURNS: string - the function block including comments, or empty string if not
		typ: 'object'
		properties: {
			'file_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
			'function_name': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
		}
		required: ['file_path', 'function_name']

View File

@@ -3,7 +3,7 @@ module vcode
import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2 { Any }

const write_vfile_tool = mcp.Tool{
	name: 'write_vfile'
@@ -18,20 +18,20 @@ RETURNS: string - success message with the path of the written file'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
			'code': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
			'format': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'boolean'
			})
			'overwrite': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'boolean'
			})
			'prefix': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
		}
		required: ['path', 'code']
@@ -41,31 +41,27 @@ RETURNS: string - success message with the path of the written file'
pub fn (d &VCode) write_vfile_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
	path := arguments['path'].str()
	code_str := arguments['code'].str()

	// Parse optional parameters with defaults
	format := if 'format' in arguments { arguments['format'].bool() } else { false }
	overwrite := if 'overwrite' in arguments { arguments['overwrite'].bool() } else { false }
	prefix := if 'prefix' in arguments { arguments['prefix'].str() } else { '' }

	// Create write options
	options := code.WriteOptions{
		format: format
		overwrite: overwrite
		prefix: prefix
	}

	// Parse the V code string into a VFile
	vfile := code.parse_vfile(code_str) or { return mcp.error_tool_call_result(err) }

	// Write the VFile to the specified path
	vfile.write(path, options) or { return mcp.error_tool_call_result(err) }

	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string]('Successfully wrote V file to ${path}')
	}
}
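A sketch of the argument map this handler expects, showing how the optional flags default when omitted (the target path and code string are illustrative):

import x.json2 { Any }

fn demo(d &VCode) ! {
	args := {
		'path':   Any('/tmp/hello.v')
		'code':   Any('module main\n\nfn main() {\n\tprintln("hi")\n}\n')
		'format': Any(true)
		// 'overwrite' and 'prefix' are omitted, so they fall back to false and ''
	}
	res := d.write_vfile_tool_handler(args)!
	println(res.content)
}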

View File

@@ -8,47 +8,47 @@ import os
pub fn handler(arguments map[string]Any) !mcp.ToolCallResult {
	path := arguments['path'].str()

	// Check if path exists
	if !os.exists(path) {
		return mcp.ToolCallResult{
			is_error: true
			content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
		}
	}

	// Determine if path is a file or directory
	is_directory := os.is_dir(path)
	mut message := ''

	if is_directory {
		// Convert all pug files in the directory
		pugconvert.convert_pug(path) or {
			return mcp.ToolCallResult{
				is_error: true
				content: mcp.result_to_mcp_tool_contents[string]('Error converting pug files in directory: ${err}')
			}
		}
		message = "Successfully converted all pug files in directory '${path}'"
	} else if path.ends_with('.pug') {
		// Convert a single pug file
		pugconvert.convert_pug_file(path) or {
			return mcp.ToolCallResult{
				is_error: true
				content: mcp.result_to_mcp_tool_contents[string]('Error converting pug file: ${err}')
			}
		}
		message = "Successfully converted pug file '${path}'"
	} else {
		return mcp.ToolCallResult{
			is_error: true
			content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
		}
	}

	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](message)
	}
}

View File

@@ -1,18 +1,18 @@
module pugconvert

import freeflowuniverse.herolib.ai.mcp
import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.ai.mcp.logger

const specs = mcp.Tool{
	name: 'pugconvert'
	description: 'Convert Pug template files to Jet template files'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to a .pug file or directory containing .pug files to convert'
			})
		}

View File

@@ -2,33 +2,29 @@ module utils
// Helper function to extract code blocks from the response
pub fn extract_code_block(response string, identifier string, language string) string {
	// Find the start marker for the code block
	mut start_marker := '```${language}\n// ${identifier}'
	if language == '' {
		start_marker = '```\n// ${identifier}'
	}

	start_index := response.index(start_marker) or {
		// Try alternative format
		mut alt_marker := '```${language}\n${identifier}'
		if language == '' {
			alt_marker = '```\n${identifier}'
		}
		response.index(alt_marker) or { return '' }
	}

	// Find the end marker
	end_marker := '```'
	end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }

	// Extract the content between the markers
	content_start := start_index + start_marker.len
	content := response[content_start..end_index].trim_space()
	return content
}
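A small usage sketch: given an LLM response containing a fenced block headed by an identifier comment, the helper returns just the block body (the response text is illustrative):

fn demo() {
	response := '```rust\n// wrapper.rs\npub fn hello() {}\n```'
	body := extract_code_block(response, 'wrapper.rs', 'rust')
	assert body == 'pub fn hello() {}'
}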

View File

@@ -11,8 +11,7 @@ pub fn generate_module_from_openapi(openapi_path string) !string {
	openapi_spec := openapi.new(path: openapi_path)!
	actor_spec := specification.from_openapi(openapi_spec)!

	actor_module := generate_actor_module(actor_spec,
		interfaces: [.openapi, .http]
	)!

View File

@@ -1,6 +1,6 @@
module generator

import freeflowuniverse.herolib.core.code { CodeItem, Function, Import, Param, Result, Struct, VFile }
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.schemas.openapi
import freeflowuniverse.herolib.schemas.openrpc
@@ -18,12 +18,13 @@ pub struct Source {
}

pub fn generate_methods_file_str(source Source) !string {
	actor_spec := if path := source.openapi_path {
		specification.from_openapi(openapi.new(path: path)!)!
	} else if path := source.openrpc_path {
		specification.from_openrpc(openrpc.new(path: path)!)!
	} else {
		panic('No openapi or openrpc path provided')
	}
	return generate_methods_file(actor_spec)!.write_str()!
}
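The same Source-dispatch pattern repeats in the generators below. A usage sketch, assuming Source exposes optional openapi_path / openrpc_path fields as the if-guards imply (the spec path is illustrative):

fn demo() ! {
	src := Source{
		openapi_path: '/path/to/openapi.json'
	}
	methods_v := generate_methods_file_str(src)!
	println(methods_v)
}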

View File

@@ -10,12 +10,13 @@ import freeflowuniverse.herolib.baobab.specification { ActorMethod, ActorSpecifi
import freeflowuniverse.herolib.schemas.openapi

pub fn generate_methods_example_file_str(source Source) !string {
	actor_spec := if path := source.openapi_path {
		specification.from_openapi(openapi.new(path: path)!)!
	} else if path := source.openrpc_path {
		specification.from_openrpc(openrpc.new(path: path)!)!
	} else {
		panic('No openapi or openrpc path provided')
	}
	return generate_methods_example_file(actor_spec)!.write_str()!
}

View File

@@ -8,12 +8,13 @@ import freeflowuniverse.herolib.schemas.openapi
import freeflowuniverse.herolib.schemas.openrpc

pub fn generate_methods_interface_file_str(source Source) !string {
	actor_spec := if path := source.openapi_path {
		specification.from_openapi(openapi.new(path: path)!)!
	} else if path := source.openrpc_path {
		specification.from_openrpc(openrpc.new(path: path)!)!
	} else {
		panic('No openapi or openrpc path provided')
	}
	return generate_methods_interface_file(actor_spec)!.write_str()!
}

View File

@@ -8,12 +8,13 @@ import freeflowuniverse.herolib.schemas.openapi
import freeflowuniverse.herolib.schemas.openrpc

pub fn generate_model_file_str(source Source) !string {
	actor_spec := if path := source.openapi_path {
		specification.from_openapi(openapi.new(path: path)!)!
	} else if path := source.openrpc_path {
		specification.from_openrpc(openrpc.new(path: path)!)!
	} else {
		panic('No openapi or openrpc path provided')
	}
	return generate_model_file(actor_spec)!.write_str()!
}

View File

@@ -3,7 +3,7 @@ module specification
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.code { Struct }
import freeflowuniverse.herolib.schemas.jsonschema { Schema, SchemaRef }
import freeflowuniverse.herolib.schemas.openapi { MediaType, OpenAPI, OperationInfo, Parameter }
import freeflowuniverse.herolib.schemas.openrpc { ContentDescriptor, ErrorSpec, Example, ExamplePairing, ExampleRef }

// Helper function: Convert OpenAPI parameter to ContentDescriptor

View File

@@ -114,7 +114,7 @@ fn (mut f OpenAI) create_audio_request(args AudioArgs, endpoint string) !AudioRe
@[params]
pub struct CreateSpeechArgs {
pub:
	model string = 'tts-1'
	input string @[required]
	voice Voice = .alloy
	response_format AudioFormat = .mp3
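A hedged sketch of consuming these params; the method name and return type below are hypothetical, since this hunk only defines the args struct:

fn demo() ! {
	mut client := openai.get()!
	// `create_speech` is a hypothetical method name; raw-bytes return is assumed
	audio := client.create_speech(input: 'Hello from herolib', voice: .alloy)!
	println('received ${audio.len} bytes of audio')
}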

View File

@@ -9,9 +9,9 @@ fn test_chat_completion() {
	println(client.list_models()!)

	panic('sss')
	res := client.chat_completion(Messages{
		messages: [
			Message{
				role: .user
View File

@@ -44,31 +44,31 @@ pub mut:
struct ChatMessagesRaw {
mut:
	model string
	messages []MessageRaw
	temperature f64 = 0.5
	max_completion_tokens int = 32000
}

@[params]
pub struct CompletionArgs {
pub mut:
	model string
	msgs Messages
	temperature f64 = 0.5
	max_completion_tokens int = 32000
}

// creates a new chat completion given a list of messages
// each message consists of message content and the role of the author
pub fn (mut f OpenAI) chat_completion(args_ CompletionArgs) !ChatCompletion {
	mut args := args_
	if args.model == '' {
		args.model = f.model_default
	}
	mut m := ChatMessagesRaw{
		model: args.model
		temperature: args.temperature
		max_completion_tokens: args.max_completion_tokens
	}
	for msg in args.msgs.messages {
View File

@@ -28,7 +28,9 @@ fn args_get(args_ ArgsGet) ArgsGet {
pub fn get(args_ ArgsGet) !&OpenAI {
	mut context := base.context()!
	mut args := args_get(args_)
	mut obj := OpenAI{
		name: args.name
	}
	if args.name !in openai_global {
		if !exists(args)! {
			set(obj)!

View File

@@ -22,44 +22,43 @@ const default = true
@[heap]
pub struct OpenAI {
pub mut:
	name string = 'default'
	api_key string
	url string
	model_default string
	conn ?&httpconnection.HTTPConnection @[skip; str: skip]
}

// your checking & initialization code if needed
fn obj_init(mycfg_ OpenAI) !OpenAI {
	mut mycfg := mycfg_
	if mycfg.api_key == '' {
		mut k := os.getenv('AIKEY')
		if k != '' {
			mycfg.api_key = k
			k = os.getenv('AIURL')
			if k != '' {
				mycfg.url = k
			} else {
				return error('found AIKEY in env, but not AIURL')
			}
			k = os.getenv('AIMODEL')
			if k != '' {
				mycfg.model_default = k
			}
			return mycfg
		}
		mycfg.url = 'https://api.openai.com/v1/models'
		k = os.getenv('OPENAI_API_KEY')
		if k != '' {
			mycfg.api_key = k
			return mycfg
		}
		k = os.getenv('OPENROUTER_API_KEY')
		if k != '' {
			mycfg.api_key = k
			mycfg.url = 'https://openrouter.ai/api/v1'
			return mycfg
		}
	}
	return mycfg
@@ -75,12 +74,12 @@ pub fn (mut client OpenAI) connection() !&httpconnection.HTTPConnection {
		)!
		c2
	}
	c.default_header.set(.authorization, 'Bearer ${client.api_key}')
	client.conn = c
	return c
}

/////////////NORMALLY NO NEED TO TOUCH

pub fn heroscript_dumps(obj OpenAI) !string {

View File

@@ -6,9 +6,9 @@ import freeflowuniverse.herolib.core.pathlib
import os

pub interface IFile {
	name string
	write(string, WriteOptions) !
	write_str(WriteOptions) !string
}

pub struct File {
@@ -124,7 +124,9 @@ pub fn (code VFile) write_str(options WriteOptions) !string {
		''
	}

	mod_stmt := if code.mod == '' {
		''
	} else {
		'module ${code.mod}'
	}
@@ -169,9 +171,9 @@ pub fn parse_vfile(code string) !VFile {
	mut vfile := VFile{
		content: code
	}

	lines := code.split_into_lines()

	// Extract module name
	for line in lines {
		trimmed := line.trim_space()
@@ -180,7 +182,7 @@ pub fn parse_vfile(code string) !VFile {
			break
		}
	}

	// Extract imports
	for line in lines {
		trimmed := line.trim_space()
@@ -189,29 +191,29 @@ pub fn parse_vfile(code string) !VFile {
			vfile.imports << import_obj
		}
	}

	// Extract constants
	vfile.consts = parse_consts(code) or { []Const{} }

	// Split code into chunks for parsing structs and functions
	mut chunks := []string{}
	mut current_chunk := ''
	mut brace_count := 0
	mut in_struct_or_fn := false
	mut comment_block := []string{}

	for line in lines {
		trimmed := line.trim_space()

		// Collect comments
		if trimmed.starts_with('//') && !in_struct_or_fn {
			comment_block << line
			continue
		}

		// Check for struct or function start
		if (trimmed.starts_with('struct ') || trimmed.starts_with('pub struct ')
			|| trimmed.starts_with('fn ') || trimmed.starts_with('pub fn ')) && !in_struct_or_fn {
			in_struct_or_fn = true
			current_chunk = comment_block.join('\n')
			if current_chunk != '' {
@@ -219,14 +221,14 @@ pub fn parse_vfile(code string) !VFile {
			}
			current_chunk += line
			comment_block = []string{}

			if line.contains('{') {
				brace_count += line.count('{')
			}
			if line.contains('}') {
				brace_count -= line.count('}')
			}

			if brace_count == 0 {
				// Single line definition
				chunks << current_chunk
@@ -235,18 +237,18 @@ pub fn parse_vfile(code string) !VFile {
			}
			continue
		}

		// Add line to current chunk if we're inside a struct or function
		if in_struct_or_fn {
			current_chunk += '\n' + line
			if line.contains('{') {
				brace_count += line.count('{')
			}
			if line.contains('}') {
				brace_count -= line.count('}')
			}

			// Check if we've reached the end of the struct or function
			if brace_count == 0 {
				chunks << current_chunk
@@ -255,11 +257,11 @@ pub fn parse_vfile(code string) !VFile {
} }
} }
} }
// Parse each chunk and add to items // Parse each chunk and add to items
for chunk in chunks { for chunk in chunks {
trimmed := chunk.trim_space() trimmed := chunk.trim_space()
if trimmed.contains('struct ') || trimmed.contains('pub struct ') { if trimmed.contains('struct ') || trimmed.contains('pub struct ') {
// Parse struct // Parse struct
struct_obj := parse_struct(chunk) or { struct_obj := parse_struct(chunk) or {
@@ -276,6 +278,6 @@ pub fn parse_vfile(code string) !VFile {
vfile.items << fn_obj vfile.items << fn_obj
} }
} }
return vfile return vfile
} }
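The chunking pass above walks the file line by line and counts braces to find where each top-level struct or function ends. A minimal standalone sketch of that idea (a hypothetical helper, not part of this module):

module main

// split_top_level groups lines into chunks, one per top-level `{ ... }` block,
// by tracking brace depth — the same idea parse_vfile uses above.
fn split_top_level(code string) []string {
	mut chunks := []string{}
	mut current := ''
	mut inside := false
	mut depth := 0
	for line in code.split_into_lines() {
		t := line.trim_space()
		if !inside && (t.starts_with('fn ') || t.starts_with('struct ')) {
			inside = true
			current = line
		} else if inside {
			current += '\n' + line
		} else {
			continue
		}
		depth += line.count('{')
		depth -= line.count('}')
		if inside && depth == 0 {
			chunks << current
			current = ''
			inside = false
		}
	}
	return chunks
}

fn main() {
	src := 'fn a() {\n\tprintln(1)\n}\nstruct B {\n\tx int\n}'
	for c in split_top_level(src) {
		println('--- chunk ---\n${c}')
	}
}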
View File
@@ -1,7 +1,7 @@
module code module code
fn test_parse_vfile() { fn test_parse_vfile() {
code := ' code := "
module test module test
import os import os
@@ -9,7 +9,7 @@ import strings
import freeflowuniverse.herolib.core.texttools import freeflowuniverse.herolib.core.texttools
const ( const (
VERSION = \'1.0.0\' VERSION = '1.0.0'
DEBUG = true DEBUG = true
) )
@@ -21,7 +21,7 @@ pub mut:
// greet returns a greeting message // greet returns a greeting message
pub fn (p Person) greet() string { pub fn (p Person) greet() string {
return \'Hello, my name is \${p.name} and I am \${p.age} years old\' return 'Hello, my name is \${p.name} and I am \${p.age} years old'
} }
// create_person creates a new Person instance // create_person creates a new Person instance
@@ -31,7 +31,7 @@ pub fn create_person(name string, age int) Person {
age: age age: age
} }
} }
' "
vfile := parse_vfile(code) or { vfile := parse_vfile(code) or {
assert false, 'Failed to parse VFile: ${err}' assert false, 'Failed to parse VFile: ${err}'
@@ -50,7 +50,7 @@ pub fn create_person(name string, age int) Person {
// Test constants // Test constants
assert vfile.consts.len == 2 assert vfile.consts.len == 2
assert vfile.consts[0].name == 'VERSION' assert vfile.consts[0].name == 'VERSION'
assert vfile.consts[0].value == '\'1.0.0\'' assert vfile.consts[0].value == "'1.0.0'"
assert vfile.consts[1].name == 'DEBUG' assert vfile.consts[1].name == 'DEBUG'
assert vfile.consts[1].value == 'true' assert vfile.consts[1].value == 'true'
@@ -68,13 +68,13 @@ pub fn create_person(name string, age int) Person {
// Test functions // Test functions
functions := vfile.functions() functions := vfile.functions()
assert functions.len == 2 assert functions.len == 2
// Test method // Test method
assert functions[0].name == 'greet' assert functions[0].name == 'greet'
assert functions[0].is_pub == true assert functions[0].is_pub == true
assert functions[0].receiver.typ.vgen() == 'Person' assert functions[0].receiver.typ.vgen() == 'Person'
assert functions[0].result.typ.vgen() == 'string' assert functions[0].result.typ.vgen() == 'string'
// Test standalone function // Test standalone function
assert functions[1].name == 'create_person' assert functions[1].name == 'create_person'
assert functions[1].is_pub == true assert functions[1].is_pub == true
View File
@@ -133,30 +133,30 @@ pub fn parse_function(code_ string) !Function {
// Extract the result type, handling the ! for result types // Extract the result type, handling the ! for result types
mut result_type := code.all_after(')').all_before('{').replace(' ', '') mut result_type := code.all_after(')').all_before('{').replace(' ', '')
mut has_return := false mut has_return := false
// Check if the result type contains ! // Check if the result type contains !
if result_type.contains('!') { if result_type.contains('!') {
has_return = true has_return = true
result_type = result_type.replace('!', '') result_type = result_type.replace('!', '')
} }
result := new_param( result := new_param(
v: result_type v: result_type
)! )!
body := if code.contains('{') { code.all_after('{').all_before_last('}') } else { '' } body := if code.contains('{') { code.all_after('{').all_before_last('}') } else { '' }
// Process the comments into a description // Process the comments into a description
description := comment_lines.join('\n') description := comment_lines.join('\n')
return Function{ return Function{
name: name name: name
receiver: receiver receiver: receiver
params: params params: params
result: result result: result
body: body body: body
description: description description: description
is_pub: is_pub is_pub: is_pub
has_return: has_return has_return: has_return
} }
} }
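The `!` handling above can be isolated into a tiny sketch: everything between the closing parameter paren and the opening body brace is the result type, and a `!` marks the function as fallible. A hedged, standalone illustration (single-line signatures only):

module main

fn extract_result(signature string) (string, bool) {
	// text between `)` and `{` is the result type; `!` marks an error-capable fn
	mut res := signature.all_after(')').all_before('{').replace(' ', '')
	mut can_fail := false
	if res.contains('!') {
		can_fail = true
		res = res.replace('!', '')
	}
	return res, can_fail
}

fn main() {
	typ, can_fail := extract_result('pub fn load(path string) !string {')
	println('result type: ${typ}, can fail: ${can_fail}')
}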
View File
@@ -2,20 +2,20 @@ module code
fn test_parse_function_with_comments() { fn test_parse_function_with_comments() {
// Test function string with comments // Test function string with comments
function_str := '// test_function is a simple function for testing the MCP tool code generation function_str := "// test_function is a simple function for testing the MCP tool code generation
// It takes a config and returns a result // It takes a config and returns a result
pub fn test_function(config TestConfig) !TestResult { pub fn test_function(config TestConfig) !TestResult {
// This is just a mock implementation for testing purposes // This is just a mock implementation for testing purposes
if config.name == \'\' { if config.name == '' {
return error(\'Name cannot be empty\') return error('Name cannot be empty')
} }
return TestResult{ return TestResult{
success: config.enabled success: config.enabled
message: \'Test completed for \${config.name}\' message: 'Test completed for \${config.name}'
code: if config.enabled { 0 } else { 1 } code: if config.enabled { 0 } else { 1 }
} }
}' }"
// Parse the function // Parse the function
function := parse_function(function_str) or { function := parse_function(function_str) or {
@@ -30,7 +30,7 @@ pub fn test_function(config TestConfig) !TestResult {
assert function.params[0].name == 'config' assert function.params[0].name == 'config'
assert function.params[0].typ.symbol() == 'TestConfig' assert function.params[0].typ.symbol() == 'TestConfig'
assert function.result.typ.symbol() == 'TestResult' assert function.result.typ.symbol() == 'TestResult'
// Verify that the comments were correctly parsed into the description // Verify that the comments were correctly parsed into the description
expected_description := 'test_function is a simple function for testing the MCP tool code generation expected_description := 'test_function is a simple function for testing the MCP tool code generation
It takes a config and returns a result' It takes a config and returns a result'
@@ -41,9 +41,9 @@ It takes a config and returns a result'
fn test_parse_function_without_comments() { fn test_parse_function_without_comments() {
// Test function string without comments // Test function string without comments
function_str := 'fn simple_function(name string, count int) string { function_str := "fn simple_function(name string, count int) string {
return \'\${name} count: \${count}\' return '\${name} count: \${count}'
}' }"
// Parse the function // Parse the function
function := parse_function(function_str) or { function := parse_function(function_str) or {
@@ -60,7 +60,7 @@ fn test_parse_function_without_comments() {
assert function.params[1].name == 'count' assert function.params[1].name == 'count'
assert function.params[1].typ.symbol() == 'int' assert function.params[1].typ.symbol() == 'int'
assert function.result.typ.symbol() == 'string' assert function.result.typ.symbol() == 'string'
// Verify that there is no description // Verify that there is no description
assert function.description == '' assert function.description == ''
View File
@@ -79,4 +79,4 @@ pub fn (mod Module) write_str() !string {
} }
return out return out
} }
View File
@@ -69,10 +69,11 @@ pub fn parse_struct(code_ string) !Struct {
trimmed := line.trim_space() trimmed := line.trim_space()
if !in_struct && trimmed.starts_with('//') { if !in_struct && trimmed.starts_with('//') {
comment_lines << trimmed.trim_string_left('//').trim_space() comment_lines << trimmed.trim_string_left('//').trim_space()
} else if !in_struct && (trimmed.starts_with('struct ') || trimmed.starts_with('pub struct ')) { } else if !in_struct && (trimmed.starts_with('struct ')
|| trimmed.starts_with('pub struct ')) {
in_struct = true in_struct = true
struct_lines << line struct_lines << line
// Extract struct name // Extract struct name
is_pub = trimmed.starts_with('pub ') is_pub = trimmed.starts_with('pub ')
mut name_part := if is_pub { mut name_part := if is_pub {
@@ -80,7 +81,7 @@ pub fn parse_struct(code_ string) !Struct {
} else { } else {
trimmed.trim_string_left('struct ').trim_space() trimmed.trim_string_left('struct ').trim_space()
} }
// Handle generics in struct name // Handle generics in struct name
if name_part.contains('<') { if name_part.contains('<') {
struct_name = name_part.all_before('<').trim_space() struct_name = name_part.all_before('<').trim_space()
@@ -91,72 +92,71 @@ pub fn parse_struct(code_ string) !Struct {
} }
} else if in_struct { } else if in_struct {
struct_lines << line struct_lines << line
// Check if we've reached the end of the struct // Check if we've reached the end of the struct
if trimmed.starts_with('}') { if trimmed.starts_with('}') {
break break
} }
} }
} }
if struct_name == '' { if struct_name == '' {
return error('Invalid struct format: could not extract struct name') return error('Invalid struct format: could not extract struct name')
} }
// Process the struct fields // Process the struct fields
mut fields := []StructField{} mut fields := []StructField{}
mut current_section := '' mut current_section := ''
for i := 1; i < struct_lines.len - 1; i++ { // Skip the first and last lines (struct declaration and closing brace) for i := 1; i < struct_lines.len - 1; i++ { // Skip the first and last lines (struct declaration and closing brace)
line := struct_lines[i].trim_space() line := struct_lines[i].trim_space()
// Skip empty lines and comments // Skip empty lines and comments
if line == '' || line.starts_with('//') { if line == '' || line.starts_with('//') {
continue continue
} }
// Check for section markers (pub:, mut:, pub mut:) // Check for section markers (pub:, mut:, pub mut:)
if line.ends_with(':') { if line.ends_with(':') {
current_section = line current_section = line
continue continue
} }
// Parse field // Parse field
parts := line.split_any(' \t') parts := line.split_any(' \t')
if parts.len < 2 { if parts.len < 2 {
continue // Skip invalid lines continue // Skip invalid lines
} }
field_name := parts[0] field_name := parts[0]
field_type_str := parts[1..].join(' ') field_type_str := parts[1..].join(' ')
// Parse the type string into a Type object // Parse the type string into a Type object
field_type := parse_type(field_type_str) field_type := parse_type(field_type_str)
// Determine field visibility based on section // Determine field visibility based on section
is_pub_field := current_section.contains('pub') is_pub_field := current_section.contains('pub')
is_mut_field := current_section.contains('mut') is_mut_field := current_section.contains('mut')
fields << StructField{ fields << StructField{
name: field_name name: field_name
typ: field_type typ: field_type
is_pub: is_pub_field is_pub: is_pub_field
is_mut: is_mut_field is_mut: is_mut_field
} }
} }
// Process the comments into a description // Process the comments into a description
description := comment_lines.join('\n') description := comment_lines.join('\n')
return Struct{ return Struct{
name: struct_name name: struct_name
description: description description: description
is_pub: is_pub is_pub: is_pub
fields: fields fields: fields
} }
} }
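Field parsing above boils down to: the first whitespace-separated token is the field name, the remainder of the line is the type string, and visibility comes from the current `pub:`/`mut:` section. A small sketch of just the split step (FieldLine is a hypothetical holder, not the repo's type):

module main

struct FieldLine {
	name string
	typ  string
}

fn parse_field_line(line string) ?FieldLine {
	parts := line.trim_space().split_any(' \t')
	if parts.len < 2 {
		return none // not a `name type` field line
	}
	return FieldLine{
		name: parts[0]
		typ:  parts[1..].join(' ')
	}
}

fn main() {
	f := parse_field_line('tags map[string]string') or { return }
	println('${f.name}: ${f.typ}')
}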
pub struct Interface { pub struct Interface {
pub mut: pub mut:
name string name string
View File
@@ -21,17 +21,17 @@ pub:
It contains information about test execution' It contains information about test execution'
assert result.is_pub == true assert result.is_pub == true
assert result.fields.len == 3 assert result.fields.len == 3
assert result.fields[0].name == 'success' assert result.fields[0].name == 'success'
assert result.fields[0].typ.symbol() == 'bool' assert result.fields[0].typ.symbol() == 'bool'
assert result.fields[0].is_pub == true assert result.fields[0].is_pub == true
assert result.fields[0].is_mut == false assert result.fields[0].is_mut == false
assert result.fields[1].name == 'message' assert result.fields[1].name == 'message'
assert result.fields[1].typ.symbol() == 'string' assert result.fields[1].typ.symbol() == 'string'
assert result.fields[1].is_pub == true assert result.fields[1].is_pub == true
assert result.fields[1].is_mut == false assert result.fields[1].is_mut == false
assert result.fields[2].name == 'code' assert result.fields[2].name == 'code'
assert result.fields[2].typ.symbol() == 'int' assert result.fields[2].typ.symbol() == 'int'
assert result.fields[2].is_pub == true assert result.fields[2].is_pub == true
@@ -55,17 +55,17 @@ mut:
assert result2.description == '' assert result2.description == ''
assert result2.is_pub == false assert result2.is_pub == false
assert result2.fields.len == 3 assert result2.fields.len == 3
assert result2.fields[0].name == 'name' assert result2.fields[0].name == 'name'
assert result2.fields[0].typ.symbol() == 'string' assert result2.fields[0].typ.symbol() == 'string'
assert result2.fields[0].is_pub == true assert result2.fields[0].is_pub == true
assert result2.fields[0].is_mut == false assert result2.fields[0].is_mut == false
assert result2.fields[1].name == 'count' assert result2.fields[1].name == 'count'
assert result2.fields[1].typ.symbol() == 'int' assert result2.fields[1].typ.symbol() == 'int'
assert result2.fields[1].is_pub == false assert result2.fields[1].is_pub == false
assert result2.fields[1].is_mut == true assert result2.fields[1].is_mut == true
assert result2.fields[2].name == 'active' assert result2.fields[2].name == 'active'
assert result2.fields[2].typ.symbol() == 'bool' assert result2.fields[2].typ.symbol() == 'bool'
assert result2.fields[2].is_pub == false assert result2.fields[2].is_pub == false
View File
@@ -239,7 +239,7 @@ pub fn (t Type) empty_value() string {
pub fn parse_type(type_str string) Type { pub fn parse_type(type_str string) Type {
println('Parsing type string: "${type_str}"') println('Parsing type string: "${type_str}"')
mut type_str_trimmed := type_str.trim_space() mut type_str_trimmed := type_str.trim_space()
// Handle struct definitions by extracting just the struct name // Handle struct definitions by extracting just the struct name
if type_str_trimmed.contains('struct ') { if type_str_trimmed.contains('struct ') {
lines := type_str_trimmed.split_into_lines() lines := type_str_trimmed.split_into_lines()
@@ -257,7 +257,7 @@ pub fn parse_type(type_str string) Type {
} }
} }
} }
// Check for simple types first // Check for simple types first
if type_str_trimmed == 'string' { if type_str_trimmed == 'string' {
return String{} return String{}
@@ -266,41 +266,61 @@ pub fn parse_type(type_str string) Type {
} else if type_str_trimmed == 'int' { } else if type_str_trimmed == 'int' {
return Integer{} return Integer{}
} else if type_str_trimmed == 'u8' { } else if type_str_trimmed == 'u8' {
return Integer{bytes: 8, signed: false} return Integer{
bytes: 8
signed: false
}
} else if type_str_trimmed == 'u16' { } else if type_str_trimmed == 'u16' {
return Integer{bytes: 16, signed: false} return Integer{
bytes: 16
signed: false
}
} else if type_str_trimmed == 'u32' { } else if type_str_trimmed == 'u32' {
return Integer{bytes: 32, signed: false} return Integer{
bytes: 32
signed: false
}
} else if type_str_trimmed == 'u64' { } else if type_str_trimmed == 'u64' {
return Integer{bytes: 64, signed: false} return Integer{
bytes: 64
signed: false
}
} else if type_str_trimmed == 'i8' { } else if type_str_trimmed == 'i8' {
return Integer{bytes: 8} return Integer{
bytes: 8
}
} else if type_str_trimmed == 'i16' { } else if type_str_trimmed == 'i16' {
return Integer{bytes: 16} return Integer{
bytes: 16
}
} else if type_str_trimmed == 'i32' { } else if type_str_trimmed == 'i32' {
return Integer{bytes: 32} return Integer{
bytes: 32
}
} else if type_str_trimmed == 'i64' { } else if type_str_trimmed == 'i64' {
return Integer{bytes: 64} return Integer{
bytes: 64
}
} }
// Check for array types // Check for array types
if type_str_trimmed.starts_with('[]') { if type_str_trimmed.starts_with('[]') {
elem_type := type_str_trimmed.all_after('[]') elem_type := type_str_trimmed.all_after('[]')
return Array{parse_type(elem_type)} return Array{parse_type(elem_type)}
} }
// Check for map types // Check for map types
if type_str_trimmed.starts_with('map[') && type_str_trimmed.contains(']') { if type_str_trimmed.starts_with('map[') && type_str_trimmed.contains(']') {
value_type := type_str_trimmed.all_after(']') value_type := type_str_trimmed.all_after(']')
return Map{parse_type(value_type)} return Map{parse_type(value_type)}
} }
// Check for result types // Check for result types
if type_str_trimmed.starts_with('!') { if type_str_trimmed.starts_with('!') {
result_type := type_str_trimmed.all_after('!') result_type := type_str_trimmed.all_after('!')
return Result{parse_type(result_type)} return Result{parse_type(result_type)}
} }
// If no other type matches, treat as an object/struct type // If no other type matches, treat as an object/struct type
println('Treating as object type: "${type_str_trimmed}"') println('Treating as object type: "${type_str_trimmed}"')
return Object{type_str_trimmed} return Object{type_str_trimmed}
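parse_type falls through a fixed order: exact primitives first, then the `[]`, `map[`, and `!` wrappers (each recursing on the inner type), and finally Object as the catch-all. A usage sketch — the import path is inferred from the `module code` declaration elsewhere in this commit and is not confirmed here:

import freeflowuniverse.herolib.core.code

fn main() {
	println(code.parse_type('u32'))            // unsigned 32-bit Integer
	println(code.parse_type('[]string'))       // Array wrapping String
	println(code.parse_type('map[string]int')) // Map wrapping Integer
	println(code.parse_type('MyStruct'))       // falls through to Object
}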
View File
@@ -66,15 +66,17 @@ fn find_closing_brace(content string, start_i int) ?int {
// RETURNS: // RETURNS:
// string - the function block including comments, or error if not found // string - the function block including comments, or error if not found
pub fn get_function_from_file(file_path string, function_name string) !Function { pub fn get_function_from_file(file_path string, function_name string) !Function {
content := os.read_file(file_path) or { return error('Failed to read file ${file_path}: ${err}') } content := os.read_file(file_path) or {
return error('Failed to read file ${file_path}: ${err}')
}
vfile := parse_vfile(content) or { return error('Failed to parse file ${file_path}: ${err}') } vfile := parse_vfile(content) or { return error('Failed to parse file ${file_path}: ${err}') }
if fn_obj := vfile.get_function(function_name) { if fn_obj := vfile.get_function(function_name) {
return fn_obj return fn_obj
} }
return error('function ${function_name} not found in file ${file_path}') return error('function ${function_name} not found in file ${file_path}')
} }
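A hedged usage sketch for the helper above (the import path is assumed; the file path and function name are placeholders):

import freeflowuniverse.herolib.core.code

fn main() {
	fn_obj := code.get_function_from_file('/tmp/example.v', 'create_person') or {
		eprintln(err)
		return
	}
	println(fn_obj.name)
}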
// get_function_from_module searches for a function in all V files within a module // get_function_from_module searches for a function in all V files within a module
@@ -91,15 +93,11 @@ pub fn get_function_from_module(module_path string, function_name string) !Function
log.error('Found ${v_files} V files in ${module_path}') log.error('Found ${v_files} V files in ${module_path}')
for v_file in v_files { for v_file in v_files {
// Read the file content // Read the file content
content := os.read_file(v_file) or { content := os.read_file(v_file) or { continue }
continue
}
// Parse the file // Parse the file
vfile := parse_vfile(content) or { vfile := parse_vfile(content) or { continue }
continue
}
// Look for the function // Look for the function
if fn_obj := vfile.get_function(function_name) { if fn_obj := vfile.get_function(function_name) {
return fn_obj return fn_obj
@@ -139,7 +137,7 @@ pub fn get_type_from_module(module_path string, type_name string) !string {
if i == -1 { if i == -1 {
type_import := content.split_into_lines().filter(it.contains('import') type_import := content.split_into_lines().filter(it.contains('import')
&& it.contains(type_name)) && it.contains(type_name))
if type_import.len > 0 { if type_import.len > 0 {
log.debug('debugzoooo') log.debug('debugzoooo')
mod := type_import[0].trim_space().trim_string_left('import ').all_before(' ') mod := type_import[0].trim_space().trim_string_left('import ').all_before(' ')
View File
@@ -8,17 +8,17 @@ pub:
data []u8 data []u8
} }
// to_bytes converts a Currency to serialized bytes // to_bytes converts a Currency to serialized bytes
pub fn (c Currency) to_bytes() !CurrencyBytes { pub fn (c Currency) to_bytes() !CurrencyBytes {
mut enc := encoder.new() mut enc := encoder.new()
// Add unique encoding ID to identify this type of data // Add unique encoding ID to identify this type of data
enc.add_u16(500) // Unique ID for Currency type enc.add_u16(500) // Unique ID for Currency type
// Encode Currency fields // Encode Currency fields
enc.add_string(c.name) enc.add_string(c.name)
enc.add_f64(c.usdval) enc.add_f64(c.usdval)
return CurrencyBytes{ return CurrencyBytes{
data: enc.data data: enc.data
} }
@@ -28,16 +28,16 @@ pub fn (c Currency) to_bytes() !CurrencyBytes {
pub fn from_bytes(bytes CurrencyBytes) !Currency { pub fn from_bytes(bytes CurrencyBytes) !Currency {
mut d := encoder.decoder_new(bytes.data) mut d := encoder.decoder_new(bytes.data)
mut currency := Currency{} mut currency := Currency{}
// Check encoding ID to verify this is the correct type of data // Check encoding ID to verify this is the correct type of data
encoding_id := d.get_u16()! encoding_id := d.get_u16()!
if encoding_id != 500 { if encoding_id != 500 {
return error('Wrong file type: expected encoding ID 500, got ${encoding_id}, for currency') return error('Wrong file type: expected encoding ID 500, got ${encoding_id}, for currency')
} }
// Decode Currency fields // Decode Currency fields
currency.name = d.get_string()! currency.name = d.get_string()!
currency.usdval = d.get_f64()! currency.usdval = d.get_f64()!
return currency return currency
} }
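A round-trip sketch for the pair above, assuming the module is importable as freeflowuniverse.herolib.data.currency (path not confirmed by this diff):

import freeflowuniverse.herolib.data.currency

fn roundtrip() ! {
	c := currency.Currency{
		name:   'USD'
		usdval: 1.0
	}
	bytes := c.to_bytes()! // prepends the type ID 500
	restored := currency.from_bytes(bytes)!
	assert restored.name == c.name
	assert restored.usdval == c.usdval
}

fn main() {
	roundtrip() or { eprintln(err) }
}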
View File
@@ -241,6 +241,6 @@ pub fn (mut d Decoder) get_map_bytes() !map[string][]u8 {
// Gets GID from encoded string // Gets GID from encoded string
pub fn (mut d Decoder) get_gid() !gid.GID { pub fn (mut d Decoder) get_gid() !gid.GID {
gid_str := d.get_string()! gid_str := d.get_string()!
return gid.new(gid_str) return gid.new(gid_str)
} }
View File
@@ -191,17 +191,17 @@ fn test_map_bytes() {
fn test_gid() { fn test_gid() {
// Test with a standard GID // Test with a standard GID
mut e := new() mut e := new()
mut g1 := gid.new("myproject:123")! mut g1 := gid.new('myproject:123')!
e.add_gid(g1) e.add_gid(g1)
// Test with a GID that has a default circle name // Test with a GID that has a default circle name
mut g2 := gid.new_from_parts("", 999)! mut g2 := gid.new_from_parts('', 999)!
e.add_gid(g2) e.add_gid(g2)
// Test with a GID that has spaces before fixing // Test with a GID that has spaces before fixing
mut g3 := gid.new("project1:456")! mut g3 := gid.new('project1:456')!
e.add_gid(g3) e.add_gid(g3)
mut d := decoder_new(e.data) mut d := decoder_new(e.data)
assert d.get_gid()!.str() == g1.str() assert d.get_gid()!.str() == g1.str()
assert d.get_gid()!.str() == g2.str() assert d.get_gid()!.str() == g2.str()
@@ -211,74 +211,74 @@ fn test_gid() {
fn test_currency() { fn test_currency() {
// Create USD currency manually // Create USD currency manually
mut usd_curr := currency.Currency{ mut usd_curr := currency.Currency{
name: 'USD' name: 'USD'
usdval: 1.0 usdval: 1.0
} }
// Create EUR currency manually // Create EUR currency manually
mut eur_curr := currency.Currency{ mut eur_curr := currency.Currency{
name: 'EUR' name: 'EUR'
usdval: 1.1 usdval: 1.1
} }
// Create Bitcoin currency manually // Create Bitcoin currency manually
mut btc_curr := currency.Currency{ mut btc_curr := currency.Currency{
name: 'BTC' name: 'BTC'
usdval: 60000.0 usdval: 60000.0
} }
// Create TFT currency manually // Create TFT currency manually
mut tft_curr := currency.Currency{ mut tft_curr := currency.Currency{
name: 'TFT' name: 'TFT'
usdval: 0.05 usdval: 0.05
} }
// Create currency amounts // Create currency amounts
mut usd_amount := currency.Amount{ mut usd_amount := currency.Amount{
currency: usd_curr currency: usd_curr
val: 1.5 val: 1.5
} }
mut eur_amount := currency.Amount{ mut eur_amount := currency.Amount{
currency: eur_curr currency: eur_curr
val: 100.0 val: 100.0
} }
mut btc_amount := currency.Amount{ mut btc_amount := currency.Amount{
currency: btc_curr currency: btc_curr
val: 0.01 val: 0.01
} }
mut tft_amount := currency.Amount{ mut tft_amount := currency.Amount{
currency: tft_curr currency: tft_curr
val: 1000.0 val: 1000.0
} }
mut e := new() mut e := new()
e.add_currency(usd_amount) e.add_currency(usd_amount)
e.add_currency(eur_amount) e.add_currency(eur_amount)
e.add_currency(btc_amount) e.add_currency(btc_amount)
e.add_currency(tft_amount) e.add_currency(tft_amount)
mut d := decoder_new(e.data) mut d := decoder_new(e.data)
// Override the currency.get function by manually checking currency names // Override the currency.get function by manually checking currency names
// since we can't rely on the global currency functions for testing // since we can't rely on the global currency functions for testing
mut decoded_curr1 := d.get_string()! mut decoded_curr1 := d.get_string()!
mut decoded_val1 := d.get_f64()! mut decoded_val1 := d.get_f64()!
assert decoded_curr1 == 'USD' assert decoded_curr1 == 'USD'
assert math.abs(decoded_val1 - 1.5) < 0.00001 assert math.abs(decoded_val1 - 1.5) < 0.00001
mut decoded_curr2 := d.get_string()! mut decoded_curr2 := d.get_string()!
mut decoded_val2 := d.get_f64()! mut decoded_val2 := d.get_f64()!
assert decoded_curr2 == 'EUR' assert decoded_curr2 == 'EUR'
assert math.abs(decoded_val2 - 100.0) < 0.00001 assert math.abs(decoded_val2 - 100.0) < 0.00001
mut decoded_curr3 := d.get_string()! mut decoded_curr3 := d.get_string()!
mut decoded_val3 := d.get_f64()! mut decoded_val3 := d.get_f64()!
assert decoded_curr3 == 'BTC' assert decoded_curr3 == 'BTC'
assert math.abs(decoded_val3 - 0.01) < 0.00001 assert math.abs(decoded_val3 - 0.01) < 0.00001
mut decoded_curr4 := d.get_string()! mut decoded_curr4 := d.get_string()!
mut decoded_val4 := d.get_f64()! mut decoded_val4 := d.get_f64()!
assert decoded_curr4 == 'TFT' assert decoded_curr4 == 'TFT'
View File
@@ -31,23 +31,23 @@ pub fn new(txt_ string) !GID {
} }
cid_str := parts[1].trim_space() cid_str := parts[1].trim_space()
cid := cid_str.u32() //TODO: what if this is not a number? cid := cid_str.u32() // TODO: what if this is not a number?
return GID{ return GID{
circle: circle circle: circle
cid: cid cid: cid
} }
} }
pub fn new_from_parts(circle_ string, cid u32) !GID { pub fn new_from_parts(circle_ string, cid u32) !GID {
mut circle:=circle_ mut circle := circle_
if circle.trim_space() == '' { if circle.trim_space() == '' {
circle="default" circle = 'default'
} }
return GID{ return GID{
circle: circle circle: circle
cid: cid cid: cid
} }
} }
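Usage sketch, with the import path assumed (not shown in this diff):

import freeflowuniverse.herolib.data.gid

fn main() {
	g1 := gid.new('myproject:123') or { panic(err) }
	g2 := gid.new_from_parts('', 999) or { panic(err) }
	println(g1.str())
	println(g2.str()) // empty circle name falls back to 'default'
}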
View File
@@ -4,7 +4,7 @@ import os
// Define a struct for test cases // Define a struct for test cases
struct PrefixEdgeCaseTest { struct PrefixEdgeCaseTest {
prefix string prefix string
expected_keys []string expected_keys []string
} }
@@ -17,10 +17,20 @@ fn test_edge_case_prefix_search() {
// Keys with a common prefix that may cause issues // Keys with a common prefix that may cause issues
keys := [ keys := [
'test', 'testing', 'tea', 'team', 'technology', 'test',
'apple', 'application', 'appreciate', 'testing',
'banana', 'bandage', 'band', 'tea',
'car', 'carpet', 'carriage' 'team',
'technology',
'apple',
'application',
'appreciate',
'banana',
'bandage',
'band',
'car',
'carpet',
'carriage',
] ]
// Insert all keys // Insert all keys
@@ -36,59 +46,58 @@ fn test_edge_case_prefix_search() {
test_cases := [ test_cases := [
// prefix, expected_keys // prefix, expected_keys
PrefixEdgeCaseTest{ PrefixEdgeCaseTest{
prefix: 'te' prefix: 'te'
expected_keys: ['test', 'testing', 'tea', 'team', 'technology'] expected_keys: ['test', 'testing', 'tea', 'team', 'technology']
}, },
PrefixEdgeCaseTest{ PrefixEdgeCaseTest{
prefix: 'tes' prefix: 'tes'
expected_keys: ['test', 'testing'] expected_keys: ['test', 'testing']
}, },
PrefixEdgeCaseTest{ PrefixEdgeCaseTest{
prefix: 'tea' prefix: 'tea'
expected_keys: ['tea', 'team'] expected_keys: ['tea', 'team']
}, },
PrefixEdgeCaseTest{ PrefixEdgeCaseTest{
prefix: 'a' prefix: 'a'
expected_keys: ['apple', 'application', 'appreciate'] expected_keys: ['apple', 'application', 'appreciate']
}, },
PrefixEdgeCaseTest{ PrefixEdgeCaseTest{
prefix: 'ba' prefix: 'ba'
expected_keys: ['banana', 'bandage', 'band'] expected_keys: ['banana', 'bandage', 'band']
}, },
PrefixEdgeCaseTest{ PrefixEdgeCaseTest{
prefix: 'ban' prefix: 'ban'
expected_keys: ['banana', 'band'] expected_keys: ['banana', 'band']
}, },
PrefixEdgeCaseTest{ PrefixEdgeCaseTest{
prefix: 'c' prefix: 'c'
expected_keys: ['car', 'carpet', 'carriage'] expected_keys: ['car', 'carpet', 'carriage']
} },
] ]
for test_case in test_cases { for test_case in test_cases {
prefix := test_case.prefix prefix := test_case.prefix
expected_keys := test_case.expected_keys expected_keys := test_case.expected_keys
result := tree.list(prefix) or { result := tree.list(prefix) or {
assert false, 'Failed to list keys with prefix "${prefix}": ${err}' assert false, 'Failed to list keys with prefix "${prefix}": ${err}'
return return
} }
// Check count matches // Check count matches
assert result.len == expected_keys.len, assert result.len == expected_keys.len, 'For prefix "${prefix}": expected ${expected_keys.len} keys, got ${result.len} (keys: ${result})'
'For prefix "${prefix}": expected ${expected_keys.len} keys, got ${result.len} (keys: ${result})'
// Check all expected keys are present // Check all expected keys are present
for key in expected_keys { for key in expected_keys {
assert key in result, 'Key "${key}" missing from results for prefix "${prefix}"' assert key in result, 'Key "${key}" missing from results for prefix "${prefix}"'
} }
// Verify each result starts with the prefix // Verify each result starts with the prefix
for key in result { for key in result {
assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"' assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"'
} }
} }
println('All edge case prefix tests passed successfully!') println('All edge case prefix tests passed successfully!')
} }
@@ -102,8 +111,13 @@ fn test_tricky_insertion_order() {
// Insert keys in a specific order that might trigger the issue // Insert keys in a specific order that might trigger the issue
// Insert 'team' first, then 'test', etc. to ensure tree layout is challenging // Insert 'team' first, then 'test', etc. to ensure tree layout is challenging
tricky_keys := [ tricky_keys := [
'team', 'test', 'technology', 'tea', // 'te' prefix cases 'team',
'car', 'carriage', 'carpet' // 'ca' prefix cases 'test',
'technology',
'tea', // 'te' prefix cases
'car',
'carriage',
'carpet', // 'ca' prefix cases
] ]
// Insert all keys // Insert all keys
@@ -114,7 +128,7 @@ fn test_tricky_insertion_order() {
return return
} }
} }
// Test 'te' prefix // Test 'te' prefix
te_results := tree.list('te') or { te_results := tree.list('te') or {
assert false, 'Failed to list keys with prefix "te": ${err}' assert false, 'Failed to list keys with prefix "te": ${err}'
@@ -125,7 +139,7 @@ fn test_tricky_insertion_order() {
assert 'test' in te_results, 'Expected "test" in results' assert 'test' in te_results, 'Expected "test" in results'
assert 'technology' in te_results, 'Expected "technology" in results' assert 'technology' in te_results, 'Expected "technology" in results'
assert 'tea' in te_results, 'Expected "tea" in results' assert 'tea' in te_results, 'Expected "tea" in results'
// Test 'ca' prefix // Test 'ca' prefix
ca_results := tree.list('ca') or { ca_results := tree.list('ca') or {
assert false, 'Failed to list keys with prefix "ca": ${err}' assert false, 'Failed to list keys with prefix "ca": ${err}'
@@ -135,6 +149,6 @@ fn test_tricky_insertion_order() {
assert 'car' in ca_results, 'Expected "car" in results' assert 'car' in ca_results, 'Expected "car" in results'
assert 'carriage' in ca_results, 'Expected "carriage" in results' assert 'carriage' in ca_results, 'Expected "carriage" in results'
assert 'carpet' in ca_results, 'Expected "carpet" in results' assert 'carpet' in ca_results, 'Expected "carpet" in results'
println('All tricky insertion order tests passed successfully!') println('All tricky insertion order tests passed successfully!')
} }
View File
@@ -4,7 +4,7 @@ import os
// Define a struct for test cases // Define a struct for test cases
struct PrefixTestCase { struct PrefixTestCase {
prefix string prefix string
expected_count int expected_count int
} }
@@ -17,13 +17,31 @@ fn test_complex_prefix_search() {
// Insert a larger set of keys with various prefixes // Insert a larger set of keys with various prefixes
keys := [ keys := [
'a', 'ab', 'abc', 'abcd', 'abcde', 'a',
'b', 'bc', 'bcd', 'bcde', 'ab',
'c', 'cd', 'cde', 'abc',
'x', 'xy', 'xyz', 'abcd',
'test', 'testing', 'tested', 'tests', 'abcde',
'team', 'teammate', 'teams', 'b',
'tech', 'technology', 'technical' 'bc',
'bcd',
'bcde',
'c',
'cd',
'cde',
'x',
'xy',
'xyz',
'test',
'testing',
'tested',
'tests',
'team',
'teammate',
'teams',
'tech',
'technology',
'technical',
] ]
// Insert all keys // Insert all keys
@@ -54,8 +72,8 @@ fn test_complex_prefix_search() {
PrefixTestCase{'x', 3}, PrefixTestCase{'x', 3},
PrefixTestCase{'xy', 2}, PrefixTestCase{'xy', 2},
PrefixTestCase{'xyz', 1}, PrefixTestCase{'xyz', 1},
PrefixTestCase{'z', 0}, // No matches PrefixTestCase{'z', 0}, // No matches
PrefixTestCase{'', keys.len} // All keys PrefixTestCase{'', keys.len}, // All keys
] ]
for test_case in test_cases { for test_case in test_cases {
@@ -70,7 +88,7 @@ fn test_complex_prefix_search() {
} }
assert result.len == expected_count, 'For prefix "${prefix}": expected ${expected_count} keys, got ${result.len}' assert result.len == expected_count, 'For prefix "${prefix}": expected ${expected_count} keys, got ${result.len}'
// Verify each result starts with the prefix // Verify each result starts with the prefix
for key in result { for key in result {
assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"' assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"'
@@ -87,13 +105,21 @@ fn test_special_prefix_search() {
// Insert keys with special characters and longer strings // Insert keys with special characters and longer strings
special_keys := [ special_keys := [
'user:1:profile', 'user:1:settings', 'user:1:posts', 'user:1:profile',
'user:2:profile', 'user:2:settings', 'user:1:settings',
'config:app:name', 'config:app:version', 'config:app:debug', 'user:1:posts',
'config:db:host', 'config:db:port', 'user:2:profile',
'data:2023:01:01', 'data:2023:01:02', 'data:2023:02:01', 'user:2:settings',
'config:app:name',
'config:app:version',
'config:app:debug',
'config:db:host',
'config:db:port',
'data:2023:01:01',
'data:2023:01:02',
'data:2023:02:01',
'very:long:key:with:multiple:segments:and:special:characters:!@#$%^&*()', 'very:long:key:with:multiple:segments:and:special:characters:!@#$%^&*()',
'another:very:long:key:with:different:segments' 'another:very:long:key:with:different:segments',
] ]
// Insert all keys // Insert all keys
@@ -118,7 +144,7 @@ fn test_special_prefix_search() {
PrefixTestCase{'data:2023:01:', 2}, PrefixTestCase{'data:2023:01:', 2},
PrefixTestCase{'very:', 1}, PrefixTestCase{'very:', 1},
PrefixTestCase{'another:', 1}, PrefixTestCase{'another:', 1},
PrefixTestCase{'nonexistent:', 0} PrefixTestCase{'nonexistent:', 0},
] ]
for test_case in special_test_cases { for test_case in special_test_cases {
@@ -133,7 +159,7 @@ fn test_special_prefix_search() {
} }
assert result.len == expected_count, 'For prefix "${prefix}": expected ${expected_count} keys, got ${result.len}' assert result.len == expected_count, 'For prefix "${prefix}": expected ${expected_count} keys, got ${result.len}'
// Verify each result starts with the prefix // Verify each result starts with the prefix
for key in result { for key in result {
assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"' assert key.starts_with(prefix), 'Key "${key}" does not start with prefix "${prefix}"'
@@ -151,9 +177,9 @@ fn test_prefix_search_performance() {
// Generate a larger dataset (1000 keys) // Generate a larger dataset (1000 keys)
prefixes := ['user', 'config', 'data', 'app', 'service', 'api', 'test', 'dev', 'prod', 'staging'] prefixes := ['user', 'config', 'data', 'app', 'service', 'api', 'test', 'dev', 'prod', 'staging']
mut large_keys := []string{} mut large_keys := []string{}
for prefix in prefixes { for prefix in prefixes {
for i in 0..100 { for i in 0 .. 100 {
large_keys << '${prefix}:${i}:name' large_keys << '${prefix}:${i}:name'
} }
} }
@@ -175,7 +201,7 @@ fn test_prefix_search_performance() {
} }
assert result.len == 100, 'For prefix "${prefix}:": expected 100 keys, got ${result.len}' assert result.len == 100, 'For prefix "${prefix}:": expected 100 keys, got ${result.len}'
// Verify each result starts with the prefix // Verify each result starts with the prefix
for key in result { for key in result {
assert key.starts_with(prefix + ':'), 'Key "${key}" does not start with prefix "${prefix}:"' assert key.starts_with(prefix + ':'), 'Key "${key}" does not start with prefix "${prefix}:"'
@@ -184,7 +210,7 @@ fn test_prefix_search_performance() {
// Test more specific prefixes // Test more specific prefixes
for prefix in prefixes { for prefix in prefixes {
for i in 0..10 { for i in 0 .. 10 {
specific_prefix := '${prefix}:${i}' specific_prefix := '${prefix}:${i}'
result := tree.list(specific_prefix) or { result := tree.list(specific_prefix) or {
assert false, 'Failed to list keys with prefix "${specific_prefix}": ${err}' assert false, 'Failed to list keys with prefix "${specific_prefix}": ${err}'
@@ -195,4 +221,4 @@ fn test_prefix_search_performance() {
assert result[0] == '${specific_prefix}:name', 'Expected "${specific_prefix}:name", got "${result[0]}"' assert result[0] == '${specific_prefix}:name', 'Expected "${specific_prefix}:name", got "${result[0]}"'
} }
} }
} }
View File
@@ -62,11 +62,11 @@ fn deserialize_node(data []u8) !Node {
right_id := d.get_u32()! right_id := d.get_u32()!
return Node{ return Node{
character: character character: character
is_end_of_string: is_end_of_string is_end_of_string: is_end_of_string
value: value value: value
left_id: left_id left_id: left_id
middle_id: middle_id middle_id: middle_id
right_id: right_id right_id: right_id
} }
} }
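Since Node and the serializers are module-private, a round-trip has to live inside tst; a minimal sketch mirroring the field order read above (version byte first, then character, the end-of-string flag, the value, and the three child IDs):

fn roundtrip_example() {
	node := Node{
		character:        `k`
		is_end_of_string: true
		value:            'v'.bytes()
		left_id:          0
		middle_id:        0
		right_id:         0
	}
	data := serialize_node(node)
	// data[0] carries the format version; deserialize_node rejects unknown values
	restored := deserialize_node(data) or { panic(err) }
	assert restored.value.bytestr() == 'v'
}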
View File
@@ -4,23 +4,23 @@ module tst
fn test_node_serialization() { fn test_node_serialization() {
// Create a leaf node (end of string) // Create a leaf node (end of string)
leaf_node := Node{ leaf_node := Node{
character: `a` character: `a`
is_end_of_string: true is_end_of_string: true
value: 'test value'.bytes() value: 'test value'.bytes()
left_id: 0 left_id: 0
middle_id: 0 middle_id: 0
right_id: 0 right_id: 0
} }
// Serialize the leaf node // Serialize the leaf node
leaf_data := serialize_node(leaf_node) leaf_data := serialize_node(leaf_node)
// Deserialize and verify // Deserialize and verify
deserialized_leaf := deserialize_node(leaf_data) or { deserialized_leaf := deserialize_node(leaf_data) or {
assert false, 'Failed to deserialize leaf node: ${err}' assert false, 'Failed to deserialize leaf node: ${err}'
return return
} }
assert deserialized_leaf.character == leaf_node.character, 'Character mismatch' assert deserialized_leaf.character == leaf_node.character, 'Character mismatch'
assert deserialized_leaf.is_end_of_string == leaf_node.is_end_of_string, 'is_end_of_string mismatch' assert deserialized_leaf.is_end_of_string == leaf_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_leaf.value.bytestr() == leaf_node.value.bytestr(), 'Value mismatch' assert deserialized_leaf.value.bytestr() == leaf_node.value.bytestr(), 'Value mismatch'
@@ -30,23 +30,23 @@ fn test_node_serialization() {
// Create an internal node (not end of string) // Create an internal node (not end of string)
internal_node := Node{ internal_node := Node{
character: `b` character: `b`
is_end_of_string: false is_end_of_string: false
value: []u8{} value: []u8{}
left_id: 10 left_id: 10
middle_id: 20 middle_id: 20
right_id: 30 right_id: 30
} }
// Serialize the internal node // Serialize the internal node
internal_data := serialize_node(internal_node) internal_data := serialize_node(internal_node)
// Deserialize and verify // Deserialize and verify
deserialized_internal := deserialize_node(internal_data) or { deserialized_internal := deserialize_node(internal_data) or {
assert false, 'Failed to deserialize internal node: ${err}' assert false, 'Failed to deserialize internal node: ${err}'
return return
} }
assert deserialized_internal.character == internal_node.character, 'Character mismatch' assert deserialized_internal.character == internal_node.character, 'Character mismatch'
assert deserialized_internal.is_end_of_string == internal_node.is_end_of_string, 'is_end_of_string mismatch' assert deserialized_internal.is_end_of_string == internal_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_internal.value.len == 0, 'Value should be empty' assert deserialized_internal.value.len == 0, 'Value should be empty'
@@ -56,23 +56,23 @@ fn test_node_serialization() {
// Create a root node // Create a root node
root_node := Node{ root_node := Node{
character: 0 // null character for root character: 0 // null character for root
is_end_of_string: false is_end_of_string: false
value: []u8{} value: []u8{}
left_id: 5 left_id: 5
middle_id: 15 middle_id: 15
right_id: 25 right_id: 25
} }
// Serialize the root node // Serialize the root node
root_data := serialize_node(root_node) root_data := serialize_node(root_node)
// Deserialize and verify // Deserialize and verify
deserialized_root := deserialize_node(root_data) or { deserialized_root := deserialize_node(root_data) or {
assert false, 'Failed to deserialize root node: ${err}' assert false, 'Failed to deserialize root node: ${err}'
return return
} }
assert deserialized_root.character == root_node.character, 'Character mismatch' assert deserialized_root.character == root_node.character, 'Character mismatch'
assert deserialized_root.is_end_of_string == root_node.is_end_of_string, 'is_end_of_string mismatch' assert deserialized_root.is_end_of_string == root_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_root.value.len == 0, 'Value should be empty' assert deserialized_root.value.len == 0, 'Value should be empty'
@@ -85,23 +85,23 @@ fn test_node_serialization() {
fn test_special_serialization() { fn test_special_serialization() {
// Create a node with special character // Create a node with special character
special_node := Node{ special_node := Node{
character: `!` // special character character: `!` // special character
is_end_of_string: true is_end_of_string: true
value: 'special value with spaces and symbols: !@#$%^&*()'.bytes() value: 'special value with spaces and symbols: !@#$%^&*()'.bytes()
left_id: 42 left_id: 42
middle_id: 99 middle_id: 99
right_id: 123 right_id: 123
} }
// Serialize the special node // Serialize the special node
special_data := serialize_node(special_node) special_data := serialize_node(special_node)
// Deserialize and verify // Deserialize and verify
deserialized_special := deserialize_node(special_data) or { deserialized_special := deserialize_node(special_data) or {
assert false, 'Failed to deserialize special node: ${err}' assert false, 'Failed to deserialize special node: ${err}'
return return
} }
assert deserialized_special.character == special_node.character, 'Character mismatch' assert deserialized_special.character == special_node.character, 'Character mismatch'
assert deserialized_special.is_end_of_string == special_node.is_end_of_string, 'is_end_of_string mismatch' assert deserialized_special.is_end_of_string == special_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_special.value.bytestr() == special_node.value.bytestr(), 'Value mismatch' assert deserialized_special.value.bytestr() == special_node.value.bytestr(), 'Value mismatch'
@@ -111,37 +111,37 @@ fn test_special_serialization() {
// Create a node with a large value // Create a node with a large value
mut large_value := []u8{len: 1000} mut large_value := []u8{len: 1000}
for i in 0..1000 { for i in 0 .. 1000 {
large_value[i] = u8(i % 256) large_value[i] = u8(i % 256)
} }
large_node := Node{ large_node := Node{
character: `z` character: `z`
is_end_of_string: true is_end_of_string: true
value: large_value value: large_value
left_id: 1 left_id: 1
middle_id: 2 middle_id: 2
right_id: 3 right_id: 3
} }
// Serialize the large node // Serialize the large node
large_data := serialize_node(large_node) large_data := serialize_node(large_node)
// Deserialize and verify // Deserialize and verify
deserialized_large := deserialize_node(large_data) or { deserialized_large := deserialize_node(large_data) or {
assert false, 'Failed to deserialize large node: ${err}' assert false, 'Failed to deserialize large node: ${err}'
return return
} }
assert deserialized_large.character == large_node.character, 'Character mismatch' assert deserialized_large.character == large_node.character, 'Character mismatch'
assert deserialized_large.is_end_of_string == large_node.is_end_of_string, 'is_end_of_string mismatch' assert deserialized_large.is_end_of_string == large_node.is_end_of_string, 'is_end_of_string mismatch'
assert deserialized_large.value.len == large_node.value.len, 'Value length mismatch' assert deserialized_large.value.len == large_node.value.len, 'Value length mismatch'
// Check each byte of the large value // Check each byte of the large value
for i in 0..large_node.value.len { for i in 0 .. large_node.value.len {
assert deserialized_large.value[i] == large_node.value[i], 'Value byte mismatch at index ${i}' assert deserialized_large.value[i] == large_node.value[i], 'Value byte mismatch at index ${i}'
} }
assert deserialized_large.left_id == large_node.left_id, 'left_id mismatch' assert deserialized_large.left_id == large_node.left_id, 'left_id mismatch'
assert deserialized_large.middle_id == large_node.middle_id, 'middle_id mismatch' assert deserialized_large.middle_id == large_node.middle_id, 'middle_id mismatch'
assert deserialized_large.right_id == large_node.right_id, 'right_id mismatch' assert deserialized_large.right_id == large_node.right_id, 'right_id mismatch'
@@ -151,24 +151,24 @@ fn test_special_serialization() {
fn test_version_handling() { fn test_version_handling() {
// Create a valid node // Create a valid node
valid_node := Node{ valid_node := Node{
character: `a` character: `a`
is_end_of_string: true is_end_of_string: true
value: 'test'.bytes() value: 'test'.bytes()
left_id: 0 left_id: 0
middle_id: 0 middle_id: 0
right_id: 0 right_id: 0
} }
// Serialize the node // Serialize the node
mut valid_data := serialize_node(valid_node) mut valid_data := serialize_node(valid_node)
// Corrupt the version byte // Corrupt the version byte
valid_data[0] = 99 // Invalid version valid_data[0] = 99 // Invalid version
// Attempt to deserialize with invalid version // Attempt to deserialize with invalid version
deserialize_node(valid_data) or { deserialize_node(valid_data) or {
assert err.str().contains('Invalid version byte'), 'Expected version error, got: ${err}' assert err.str().contains('Invalid version byte'), 'Expected version error, got: ${err}'
return return
} }
assert false, 'Expected error for invalid version byte' assert false, 'Expected error for invalid version byte'
} }
View File
@@ -6,9 +6,9 @@ module tst
// - replaces special characters with standard ones // - replaces special characters with standard ones
pub fn namefix(s string) string { pub fn namefix(s string) string {
mut result := s.trim_space().to_lower() mut result := s.trim_space().to_lower()
// Replace any problematic characters or sequences if needed // Replace any problematic characters or sequences if needed
// For this implementation, we'll keep it simple // For this implementation, we'll keep it simple
return result return result
} }
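In other words, only whitespace and case are normalized today, e.g.:

assert namefix('  TeAm ') == 'team'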
View File
@@ -5,12 +5,12 @@ import freeflowuniverse.herolib.data.ourdb
// Represents a node in the ternary search tree // Represents a node in the ternary search tree
struct Node { struct Node {
mut: mut:
character u8 // The character stored at this node character u8 // The character stored at this node
is_end_of_string bool // Flag indicating if this node represents the end of a key is_end_of_string bool // Flag indicating if this node represents the end of a key
value []u8 // The value associated with the key (if this node is the end of a key) value []u8 // The value associated with the key (if this node is the end of a key)
left_id u32 // Database ID for left child (character < node.character) left_id u32 // Database ID for left child (character < node.character)
middle_id u32 // Database ID for middle child (character == node.character) middle_id u32 // Database ID for middle child (character == node.character)
right_id u32 // Database ID for right child (character > node.character) right_id u32 // Database ID for right child (character > node.character)
} }
// TST represents a ternary search tree data structure // TST represents a ternary search tree data structure
@@ -39,18 +39,18 @@ pub fn new(args NewArgs) !TST {
)! )!
mut root_id := u32(1) // First ID in ourdb is now 1 instead of 0 mut root_id := u32(1) // First ID in ourdb is now 1 instead of 0
if db.get_next_id()! == 1 { if db.get_next_id()! == 1 {
// Create a new root node if the database is empty // Create a new root node if the database is empty
// We'll use a null character (0) for the root node // We'll use a null character (0) for the root node
println('Creating new root node') println('Creating new root node')
root := Node{ root := Node{
character: 0 character: 0
is_end_of_string: false is_end_of_string: false
value: []u8{} value: []u8{}
left_id: 0 left_id: 0
middle_id: 0 middle_id: 0
right_id: 0 right_id: 0
} }
root_id = db.set(data: serialize_node(root))! root_id = db.set(data: serialize_node(root))!
println('Root node created with ID: ${root_id}') println('Root node created with ID: ${root_id}')
@@ -74,7 +74,7 @@ pub fn new(args NewArgs) !TST {
pub fn (mut self TST) set(key string, value []u8) ! { pub fn (mut self TST) set(key string, value []u8) ! {
normalized_key := namefix(key) normalized_key := namefix(key)
println('Setting key: "${key}" (normalized: "${normalized_key}")') println('Setting key: "${key}" (normalized: "${normalized_key}")')
if normalized_key.len == 0 { if normalized_key.len == 0 {
return error('Empty key not allowed') return error('Empty key not allowed')
} }
@@ -83,12 +83,12 @@ pub fn (mut self TST) set(key string, value []u8) ! {
if self.root_id == 0 { if self.root_id == 0 {
println('Tree is empty, creating root node') println('Tree is empty, creating root node')
root := Node{ root := Node{
character: 0 character: 0
is_end_of_string: false is_end_of_string: false
value: []u8{} value: []u8{}
left_id: 0 left_id: 0
middle_id: 0 middle_id: 0
right_id: 0 right_id: 0
} }
self.root_id = self.db.set(data: serialize_node(root))! self.root_id = self.db.set(data: serialize_node(root))!
println('Root node created with ID: ${self.root_id}') println('Root node created with ID: ${self.root_id}')
@@ -97,12 +97,12 @@ pub fn (mut self TST) set(key string, value []u8) ! {
// Insert the key-value pair // Insert the key-value pair
mut last_node_id := self.insert_recursive(self.root_id, normalized_key, 0, value)! mut last_node_id := self.insert_recursive(self.root_id, normalized_key, 0, value)!
println('Key "${normalized_key}" inserted to node ${last_node_id}') println('Key "${normalized_key}" inserted to node ${last_node_id}')
// Make sure the last node is marked as end of string with the value // Make sure the last node is marked as end of string with the value
if last_node_id != 0 { if last_node_id != 0 {
node_data := self.db.get(last_node_id)! node_data := self.db.get(last_node_id)!
mut node := deserialize_node(node_data)! mut node := deserialize_node(node_data)!
// Ensure this node is marked as the end of a string // Ensure this node is marked as the end of a string
if !node.is_end_of_string { if !node.is_end_of_string {
println('Setting node ${last_node_id} as end of string') println('Setting node ${last_node_id} as end of string')
@@ -111,7 +111,7 @@ pub fn (mut self TST) set(key string, value []u8) ! {
self.db.set(id: last_node_id, data: serialize_node(node))! self.db.set(id: last_node_id, data: serialize_node(node))!
} }
} }
println('Key "${normalized_key}" inserted successfully') println('Key "${normalized_key}" inserted successfully')
} }
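End-to-end usage sketch; calling tst.new() with no arguments assumes every NewArgs field has a usable default, which this diff does not confirm:

import freeflowuniverse.herolib.data.tst

fn main() {
	mut tree := tst.new() or { panic(err) }
	tree.set('team', 'alpha'.bytes()) or { panic(err) }
	val := tree.get('team') or { panic(err) }
	println(val.bytestr()) // alpha
	keys := tree.list('te') or { panic(err) }
	println(keys) // every stored key starting with 'te'
}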
@@ -126,33 +126,33 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
// If we've reached the end of the tree, create a new node // If we've reached the end of the tree, create a new node
if node_id == 0 { if node_id == 0 {
println('Creating new node for character: ${key[pos]} (${key[pos].ascii_str()}) at position ${pos}') println('Creating new node for character: ${key[pos]} (${key[pos].ascii_str()}) at position ${pos}')
// Create a node for this character // Create a node for this character
new_node := Node{ new_node := Node{
character: key[pos] character: key[pos]
is_end_of_string: pos == key.len - 1 is_end_of_string: pos == key.len - 1
value: if pos == key.len - 1 { value.clone() } else { []u8{} } value: if pos == key.len - 1 { value.clone() } else { []u8{} }
left_id: 0 left_id: 0
middle_id: 0 middle_id: 0
right_id: 0 right_id: 0
} }
new_id := self.db.set(data: serialize_node(new_node))! new_id := self.db.set(data: serialize_node(new_node))!
println('New node created with ID: ${new_id}, character: ${key[pos]} (${key[pos].ascii_str()}), is_end: ${pos == key.len - 1}') println('New node created with ID: ${new_id}, character: ${key[pos]} (${key[pos].ascii_str()}), is_end: ${pos == key.len - 1}')
// If this is the last character in the key, we're done // If this is the last character in the key, we're done
if pos == key.len - 1 { if pos == key.len - 1 {
return new_id return new_id
} }
// Otherwise, create the next node in the sequence and link to it // Otherwise, create the next node in the sequence and link to it
next_id := self.insert_recursive(0, key, pos + 1, value)! next_id := self.insert_recursive(0, key, pos + 1, value)!
// Update the middle link // Update the middle link
node_data := self.db.get(new_id)! node_data := self.db.get(new_id)!
mut updated_node := deserialize_node(node_data)! mut updated_node := deserialize_node(node_data)!
updated_node.middle_id = next_id updated_node.middle_id = next_id
self.db.set(id: new_id, data: serialize_node(updated_node))! self.db.set(id: new_id, data: serialize_node(updated_node))!
return new_id return new_id
} }
@@ -161,14 +161,14 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
		println('Failed to get node data for ID ${node_id}')
		return error('Node retrieval error: ${err}')
	}
	mut node := deserialize_node(node_data) or {
		println('Failed to deserialize node with ID ${node_id}')
		return error('Node deserialization error: ${err}')
	}
	println('Node ${node_id}: character=${node.character} (${node.character.ascii_str()}), is_end=${node.is_end_of_string}, left=${node.left_id}, middle=${node.middle_id}, right=${node.right_id}')

	// Compare the current character with the node's character
	if key[pos] < node.character {
		println('Going left for character: ${key[pos]} (${key[pos].ascii_str()}) < ${node.character} (${node.character.ascii_str()})')
@@ -189,7 +189,7 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
		node.value = value
		self.db.set(id: node_id, data: serialize_node(node))!
	} else {
		println('Going middle for next character: ${key[pos + 1]} (${key[pos + 1].ascii_str()})')
		// Move to the next character in the key
		node.middle_id = self.insert_recursive(node.middle_id, key, pos + 1, value)!
		self.db.set(id: node_id, data: serialize_node(node))!
@@ -203,7 +203,7 @@ fn (mut self TST) insert_recursive(node_id u32, key string, pos int, value []u8)
pub fn (mut self TST) get(key string) ![]u8 {
	normalized_key := namefix(key)
	println('Getting key: "${key}" (normalized: "${normalized_key}")')

	if normalized_key.len == 0 {
		return error('Empty key not allowed')
	}
@@ -222,48 +222,44 @@ fn (mut self TST) search_recursive(node_id u32, key string, pos int) ![]u8 {
		println('Node ID is 0, key not found')
		return error('Key not found')
	}

	if pos >= key.len {
		println('Position ${pos} out of bounds for key "${key}"')
		return error('Key not found - position out of bounds')
	}

	// Get the node
	node_data := self.db.get(node_id) or {
		println('Failed to get node ${node_id}')
		return error('Node not found in database')
	}

	node := deserialize_node(node_data) or {
		println('Failed to deserialize node ${node_id}')
		return error('Failed to deserialize node')
	}

	println('Searching node ${node_id}: char=${node.character}, pos=${pos}, key_char=${key[pos]}')

	mut result := []u8{}

	// Left branch
	if key[pos] < node.character {
		println('Going left')
		result = self.search_recursive(node.left_id, key, pos) or { return error(err.str()) }
		return result
	}

	// Right branch
	if key[pos] > node.character {
		println('Going right')
		result = self.search_recursive(node.right_id, key, pos) or { return error(err.str()) }
		return result
	}

	// Character matches
	println('Character match')

	// At end of key
	if pos == key.len - 1 {
		if node.is_end_of_string {
@@ -278,17 +274,15 @@ fn (mut self TST) search_recursive(node_id u32, key string, pos int) ![]u8 {
			return error('Key not found - not marked as end of string')
		}
	}

	// Not at end of key, go to middle
	if node.middle_id == 0 {
		println('No middle child')
		return error('Key not found - no middle child')
	}

	println('Going to middle child')
	result = self.search_recursive(node.middle_id, key, pos + 1) or { return error(err.str()) }
	return result
}
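
A minimal lookup sketch against the public get above (editor's sketch; assumes a populated TST value named tree, whose constructor is outside this diff):

value := tree.get('hello') or { panic('lookup failed: ${err}') }
println(value.bytestr()) // prints the stored value as a string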
@@ -296,7 +290,7 @@ fn (mut self TST) search_recursive(node_id u32, key string, pos int) ![]u8 {
pub fn (mut self TST) delete(key string) ! {
	normalized_key := namefix(key)
	println('Deleting key: "${key}" (normalized: "${normalized_key}")')

	if normalized_key.len == 0 {
		return error('Empty key not allowed')
	}
@@ -315,7 +309,7 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
		println('Node ID is 0, key not found')
		return error('Key not found')
	}

	// Check for position out of bounds
	if pos >= key.len {
		println('Position ${pos} is out of bounds for key "${key}"')
@@ -327,12 +321,12 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
		println('Failed to get node data for ID ${node_id}')
		return error('Node retrieval error: ${err}')
	}
	mut node := deserialize_node(node_data) or {
		println('Failed to deserialize node with ID ${node_id}')
		return error('Node deserialization error: ${err}')
	}
	println('Deleting from node ${node_id}: character=${node.character} (${node.character.ascii_str()}), is_end=${node.is_end_of_string}, left=${node.left_id}, middle=${node.middle_id}, right=${node.right_id}, pos=${pos}')

	mut deleted := false
@@ -343,7 +337,7 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
		println('Left child is null, key not found')
		return error('Key not found')
	}

	deleted = self.delete_recursive(node.left_id, key, pos)!
	if deleted && node.left_id != 0 {
		// Check if the left child has been deleted
@@ -364,7 +358,7 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
		println('Right child is null, key not found')
		return error('Key not found')
	}

	deleted = self.delete_recursive(node.right_id, key, pos)!
	if deleted && node.right_id != 0 {
		// Check if the right child has been deleted
@@ -405,12 +399,12 @@ fn (mut self TST) delete_recursive(node_id u32, key string, pos int) !bool {
		}
	} else {
		// Move to the next character in the key
		println('Moving to next character: ${key[pos + 1]} (${key[pos + 1].ascii_str()})')
		if node.middle_id == 0 {
			println('Middle child is null, key not found')
			return error('Key not found')
		}

		deleted = self.delete_recursive(node.middle_id, key, pos + 1)!
		if deleted && node.middle_id != 0 {
			// Check if the middle child has been deleted

View File

@@ -18,17 +18,17 @@ pub fn (mut self TST) list(prefix string) ![]string {
	// Find the prefix node first
	result_info := self.navigate_to_prefix(self.root_id, normalized_prefix, 0)

	if !result_info.found {
		println('Prefix node not found for "${normalized_prefix}"')
		return result // Empty result
	}

	println('Found node for prefix "${normalized_prefix}" at node ${result_info.node_id}, collecting keys')

	// Collect all keys from the subtree rooted at the prefix node
	self.collect_keys_with_prefix(result_info.node_id, result_info.prefix, mut result)!

	println('Found ${result.len} keys with prefix "${normalized_prefix}": ${result}')
	return result
}
@@ -45,23 +45,31 @@ fn (mut self TST) navigate_to_prefix(node_id u32, prefix string, pos int) Prefix
	// Base case: no node or out of bounds
	if node_id == 0 || pos >= prefix.len {
		return PrefixSearchResult{
			found: false
			node_id: 0
			prefix: ''
		}
	}

	// Get node
	node_data := self.db.get(node_id) or {
		return PrefixSearchResult{
			found: false
			node_id: 0
			prefix: ''
		}
	}
	node := deserialize_node(node_data) or {
		return PrefixSearchResult{
			found: false
			node_id: 0
			prefix: ''
		}
	}

	println('Navigating node ${node_id}: char=${node.character} (${node.character.ascii_str()}), pos=${pos}, prefix_char=${prefix[pos]} (${prefix[pos].ascii_str()})')

	// Character comparison
	if prefix[pos] < node.character {
		// Go left
@@ -74,24 +82,28 @@ fn (mut self TST) navigate_to_prefix(node_id u32, prefix string, pos int) Prefix
	} else {
		// Character match
		println('Character match found')

		// Check if we're at the end of the prefix
		if pos == prefix.len - 1 {
			println('Reached end of prefix at node ${node_id}')
			// Return the exact prefix string that was passed in
			return PrefixSearchResult{
				found: true
				node_id: node_id
				prefix: prefix
			}
		}

		// Not at end of prefix, check middle child
		if node.middle_id == 0 {
			println('No middle child, prefix not found')
			return PrefixSearchResult{
				found: false
				node_id: 0
				prefix: ''
			}
		}

		// Continue to middle child with next character
		return self.navigate_to_prefix(node.middle_id, prefix, pos + 1)
	}
@@ -102,17 +114,17 @@ fn (mut self TST) collect_keys_with_prefix(node_id u32, prefix string, mut resul
	if node_id == 0 {
		return
	}

	// Get node
	node_data := self.db.get(node_id) or { return }
	node := deserialize_node(node_data) or { return }

	println('Collecting from node ${node_id}, char=${node.character} (${node.character.ascii_str()}), prefix="${prefix}"')

	// If this node is an end of string and it's not the root, we found a key
	if node.is_end_of_string && node.character != 0 {
		// The prefix may already contain this node's character
		if prefix.len == 0 || prefix[prefix.len - 1] != node.character {
			println('Found complete key: "${prefix}${node.character.ascii_str()}"')
			result << prefix + node.character.ascii_str()
		} else {
@@ -120,24 +132,24 @@ fn (mut self TST) collect_keys_with_prefix(node_id u32, prefix string, mut resul
			result << prefix
		}
	}

	// Recursively search all children
	if node.left_id != 0 {
		self.collect_keys_with_prefix(node.left_id, prefix, mut result)!
	}

	// For middle child, we need to add this node's character to the prefix
	if node.middle_id != 0 {
		mut next_prefix := prefix
		if node.character != 0 { // Skip root node
			// Only add the character if it's not already at the end of the prefix
			if prefix.len == 0 || prefix[prefix.len - 1] != node.character {
				next_prefix += node.character.ascii_str()
			}
		}
		self.collect_keys_with_prefix(node.middle_id, next_prefix, mut result)!
	}

	if node.right_id != 0 {
		self.collect_keys_with_prefix(node.right_id, prefix, mut result)!
	}
@@ -148,19 +160,19 @@ fn (mut self TST) collect_all_keys(node_id u32, prefix string, mut result []stri
	if node_id == 0 {
		return
	}

	// Get node
	node_data := self.db.get(node_id) or { return }
	node := deserialize_node(node_data) or { return }

	// Calculate current path
	mut current_prefix := prefix

	// If this is not the root, add the character
	if node.character != 0 {
		current_prefix += node.character.ascii_str()
	}

	// If this marks the end of a key, add it to the result
	if node.is_end_of_string {
		println('Found key: ${current_prefix}')
@@ -168,16 +180,16 @@ fn (mut self TST) collect_all_keys(node_id u32, prefix string, mut result []stri
			result << current_prefix
		}
	}

	// Visit all children
	if node.left_id != 0 {
		self.collect_all_keys(node.left_id, prefix, mut result)!
	}

	if node.middle_id != 0 {
		self.collect_all_keys(node.middle_id, current_prefix, mut result)!
	}

	if node.right_id != 0 {
		self.collect_all_keys(node.right_id, prefix, mut result)!
	}
@@ -187,7 +199,7 @@ fn (mut self TST) collect_all_keys(node_id u32, prefix string, mut result []stri
pub fn (mut self TST) getall(prefix string) ![][]u8 {
	normalized_prefix := namefix(prefix)
	println('Getting all values with prefix: "${prefix}" (normalized: "${normalized_prefix}")')

	// Get all matching keys
	keys := self.list(normalized_prefix)!
@@ -201,4 +213,4 @@ pub fn (mut self TST) getall(prefix string) ![][]u8 {
	println('Found ${values.len} values with prefix "${normalized_prefix}"')
	return values
}
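
For illustration, how list and getall relate (editor's sketch; assumes a populated tree, with keys and values mirroring the tests below):

keys := tree.list('hel')! // e.g. ['hello', 'help']
values := tree.getall('hel')! // the values stored under those keys
for i, key in keys {
	println('${key} -> ${values[i].bytestr()}')
}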

View File

@@ -182,13 +182,13 @@ fn test_getall() {
		return
	}
	assert hel_values.len == 2, 'Expected 2 values with prefix "hel", got ${hel_values.len}'

	// Convert byte arrays to strings for easier comparison
	mut hel_strings := []string{}
	for val in hel_values {
		hel_strings << val.bytestr()
	}
	assert 'world' in hel_strings, 'Expected "world" in values with prefix "hel"'
	assert 'me' in hel_strings, 'Expected "me" in values with prefix "hel"'
}
@@ -232,4 +232,4 @@ fn test_persistence() {
		}
		assert value2.bytestr() == 'value', 'Expected "value", got "${value2.bytestr()}"'
	}
}

View File

@@ -116,11 +116,11 @@ fn (p CustomProperty) xml_str() string {
fn test_custom_property() {
	// Test custom property
	custom_prop := CustomProperty{
		name: 'author'
		value: 'Kristof'
		namespace: 'C'
	}
	assert custom_prop.xml_str() == '<C:author>Kristof</C:author>'
	assert custom_prop.xml_name() == '<author/>'
}
@@ -131,16 +131,15 @@ fn test_propfind_response() {
	props << DisplayName('test-file.txt')
	props << GetLastModified('Mon, 01 Jan 2024 12:00:00 GMT')
	props << GetContentLength('1024')

	// Build a complete PROPFIND response with multistatus
	xml_output := '<D:multistatus xmlns:D="DAV:">
	<D:response>
	<D:href>/test-file.txt</D:href>
	${props.xml_str()}
	</D:response>
	</D:multistatus>' // Verify the XML structure
	assert xml_output.contains('<D:multistatus')
	assert xml_output.contains('<D:response>')
	assert xml_output.contains('<D:href>')
@@ -157,7 +156,7 @@ fn test_propfind_with_missing_properties() {
	</D:prop>
	<D:status>HTTP/1.1 404 Not Found</D:status>
	</D:propstat>'

	// Simple verification of structure
	assert missing_prop_response.contains('<D:propstat>')
	assert missing_prop_response.contains('<D:nonexistent-property/>')
@@ -167,12 +166,12 @@ fn test_propfind_with_missing_properties() {
fn test_supported_lock_detailed() {
	supported_lock := SupportedLock('')
	xml_output := supported_lock.xml_str()

	// Test SupportedLock provides a fully formed XML snippet for supportedlock
	// Note: This test assumes the actual implementation returns a simplified version
	// as indicated by the xml_str() method which returns '<D:supportedlock>...</D:supportedlock>'
	assert xml_output.contains('<D:supportedlock>')

	// Detailed testing would need proper parsing of the XML to verify elements
	// For real implementation, test should check for:
	// - lockentry elements
@@ -183,11 +182,11 @@ fn test_supported_lock_detailed() {
fn test_proppatch_request() {
	// Create property to set
	author_prop := CustomProperty{
		name: 'author'
		value: 'Kristof'
		namespace: 'C'
	}

	// Create XML for PROPPATCH request (set)
	proppatch_set := '<D:propertyupdate xmlns:D="DAV:" xmlns:C="http://example.com/customns">
	<D:set>
@@ -195,14 +194,13 @@ fn test_proppatch_request() {
	${author_prop.xml_str()}
	</D:prop>
	</D:set>
	</D:propertyupdate>' // Check structure
	assert proppatch_set.contains('<D:propertyupdate')
	assert proppatch_set.contains('<D:set>')
	assert proppatch_set.contains('<D:prop>')
	assert proppatch_set.contains('<C:author>Kristof</C:author>')

	// Create XML for PROPPATCH request (remove)
	proppatch_remove := '<D:propertyupdate xmlns:D="DAV:">
	<D:remove>
@@ -211,7 +209,7 @@ fn test_proppatch_request() {
	</D:prop>
	</D:remove>
	</D:propertyupdate>'

	// Check structure
	assert proppatch_remove.contains('<D:propertyupdate')
	assert proppatch_remove.contains('<D:remove>')
@@ -224,7 +222,7 @@ fn test_prop_name_listing() {
	mut props := []Property{}
	props << DisplayName('file.txt')
	props << GetContentType('text/plain')

	// Generate propname response
	// Note: In a complete implementation, there would be a function to generate this XML
	// For testing purposes, we're manually creating the expected structure
@@ -240,7 +238,7 @@ fn test_prop_name_listing() {
	</D:propstat>
	</D:response>
	</D:multistatus>'

	// Verify structure
	assert propname_response.contains('<D:multistatus')
	assert propname_response.contains('<D:prop>')
@@ -262,7 +260,7 @@ fn test_namespace_declarations() {
	</D:propstat>
	</D:response>
	</D:multistatus>'

	// Verify key namespace elements
	assert response_with_ns.contains('xmlns:D="DAV:"')
	assert response_with_ns.contains('xmlns:C="http://example.com/customns"')
@@ -290,7 +288,7 @@ fn test_depth_header_responses() {
	</D:propstat>
	</D:response>
	</D:multistatus>'

	// Verify structure contains multiple responses
	assert multi_response.contains('<D:response>')
	assert multi_response.count('<D:response>') == 2
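
The assertions above all target response bodies; for context, the request side of PROPFIND is a small XML document as well. A hedged example of what a client might send (illustrative only, not taken from these tests):

propfind_request := '<D:propfind xmlns:D="DAV:">
	<D:prop>
		<D:displayname/>
		<D:getlastmodified/>
		<D:getcontentlength/>
	</D:prop>
</D:propfind>'
assert propfind_request.contains('<D:propfind')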

View File

@@ -303,22 +303,22 @@ fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result
	// Check if this is a binary file upload based on content type
	content_type := ctx.req.header.get(.content_type) or { '' }
	is_binary := is_binary_content_type(content_type)

	// Handle binary uploads directly
	if is_binary {
		log.info('[WebDAV] Processing binary upload for ${path} (${content_type})')

		// Handle the binary upload directly
		ctx.takeover_conn()

		// Process the request using standard methods
		is_update := server.vfs.exists(path)

		// Return success response
		ctx.res.set_status(if is_update { .ok } else { .created })
		return veb.no_result()
	}

	// For non-binary uploads, use the standard approach
	// Handle parent directory
	parent_path := path.all_before_last('/')
@@ -345,13 +345,13 @@ fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result
		ctx.res.set_status(.conflict)
		return ctx.text('HTTP 409: Conflict - Cannot replace directory with file')
	}

	// Create the file after deleting the directory
	server.vfs.file_create(path) or {
		log.error('[WebDAV] Failed to create file ${path} after deleting directory: ${err.msg()}')
		return ctx.server_error('Failed to create file: ${err.msg()}')
	}

	// Now it's not an update anymore
	is_update = false
}
@@ -602,22 +602,15 @@ fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result
fn is_binary_content_type(content_type string) bool {
	// Normalize the content type by converting to lowercase
	normalized := content_type.to_lower()

	// Check for common binary file types
	return normalized.contains('application/octet-stream')
		|| (normalized.contains('application/') && (normalized.contains('msword')
		|| normalized.contains('excel') || normalized.contains('powerpoint')
		|| normalized.contains('pdf') || normalized.contains('zip')
		|| normalized.contains('gzip') || normalized.contains('x-tar')
		|| normalized.contains('x-7z') || normalized.contains('x-rar')))
		|| (normalized.contains('image/') && !normalized.contains('svg'))
		|| normalized.contains('audio/') || normalized.contains('video/')
		|| normalized.contains('vnd.openxmlformats') // Office documents
}
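
A few sample classifications the reformatted predicate produces (editor's sketch, assuming the function is in scope):

assert is_binary_content_type('application/pdf') // application/ plus pdf
assert is_binary_content_type('IMAGE/PNG') // normalized to image/png
assert !is_binary_content_type('image/svg+xml') // SVG is explicitly excluded
assert !is_binary_content_type('text/plain') // plain text stays non-binary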

View File

@@ -66,19 +66,35 @@ fn (mut server Server) get_entry_property(entry &vfs.FSEntry, name string) !Prop
	property_name := if name.contains(':') { name.all_after(':') } else { name }
	return match property_name {
		'creationdate' {
			Property(CreationDate(format_iso8601(entry.get_metadata().created_time())))
		}
		'getetag' {
			Property(GetETag(entry.get_metadata().id.str()))
		}
		'resourcetype' {
			Property(ResourceType(entry.is_dir()))
		}
		'getlastmodified', 'lastmodified_server' {
			// Both standard getlastmodified and custom lastmodified_server properties
			// return the same information
			Property(GetLastModified(texttools.format_rfc1123(entry.get_metadata().modified_time())))
		}
		'getcontentlength' {
			Property(GetContentLength(entry.get_metadata().size.str()))
		}
		'quota-available-bytes' {
			Property(QuotaAvailableBytes(16184098816))
		}
		'quota-used-bytes' {
			Property(QuotaUsedBytes(16184098816))
		}
		'quotaused' {
			Property(QuotaUsed(16184098816))
		}
		'quota' {
			Property(Quota(16184098816))
		}
		'displayname' {
			// RFC 4918, Section 15.2: displayname is a human-readable name for UI display
			// For now, we use the filename as the displayname, but this could be enhanced
@@ -102,7 +118,7 @@ fn (mut server Server) get_entry_property(entry &vfs.FSEntry, name string) !Prop
			// Always show as unlocked for now to ensure compatibility
			Property(LockDiscovery(''))
		}
		else {
			// For any unimplemented property, return an empty string instead of panicking
			// This improves compatibility with various WebDAV clients
			log.info('[WebDAV] Unimplemented property requested: ${name}')
@@ -127,16 +143,24 @@ fn (mut server Server) get_responses(entry vfs.FSEntry, req PropfindRequest, pat
	}

		// main entry response
		responses << PropfindResponse{
			href: ensure_leading_slash(if entry.is_dir() {
				'${path.trim_string_right('/')}/'
			} else {
				path
			})
			// not_found: entry.get_unfound_properties(req)
			found_props: properties
		}
	} else {
		responses << PropfindResponse{
			href: ensure_leading_slash(if entry.is_dir() {
				'${path.trim_string_right('/')}/'
			} else {
				path
			})
			// not_found: entry.get_unfound_properties(req)
			found_props: server.get_properties(entry)
		}
	}

	if !entry.is_dir() || req.depth == .zero {
@@ -148,10 +172,10 @@ fn (mut server Server) get_responses(entry vfs.FSEntry, req PropfindRequest, pat
		return responses
	}

	for e in entries {
		child_path := if path.ends_with('/') {
			path + e.get_metadata().name
		} else {
			path + '/' + e.get_metadata().name
		}
		responses << server.get_responses(e, PropfindRequest{
			...req

View File

@@ -487,11 +487,12 @@ fn test_server_propfind() ! {
	assert ctx.res.header.get(.content_type)! == 'application/xml'
	assert ctx.res.body.contains('<D:multistatus')
	assert ctx.res.body.contains('<D:response>')

	// Now that we know the correct format, check for it - directories have both leading and trailing slashes
	assert ctx.res.body.contains('<D:href>/${root_dir}/</D:href>')

	// Should only include the requested resource
	assert !ctx.res.body.contains('<D:href>/${file_in_root}</D:href>')
		&& !ctx.res.body.contains('<D:href>/${file_in_root}')

	// Test PROPFIND with depth=1 (resource and immediate children)
	mut ctx2 := Context{

View File

@@ -10,7 +10,7 @@ import freeflowuniverse.herolib.core.redisclient
__global (
	circle_global map[string]&CircleCoordinator
	circle_default string
	action_queues map[string]&ActionQueue
)

// HeroRunner is the main factory for managing jobs, agents, services, circles and names
@@ -101,7 +101,7 @@ pub fn new(args_ CircleCoordinatorArgs) !&CircleCoordinator {
@[params]
pub struct ActionQueueArgs {
pub mut:
	name string = 'default' // Name of the queue
	redis_addr string // Redis server address, defaults to 'localhost:6379'
}
@@ -109,48 +109,48 @@ pub mut:
pub fn new_action_queue(args ActionQueueArgs) !&ActionQueue {
	// Normalize the queue name
	queue_name := texttools.name_fix(args.name)

	// Check if queue already exists in global map
	if queue_name in action_queues {
		mut q := action_queues[queue_name] or { panic('bug') }
		return q
	}

	// Set default Redis address if not provided
	mut redis_addr := args.redis_addr
	if redis_addr == '' {
		redis_addr = 'localhost:6379'
	}

	// Create Redis client
	mut redis := redisclient.new(redis_addr)!

	// Create Redis queue
	queue_key := 'actionqueue:${queue_name}'
	mut redis_queue := redis.queue_get(queue_key)

	// Create ActionQueue
	mut action_queue := &ActionQueue{
		name: queue_name
		queue: &redis_queue
		redis: redis
	}

	// Store in global map
	action_queues[queue_name] = action_queue

	return action_queue
}

// get_action_queue retrieves an existing ActionQueue or creates a new one
pub fn get_action_queue(name string) !&ActionQueue {
	queue_name := texttools.name_fix(name)

	if queue_name in action_queues {
		mut q := action_queues[queue_name] or { panic('bug') }
		return q
	}

	return new_action_queue(ActionQueueArgs{
		name: queue_name
	})!
@@ -159,17 +159,17 @@ pub fn get_action_queue(name string) !&ActionQueue {
// get_or_create_action_queue retrieves an existing ActionQueue for a CircleCoordinator or creates a new one
pub fn (mut cc CircleCoordinator) get_or_create_action_queue(name string) !&ActionQueue {
	queue_name := texttools.name_fix(name)

	if queue_name in cc.action_queues {
		mut q := cc.action_queues[queue_name] or { panic('bug') }
		return q
	}

	mut action_queue := new_action_queue(ActionQueueArgs{
		name: queue_name
	})!

	cc.action_queues[queue_name] = action_queue
	return action_queue
}
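
A minimal usage sketch of the factory functions above (assumes a Redis server reachable on localhost:6379):

mut queue := get_action_queue('myqueue')! // creates the queue or reuses the cached one
job := new_action_job('!!action.test name:demo')
queue.add_job(job)!
println('queued job ${job.guid}')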

View File

@@ -19,14 +19,14 @@ pub enum ActionJobStatus {
@[heap]
pub struct ActionJob {
pub mut:
	guid string
	heroscript string
	created ourtime.OurTime
	deadline ourtime.OurTime
	status ActionJobStatus
	error string // Error message if job failed
	async bool // Whether the job should be processed asynchronously
	circleid string // ID of the circle this job belongs to
}

// ActionQueue is a queue of actions to be processed, which comes from a redis queue
@@ -44,15 +44,15 @@ pub fn new_action_job(heroscript string) ActionJob {
	// Default deadline is 1 hour from now
	mut deadline := ourtime.now()
	deadline.warp('+1h') or { panic('Failed to set deadline: ${err}') }

	return ActionJob{
		guid: time.now().unix_milli().str()
		heroscript: heroscript
		created: now
		deadline: deadline
		status: .pending
		async: false
		circleid: ''
	}
}
@@ -78,15 +78,15 @@ pub fn (job ActionJob) to_playbook() !&playbook.PlayBook {
	if job.heroscript.trim_space() == '' {
		return error('No heroscript content in job')
	}

	// Create a new PlayBook with the heroscript content
	mut pb := playbook.new(text: job.heroscript)!

	// Check if any actions were found
	if pb.actions.len == 0 {
		return error('No actions found in heroscript')
	}

	return &pb
}
@@ -104,7 +104,7 @@ pub fn (mut q ActionQueue) add_job(job ActionJob) ! {
	if job.error != '' {
		q.redis.hset(job_key, 'error', job.error)!
	}

	// Add the job reference to the queue
	q.queue.add(job.guid)!
}
@@ -112,32 +112,32 @@ pub fn (mut q ActionQueue) add_job(job ActionJob) ! {
// get_job retrieves a job from Redis by its GUID
pub fn (mut q ActionQueue) get_job(guid string) !ActionJob {
	job_key := 'heroactionjobs:${guid}'

	// Check if the job exists
	if !q.redis.exists(job_key)! {
		return error('Job with GUID ${guid} not found')
	}

	// Retrieve job fields
	mut job := ActionJob{
		guid: guid
		heroscript: q.redis.hget(job_key, 'heroscript')!
		status: ActionJobStatus.pending // Default value, will be overwritten
		error: '' // Default empty error message
		async: false // Default to synchronous
		circleid: '' // Default to empty circle ID
	}

	// Parse created time
	created_str := q.redis.hget(job_key, 'created')!
	created_unix := created_str.i64()
	job.created = ourtime.new_from_epoch(u64(created_unix))

	// Parse deadline
	deadline_str := q.redis.hget(job_key, 'deadline')!
	deadline_unix := deadline_str.i64()
	job.deadline = ourtime.new_from_epoch(u64(deadline_unix))

	// Parse status
	status_str := q.redis.hget(job_key, 'status')!
	match status_str {
@@ -148,29 +148,29 @@ pub fn (mut q ActionQueue) get_job(guid string) !ActionJob {
		'cancelled' { job.status = .cancelled }
		else { job.status = .pending } // Default to pending if unknown
	}

	// Get error message if exists
	job.error = q.redis.hget(job_key, 'error') or { '' }

	// Get async flag
	async_str := q.redis.hget(job_key, 'async') or { 'false' }
	job.async = async_str == 'true'

	// Get circle ID
	job.circleid = q.redis.hget(job_key, 'circleid') or { '' }

	return job
}

// update_job_status updates the status of a job in Redis
pub fn (mut q ActionQueue) update_job_status(guid string, status ActionJobStatus) ! {
	job_key := 'heroactionjobs:${guid}'

	// Check if the job exists
	if !q.redis.exists(job_key)! {
		return error('Job with GUID ${guid} not found')
	}

	// Update status
	q.redis.hset(job_key, 'status', status.str())!
}
@@ -178,12 +178,12 @@ pub fn (mut q ActionQueue) update_job_status(guid string, status ActionJobStatus
// set_job_failed marks a job as failed with an error message
pub fn (mut q ActionQueue) set_job_failed(guid string, error_msg string) ! {
	job_key := 'heroactionjobs:${guid}'

	// Check if the job exists
	if !q.redis.exists(job_key)! {
		return error('Job with GUID ${guid} not found')
	}

	// Update status and error message
	q.redis.hset(job_key, 'status', ActionJobStatus.failed.str())!
	q.redis.hset(job_key, 'error', error_msg)!
@@ -202,32 +202,32 @@ pub fn (mut q ActionQueue) find_failed_jobs() ![]ActionJob {
	// and replaced with a more efficient implementation using SCAN
	keys := q.redis.keys('heroactionjobs:*')!

	mut failed_jobs := []ActionJob{}

	for key in keys {
		// Check if job is failed
		status := q.redis.hget(key, 'status') or { continue }
		if status == ActionJobStatus.failed.str() {
			// Get the job GUID from the key
			guid := key.all_after('heroactionjobs:')

			// Get the full job
			job := q.get_job(guid) or { continue }
			failed_jobs << job
		}
	}

	return failed_jobs
}

// delete_job deletes a job from Redis
pub fn (mut q ActionQueue) delete_job(guid string) ! {
	job_key := 'heroactionjobs:${guid}'

	// Check if the job exists
	if !q.redis.exists(job_key)! {
		return error('Job with GUID ${guid} not found')
	}

	// Delete the job
	q.redis.del(job_key)!
}
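
To summarize the Redis layout these methods rely on: each job is a hash at heroactionjobs:<guid> with the fields heroscript, created, deadline, status, error, async and circleid, while the queue under actionqueue:<name> only holds job GUIDs. A lifecycle sketch (assumes a reachable Redis):

mut q := get_action_queue('demo')!
mut job := new_action_job('!!action.demo name:run1')
q.add_job(job)!
q.update_job_status(job.guid, .processing)!
q.set_job_failed(job.guid, 'something went wrong')!
failed := q.find_failed_jobs()! // scans heroactionjobs:* for status == failed
q.delete_job(job.guid)!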

View File

@@ -7,26 +7,26 @@ fn test_action_job() {
	// Create a new action job
	heroscript := '!!action.test name:test1'
	job := new_action_job(heroscript)

	// Verify job properties
	assert job.guid != ''
	assert job.heroscript == heroscript
	assert job.status == ActionJobStatus.pending
	assert !job.created.empty()
	assert !job.deadline.empty()

	// Test JSON serialization
	json_str := job.to_json()
	job2 := action_job_from_json(json_str) or {
		assert false, 'Failed to decode job from JSON: ${err}'
		return
	}

	// Verify deserialized job
	assert job2.guid == job.guid
	assert job2.heroscript == job.heroscript
	assert job2.status == job.status

	// Test creating job with custom deadline
	job3 := new_action_job_with_deadline(heroscript, '+2h') or {
		assert false, 'Failed to create job with deadline: ${err}'
@@ -41,7 +41,7 @@ fn test_action_queue() {
		println('Skipping Redis test (use -d test_with_redis to run)')
		return
	}

	// Create a new action queue
	queue_name := 'test_queue_${time.now().unix_milli()}'
	mut queue := new_action_queue(ActionQueueArgs{
@@ -50,13 +50,13 @@ fn test_action_queue() {
		assert false, 'Failed to create action queue: ${err}'
		return
	}

	// Create test jobs
	mut job1 := new_action_job('!!action.test1 name:test1')
	mut job2 := new_action_job('!!action.test2 name:test2')
	mut job3 := new_action_job('!!action.test3 name:test3')
	mut job4 := new_action_job('!!action.test4 name:test4')

	// Add jobs to the queue
	queue.add_job(job1) or {
		assert false, 'Failed to add job1: ${err}'
@@ -70,14 +70,14 @@ fn test_action_queue() {
		assert false, 'Failed to add job3: ${err}'
		return
	}

	// Test count_waiting_jobs
	wait_count := queue.count_waiting_jobs() or {
		assert false, 'Failed to count waiting jobs: ${err}'
		return
	}
	assert wait_count == 3, 'Expected 3 waiting jobs, got ${wait_count}'

	// Fetch jobs from the queue
	fetched_job1 := queue.pop_job() or {
		assert false, 'Failed to pop job1: ${err}'
@@ -85,20 +85,20 @@ fn test_action_queue() {
	}
	assert fetched_job1.guid == job1.guid
	assert fetched_job1.heroscript == job1.heroscript

	fetched_job2 := queue.pop_job() or {
		assert false, 'Failed to pop job2: ${err}'
		return
	}
	assert fetched_job2.guid == job2.guid
	assert fetched_job2.heroscript == job2.heroscript

	// Update job status
	queue.update_job_status(job3.guid, .processing) or {
		assert false, 'Failed to update job status: ${err}'
		return
	}

	// Fetch job with updated status
	fetched_job3 := queue.pop_job() or {
		assert false, 'Failed to pop job3: ${err}'
@@ -106,19 +106,19 @@ fn test_action_queue() {
	}
	assert fetched_job3.guid == job3.guid
	assert fetched_job3.status == .processing

	// Test setting a job as failed with error message
	queue.add_job(job4) or {
		assert false, 'Failed to add job4: ${err}'
		return
	}

	// Set job as failed
	queue.set_job_failed(job4.guid, 'Test error message') or {
		assert false, 'Failed to set job as failed: ${err}'
		return
	}

	// Get the failed job and verify error message
	failed_job := queue.get_job(job4.guid) or {
		assert false, 'Failed to get failed job: ${err}'
@@ -126,7 +126,7 @@ fn test_action_queue() {
	}
	assert failed_job.status == .failed
	assert failed_job.error == 'Test error message'

	// Test finding failed jobs
	failed_jobs := queue.find_failed_jobs() or {
		assert false, 'Failed to find failed jobs: ${err}'
@@ -135,39 +135,39 @@ fn test_action_queue() {
	assert failed_jobs.len > 0, 'Expected at least one failed job'
	assert failed_jobs[0].guid == job4.guid
	assert failed_jobs[0].error == 'Test error message'

	// Delete a job
	queue.delete_job(job3.guid) or {
		assert false, 'Failed to delete job: ${err}'
		return
	}

	// Try to get deleted job (should fail)
	queue.get_job(job3.guid) or {
		// Expected error
		assert err.str().contains('not found')
		return
	}

	// Test direct put and fetch to verify heroscript preservation
	test_heroscript := '!!action.special name:direct_test param1:value1 param2:value2'
	mut direct_job := new_action_job(test_heroscript)

	// Add the job
	queue.add_job(direct_job) or {
		assert false, 'Failed to add direct job: ${err}'
		return
	}

	// Fetch the job by GUID
	fetched_direct_job := queue.get_job(direct_job.guid) or {
		assert false, 'Failed to get direct job: ${err}'
		return
	}

	// Verify the heroscript is preserved exactly
	assert fetched_direct_job.heroscript == test_heroscript, 'Heroscript was not preserved correctly'

	// Clean up
	queue.delete() or {
		assert false, 'Failed to delete queue: ${err}'

View File

@@ -40,6 +40,7 @@ pub fn (mut m DBHandler[T]) get_data(id u32) ![]u8 {
	}
	return item_data
}

pub fn (mut m DBHandler[T]) exists(id u32) !bool {
	item_data := m.session_state.dbs.db_data_core.get(id) or { return false }
	return item_data != []u8{}

View File

@@ -1,7 +1,8 @@
module circle module circle
import freeflowuniverse.herolib.hero.db.core { DBHandler, SessionState, new_dbhandler } import freeflowuniverse.herolib.hero.db.core { DBHandler, SessionState, new_dbhandler }
import freeflowuniverse.herolib.hero.db.models.circle { User, Role } import freeflowuniverse.herolib.hero.db.models.circle { Role, User }
type UserObj = User type UserObj = User
@[heap] @[heap]
@@ -55,7 +56,7 @@ pub fn (mut m UserDB) delete(obj UserObj) ! {
// get_by_name retrieves a user by its name
pub fn (mut m UserDB) get_by_name(name string) !UserObj {
    data := m.db.get_data_by_key('name', name)!
    return loads_user(data)!
}

// delete_by_name removes a user by its name
@@ -80,4 +81,4 @@ pub fn (mut m UserDB) update_user_role(name string, new_role Role) !UserObj {
    // Save the updated user
    return m.set(user)!
}
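The hunk elides the body of delete_by_name; a hedged sketch of how it could be composed from functions this file already exposes (get_by_name above and the object-based delete referenced in the hunk header), not the actual implementation:

pub fn (mut m UserDB) delete_by_name_sketch(name string) ! {
    user := m.get_by_name(name)! // resolve the name to a full UserObj
    m.delete(user)! // then reuse the object-based delete
}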

View File

@@ -1,83 +1,80 @@
module circle

import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.hero.db.models.circle { Role, User }

// dumps serializes a User struct to binary data
pub fn (user UserObj) dumps() ![]u8 {
    mut e := encoder.new()

    // Add version byte (v1)
    e.add_u8(1)

    // Encode Base struct fields
    e.add_u32(user.Base.id)
    e.add_ourtime(user.Base.creation_time)
    e.add_ourtime(user.Base.mod_time)

    // Encode comments array from Base
    e.add_u16(u16(user.Base.comments.len))
    for id in user.Base.comments {
        e.add_u32(id)
    }

    // Encode User-specific fields
    e.add_string(user.name)
    e.add_string(user.description)
    e.add_u8(u8(user.role)) // Encode enum as u8

    // Encode contact_ids array
    e.add_u16(u16(user.contact_ids.len))
    for id in user.contact_ids {
        e.add_u32(id)
    }

    // Encode wallet_ids array
    e.add_u16(u16(user.wallet_ids.len))
    for id in user.wallet_ids {
        e.add_u32(id)
    }

    // Encode pubkey
    e.add_string(user.pubkey)

    return e.data
}

// loads deserializes binary data to a User struct
pub fn loads_user(data []u8) !User {
    mut d := encoder.decoder_new(data)

    // Read version byte
    version := d.get_u8()!
    if version != 1 {
        return error('Unsupported version: ${version}')
    }

    // Create a new User instance
    mut user := User{}

    // Decode Base struct fields
    user.id = d.get_u32()!
    user.creation_time = d.get_ourtime()!
    user.mod_time = d.get_ourtime()!

    // Decode comments array from Base
    comments_count := d.get_u16()!
    user.comments = []u32{cap: int(comments_count)}
    for _ in 0 .. comments_count {
        user.comments << d.get_u32()!
    }

    // Decode User-specific fields
    user.name = d.get_string()!
    user.description = d.get_string()!

    // Get the u8 value first
    role_value := d.get_u8()!

    // Validate and convert to Role enum
    if role_value <= u8(Role.external) {
        // Use unsafe block for casting number to enum as required by V
@@ -87,23 +84,23 @@ pub fn loads_user(data []u8) !User {
    } else {
        return error('Invalid role value: ${role_value}')
    }

    // Decode contact_ids array
    contact_count := d.get_u16()!
    user.contact_ids = []u32{cap: int(contact_count)}
    for _ in 0 .. contact_count {
        user.contact_ids << d.get_u32()!
    }

    // Decode wallet_ids array
    wallet_count := d.get_u16()!
    user.wallet_ids = []u32{cap: int(wallet_count)}
    for _ in 0 .. wallet_count {
        user.wallet_ids << d.get_u32()!
    }

    // Decode pubkey
    user.pubkey = d.get_string()!

    return user
}
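Since dumps and loads_user write and read the same fields in the same order, a round trip should reproduce the input. A minimal sketch with illustrative values (the cast through the UserObj alias is needed because dumps is defined on the alias):

fn roundtrip_user_sketch() ! {
    mut u := User{}
    u.name = 'alice'
    u.description = 'example account'
    u.pubkey = 'pk_123'
    data := UserObj(u).dumps()! // encode, version byte first
    restored := loads_user(data)! // decode, validating version and role
    assert restored.name == u.name
    assert restored.pubkey == u.pubkey
}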

View File

@@ -1,6 +1,6 @@
module circle

import freeflowuniverse.herolib.hero.db.core { new_session }
import freeflowuniverse.herolib.hero.db.models.circle { Role }
import freeflowuniverse.herolib.data.ourtime
import os
@@ -8,7 +8,7 @@ import os
// test_user_db tests the functionality of the UserDB
pub fn test_user_db() ! {
    println('Starting User DB Test')

    // Create a temporary directory for the test
    test_dir := os.join_path(os.temp_dir(), 'hero_user_test')
    os.mkdir_all(test_dir) or { return error('Failed to create test directory: ${err}') }
@@ -16,20 +16,20 @@ pub fn test_user_db() ! {
        // Clean up after test
        os.rmdir_all(test_dir) or { eprintln('Failed to remove test directory: ${err}') }
    }

    // Create a new session state
    mut session := new_session(
        name: 'test_session'
        path: test_dir
    )!
    println('Session created: ${session.name}')

    // Initialize the UserDB
    mut user_db := new_userdb(session)!
    println('UserDB initialized')

    // Create and add users
    mut admin_user := user_db.new()
    admin_user.name = 'admin_user'
@@ -41,11 +41,11 @@ pub fn test_user_db() ! {
    // println(admin_user)
    // if true{panic("sss")}

    // Save the admin user
    admin_user = user_db.set(admin_user)!
    println('Admin user created with ID: ${admin_user.Base.id}')

    // Create a regular member
    mut member_user := user_db.new()
    member_user.name = 'member_user'
@@ -54,11 +54,11 @@ pub fn test_user_db() ! {
    member_user.pubkey = 'member_pubkey_456'
    member_user.creation_time = ourtime.now()
    member_user.mod_time = ourtime.now()

    // Save the member user
    member_user = user_db.set(member_user)!
    println('Member user created with ID: ${member_user.Base.id}')

    // Create a guest user
    mut guest_user := user_db.new()
    guest_user.name = 'guest_user'
@@ -67,48 +67,47 @@ pub fn test_user_db() ! {
    guest_user.pubkey = 'guest_pubkey_789'
    guest_user.creation_time = ourtime.now()
    guest_user.mod_time = ourtime.now()

    // Save the guest user
    guest_user = user_db.set(guest_user)!
    println('Guest user created with ID: ${guest_user.Base.id}')

    // Retrieve users by ID
    retrieved_admin := user_db.get(admin_user.Base.id)!
    println('Retrieved admin user by ID: ${retrieved_admin.name} (Role: ${retrieved_admin.role})')

    // Retrieve users by name
    retrieved_member := user_db.get_by_name('member_user')!
    println('Retrieved member user by name: ${retrieved_member.name} (Role: ${retrieved_member.role})')

    // Update a user's role
    updated_guest := user_db.update_user_role('guest_user', Role.contributor)!
    println('Updated guest user role to contributor: ${updated_guest.name} (Role: ${updated_guest.role})')

    // List all users
    user_ids := user_db.list()!
    println('Total users: ${user_ids.len}')
    println('User IDs: ${user_ids}')

    // Get all users
    all_users := user_db.getall()!
    println('All users:')
    for user in all_users {
        println(' - ${user.name} (ID: ${user.Base.id}, Role: ${user.role})')
    }

    // Delete a user
    user_db.delete(member_user)!
    println('Deleted member user with ID: ${member_user.Base.id}')

    // Delete a user by name
    user_db.delete_by_name('guest_user')!
    println('Deleted guest user by name')

    // List remaining users
    remaining_user_ids := user_db.list()!
    println('Remaining users: ${remaining_user_ids.len}')
    println('Remaining user IDs: ${remaining_user_ids}')

    println('User DB Test completed successfully')
}

View File

@@ -5,9 +5,8 @@ import freeflowuniverse.herolib.data.ourtime
// Base provides the common fields for a message object that can be used for email as well as chat
pub struct Base {
pub mut:
    id            u32
    creation_time ourtime.OurTime
    mod_time      ourtime.OurTime // Last modified time
    comments      []u32
}
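The serializer and tests earlier in this diff access user.Base.id, user.name, user.description, user.role, user.contact_ids, user.wallet_ids and user.pubkey, which suggests User embeds Base roughly as sketched below; the actual definition is not shown in this diff, so field order and names beyond those referenced are assumptions:

pub struct User {
    Base // embedded, giving User the id, timestamps and comments fields
pub mut:
    name        string
    description string
    role        Role
    contact_ids []u32
    wallet_ids  []u32
    pubkey      string
}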

Some files were not shown because too many files have changed in this diff.