Compare commits
70 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 39c6c37dee | |
| | 3438f74e60 | |
| | 4f79712570 | |
| | 8e85ce0678 | |
| | ff09e7bf1b | |
| | 46e1c6706c | |
| | d8a59d0726 | |
| | 108d2019cd | |
| | 3682ef2420 | |
| | a066db6624 | |
| | 7458d64c05 | |
| | 2a1787f28f | |
| | de78c229ce | |
| | f386c67acf | |
| | 75f98bf349 | |
| | 9870fcbc5d | |
| | d2b8379505 | |
| | 2dcb97255c | |
| | f7dd227cd0 | |
| | e2c18c3a24 | |
| | 1bc6c6eab8 | |
| | 4b39b137de | |
| | e5de293919 | |
| | 8a10374570 | |
| | ad37a041ab | |
| | 44daea4447 | |
| | 6989a4da13 | |
| | de4583691c | |
| | d8c9b07a51 | |
| | 54d31f40b2 | |
| | ec73b5ff34 | |
| | 9fcdcc3aff | |
| | 05ab2b68f4 | |
| | 79330ef8f5 | |
| | 45ed369a78 | |
| | 37c17fc7da | |
| | 23640d2647 | |
| | be54ec8302 | |
| | 638f81a781 | |
| | e00306b6f8 | |
| | 3fec1c38a1 | |
| | edc9e3c150 | |
| | a155122898 | |
| | f0552f38a0 | |
| | f99419371a | |
| | 86d47c218b | |
| | bd5cafbad7 | |
| | b71362eb9a | |
| | 673d982360 | |
| | 712b46864a | |
| | 186c3aae59 | |
| | 0794fe948b | |
| | ba2d6e4310 | |
| | b9e5d14b48 | |
| | bf26b0af1d | |
| | 8e82b2865b | |
| | 367340d69d | |
| | 0b77c73809 | |
| | 51b432d911 | |
| | f7a679b2a3 | |
| | 15c9d60760 | |
| | c69b53fd4e | |
| | 9cdab1f392 | |
| | 34656cf1f9 | |
| | cf98822749 | |
| | 2335d14623 | |
| | 4a72698402 | |
| | fcb857f756 | |
| | 834c413cfc | |
| | da9965bdc6 | |
.gitignore (vendored, 2 changes)
@@ -39,6 +39,8 @@ data.ms/
test_basic
cli/hero
.aider*
storage/
.qdrant-initialized
.compile_cache
compile_results.log
tmp
@@ -1,391 +0,0 @@
module developer

import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.mcp
import os

// create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
// returns an MCP Tool code in v for attaching the function to the mcp server
pub fn (d &Developer) create_mcp_tool_code(function_name string, module_path string) !string {
    println('DEBUG: Looking for function ${function_name} in module path: ${module_path}')
    if !os.exists(module_path) {
        println('DEBUG: Module path does not exist: ${module_path}')
        return error('Module path does not exist: ${module_path}')
    }

    function_ := get_function_from_module(module_path, function_name)!
    println('Function string found:\n${function_}')

    // Try to parse the function
    function := code.parse_function(function_) or {
        println('Error parsing function: ${err}')
        return error('Failed to parse function: ${err}')
    }

    mut types := map[string]string{}
    for param in function.params {
        // Check if the type is an Object (struct)
        if param.typ is code.Object {
            types[param.typ.symbol()] = get_type_from_module(module_path, param.typ.symbol())!
        }
    }

    // Get the result type if it's a struct
    mut result_ := ""
    if function.result.typ is code.Result {
        result_type := (function.result.typ as code.Result).typ
        if result_type is code.Object {
            result_ = get_type_from_module(module_path, result_type.symbol())!
        }
    } else if function.result.typ is code.Object {
        result_ = get_type_from_module(module_path, function.result.typ.symbol())!
    }

    tool_name := function.name
    tool := d.create_mcp_tool(function_, types)!
    handler := d.create_mcp_tool_handler(function_, types, result_)!
    str := $tmpl('./templates/tool_code.v.template')
    return str
}

// create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments
// types: A map of struct names to their definitions for complex parameter types
// result: The type of result of the create_mcp_tool function. Could be simply string, or struct {...}
pub fn (d &Developer) create_mcp_tool_handler(function_ string, types map[string]string, result_ string) !string {
    function := code.parse_function(function_)!
    decode_stmts := function.params.map(argument_decode_stmt(it)).join_lines()

    result := code.parse_type(result_)
    str := $tmpl('./templates/tool_handler.v.template')
    return str
}

pub fn argument_decode_stmt(param code.Param) string {
    return if param.typ is code.Integer {
        '${param.name} := arguments["${param.name}"].int()'
    } else if param.typ is code.Boolean {
        '${param.name} := arguments["${param.name}"].bool()'
    } else if param.typ is code.String {
        '${param.name} := arguments["${param.name}"].str()'
    } else if param.typ is code.Object {
        '${param.name} := json.decode[${param.typ.symbol()}](arguments["${param.name}"].str())!'
    } else if param.typ is code.Array {
        '${param.name} := json.decode[${param.typ.symbol()}](arguments["${param.name}"].str())!'
    } else if param.typ is code.Map {
        '${param.name} := json.decode[${param.typ.symbol()}](arguments["${param.name}"].str())!'
    } else {
        panic('Unsupported type: ${param.typ}')
    }
}
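// Illustrative sketch (hypothetical parameter names, not from the original module):
// for params `count int`, `active bool`, and `cfg SomeConfig`, argument_decode_stmt returns
//   count := arguments["count"].int()
//   active := arguments["active"].bool()
//   cfg := json.decode[SomeConfig](arguments["cfg"].str())!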
/*
in @generate_mcp.v , implement a create_mpc_tool_handler function that given a vlang function string and the types that map to their corresponding type definitions (for instance struct some_type: SomeType{...}), generates a vlang function such as the following:

pub fn (d &Developer) create_mcp_tool_tool_handler(arguments map[string]Any) !mcp.Tool {
    function := arguments['function'].str()
    types := json.decode[map[string]string](arguments['types'].str())!
    return d.create_mcp_tool(function, types)
}
*/

// create_mcp_tool parses a V language function string and returns an MCP Tool struct
// function: The V function string including preceding comments
// types: A map of struct names to their definitions for complex parameter types
pub fn (d Developer) create_mcp_tool(function string, types map[string]string) !mcp.Tool {
    // Extract description from preceding comments
    mut description := ''
    lines := function.split('\n')

    // Find function signature line
    mut fn_line_idx := -1
    for i, line in lines {
        if line.trim_space().starts_with('fn ') || line.trim_space().starts_with('pub fn ') {
            fn_line_idx = i
            break
        }
    }

    if fn_line_idx == -1 {
        return error('Invalid function: no function signature found')
    }

    // Extract comments before the function
    for i := 0; i < fn_line_idx; i++ {
        line := lines[i].trim_space()
        if line.starts_with('//') {
            // Remove the comment marker and any leading space
            comment := line[2..].trim_space()
            if description != '' {
                description += '\n'
            }
            description += comment
        }
    }

    // Parse function signature
    fn_signature := lines[fn_line_idx].trim_space()

    // Extract function name
    mut fn_name := ''

    // Check if this is a method with a receiver
    if fn_signature.contains('fn (') {
        // This is a method with a receiver
        // Format: [pub] fn (receiver Type) name(...)

        // Find the closing parenthesis of the receiver
        mut receiver_end := fn_signature.index(')') or { return error('Invalid method signature: missing closing parenthesis for receiver') }

        // Extract the text after the receiver
        mut after_receiver := fn_signature[receiver_end + 1..].trim_space()

        // Extract the function name (everything before the opening parenthesis)
        mut params_start := after_receiver.index('(') or { return error('Invalid method signature: missing parameters') }
        fn_name = after_receiver[0..params_start].trim_space()
    } else if fn_signature.starts_with('pub fn ') {
        // Regular public function
        mut prefix_len := 'pub fn '.len
        mut params_start := fn_signature.index('(') or { return error('Invalid function signature: missing parameters') }
        fn_name = fn_signature[prefix_len..params_start].trim_space()
    } else if fn_signature.starts_with('fn ') {
        // Regular function
        mut prefix_len := 'fn '.len
        mut params_start := fn_signature.index('(') or { return error('Invalid function signature: missing parameters') }
        fn_name = fn_signature[prefix_len..params_start].trim_space()
    } else {
        return error('Invalid function signature: must start with "fn" or "pub fn"')
    }

    if fn_name == '' {
        return error('Could not extract function name')
    }

    // Extract parameters
    mut params_str := ''

    // Check if this is a method with a receiver
    if fn_signature.contains('fn (') {
        // This is a method with a receiver
        // Find the closing parenthesis of the receiver
        mut receiver_end := fn_signature.index(')') or { return error('Invalid method signature: missing closing parenthesis for receiver') }

        // Find the opening parenthesis of the parameters
        mut params_start := -1
        for i := receiver_end + 1; i < fn_signature.len; i++ {
            if fn_signature[i] == `(` {
                params_start = i
                break
            }
        }
        if params_start == -1 {
            return error('Invalid method signature: missing parameter list')
        }

        // Find the closing parenthesis of the parameters
        mut params_end := fn_signature.last_index(')') or { return error('Invalid method signature: missing closing parenthesis for parameters') }

        // Extract the parameters
        params_str = fn_signature[params_start + 1..params_end].trim_space()
    } else {
        // Regular function
        mut params_start := fn_signature.index('(') or { return error('Invalid function signature: missing parameters') }
        mut params_end := fn_signature.last_index(')') or { return error('Invalid function signature: missing closing parenthesis') }

        // Extract the parameters
        params_str = fn_signature[params_start + 1..params_end].trim_space()
    }

    // Create input schema for parameters
    mut properties := map[string]mcp.ToolProperty{}
    mut required := []string{}

    if params_str != '' {
        param_list := params_str.split(',')

        for param in param_list {
            trimmed_param := param.trim_space()
            if trimmed_param == '' {
                continue
            }

            // Split parameter into name and type
            param_parts := trimmed_param.split_any(' \t')
            if param_parts.len < 2 {
                continue
            }

            param_name := param_parts[0]
            param_type := param_parts[1]

            // Add to required parameters
            required << param_name

            // Create property for this parameter
            mut property := mcp.ToolProperty{}

            // Check if this is a complex type defined in the types map
            if param_type in types {
                // Parse the struct definition to create a nested schema
                struct_def := types[param_type]
                struct_schema := d.create_mcp_tool_input_schema(struct_def)!
                property = mcp.ToolProperty{
                    typ: struct_schema.typ
                }
            } else {
                // Handle primitive types
                schema := d.create_mcp_tool_input_schema(param_type)!
                property = mcp.ToolProperty{
                    typ: schema.typ
                }
            }

            properties[param_name] = property
        }
    }

    // Create the input schema
    input_schema := mcp.ToolInputSchema{
        typ: 'object',
        properties: properties,
        required: required
    }

    // Create and return the Tool
    return mcp.Tool{
        name: fn_name,
        description: description,
        input_schema: input_schema
    }
}

// create_mcp_tool_input_schema creates a ToolInputSchema for a given input type
// input: The input type string
// returns: A ToolInputSchema for the given input type
// errors: Returns an error if the input type is not supported
pub fn (d Developer) create_mcp_tool_input_schema(input string) !mcp.ToolInputSchema {
    // if input is a primitive type, return a mcp ToolInputSchema with that type
    if input == 'string' {
        return mcp.ToolInputSchema{
            typ: 'string'
        }
    } else if input == 'int' {
        return mcp.ToolInputSchema{
            typ: 'integer'
        }
    } else if input == 'float' {
        return mcp.ToolInputSchema{
            typ: 'number'
        }
    } else if input == 'bool' {
        return mcp.ToolInputSchema{
            typ: 'boolean'
        }
    }

    // if input is a struct, return a mcp ToolInputSchema with typ 'object' and properties for each field in the struct
    if input.starts_with('pub struct ') {
        struct_name := input[11..].split(' ')[0]
        fields := parse_struct_fields(input)
        mut properties := map[string]mcp.ToolProperty{}

        for field_name, field_type in fields {
            property := mcp.ToolProperty{
                typ: d.create_mcp_tool_input_schema(field_type)!.typ
            }
            properties[field_name] = property
        }

        return mcp.ToolInputSchema{
            typ: 'object',
            properties: properties
        }
    }

    // if input is an array, return a mcp ToolInputSchema with typ 'array' and items of the item type
    if input.starts_with('[]') {
        item_type := input[2..]

        // For array types, we create a schema with type 'array'
        // The actual item type is determined by the primitive type
        mut item_type_str := 'string' // default
        if item_type == 'int' {
            item_type_str = 'integer'
        } else if item_type == 'float' {
            item_type_str = 'number'
        } else if item_type == 'bool' {
            item_type_str = 'boolean'
        }

        // Create a property for the array items
        mut property := mcp.ToolProperty{
            typ: 'array'
        }

        // Add the property to the schema
        mut properties := map[string]mcp.ToolProperty{}
        properties['items'] = property

        return mcp.ToolInputSchema{
            typ: 'array',
            properties: properties
        }
    }

    // Default to string type for unknown types
    return mcp.ToolInputSchema{
        typ: 'string'
    }
}

// parse_struct_fields parses a V language struct definition string and returns a map of field names to their types
fn parse_struct_fields(struct_def string) map[string]string {
    mut fields := map[string]string{}

    // Find the opening and closing braces of the struct definition
    start_idx := struct_def.index('{') or { return fields }
    end_idx := struct_def.last_index('}') or { return fields }

    // Extract the content between the braces
    struct_content := struct_def[start_idx + 1..end_idx].trim_space()

    // Split the content by newlines to get individual field definitions
    field_lines := struct_content.split('\n')

    for line in field_lines {
        trimmed_line := line.trim_space()

        // Skip empty lines and comments
        if trimmed_line == '' || trimmed_line.starts_with('//') {
            continue
        }

        // Handle pub: or mut: prefixes
        mut field_def := trimmed_line
        if field_def.starts_with('pub:') || field_def.starts_with('mut:') {
            field_def = field_def.all_after(':').trim_space()
        }

        // Split by whitespace to separate field name and type
        parts := field_def.split_any(' ')
        if parts.len < 2 {
            continue
        }

        field_name := parts[0]
        field_type := parts[1..].join(' ')

        // Handle attributes like @[json: 'name']
        if field_name.contains('@[') {
            continue
        }

        fields[field_name] = field_type
    }

    return fields
}
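// Illustrative sketch (hypothetical struct, not from the original module): for an input such as
//   pub struct User {
//       name string
//       age int
//   }
// parse_struct_fields returns {'name': 'string', 'age': 'int'}, and create_mcp_tool_input_schema
// turns that into a ToolInputSchema with typ 'object' whose properties get typ 'string' for name
// and typ 'integer' for age.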
@@ -1,205 +0,0 @@
module developer

import freeflowuniverse.herolib.mcp
import json
import os

// fn test_parse_struct_fields() {
//     // Test case 1: Simple struct with primitive types
//     simple_struct := 'pub struct User {
//         name string
//         age int
//         active bool
//     }'

//     fields := parse_struct_fields(simple_struct)
//     assert fields.len == 3
//     assert fields['name'] == 'string'
//     assert fields['age'] == 'int'
//     assert fields['active'] == 'bool'

//     // Test case 2: Struct with pub: and mut: sections
//     complex_struct := 'pub struct Config {
//     pub:
//         host string
//         port int
//     mut:
//         connected bool
//         retries int
//     }'

//     fields2 := parse_struct_fields(complex_struct)
//     assert fields2.len == 4
//     assert fields2['host'] == 'string'
//     assert fields2['port'] == 'int'
//     assert fields2['connected'] == 'bool'
//     assert fields2['retries'] == 'int'

//     // Test case 3: Struct with attributes and comments
//     struct_with_attrs := 'pub struct ApiResponse {
//         // User ID
//         id int
//         // User full name
//         name string @[json: "full_name"]
//         // Whether account is active
//         active bool
//     }'

//     fields3 := parse_struct_fields(struct_with_attrs)
//     assert fields3.len == 3 // All fields are included
//     assert fields3['id'] == 'int'
//     assert fields3['active'] == 'bool'

//     // Test case 4: Empty struct
//     empty_struct := 'pub struct Empty {}'
//     fields4 := parse_struct_fields(empty_struct)
//     assert fields4.len == 0

//     println('test_parse_struct_fields passed')
// }

// fn test_create_mcp_tool_input_schema() {
//     d := Developer{}

//     // Test case 1: Primitive types
//     string_schema := d.create_mcp_tool_input_schema('string') or { panic(err) }
//     assert string_schema.typ == 'string'

//     int_schema := d.create_mcp_tool_input_schema('int') or { panic(err) }
//     assert int_schema.typ == 'integer'

//     float_schema := d.create_mcp_tool_input_schema('float') or { panic(err) }
//     assert float_schema.typ == 'number'

//     bool_schema := d.create_mcp_tool_input_schema('bool') or { panic(err) }
//     assert bool_schema.typ == 'boolean'

//     // Test case 2: Array type
//     array_schema := d.create_mcp_tool_input_schema('[]string') or { panic(err) }
//     assert array_schema.typ == 'array'
//     // In our implementation, arrays don't have items directly in the schema

//     // Test case 3: Struct type
//     struct_def := 'pub struct Person {
//         name string
//         age int
//     }'

//     struct_schema := d.create_mcp_tool_input_schema(struct_def) or { panic(err) }
//     assert struct_schema.typ == 'object'
//     assert struct_schema.properties.len == 2
//     assert struct_schema.properties['name'].typ == 'string'
//     assert struct_schema.properties['age'].typ == 'integer'

//     println('test_create_mcp_tool_input_schema passed')
// }

// fn test_create_mcp_tool() {
//     d := Developer{}

//     // Test case 1: Simple function with primitive types
//     simple_fn := '// Get user by ID
//     // Returns user information
//     pub fn get_user(id int, include_details bool) {
//         // Implementation
//     }'

//     tool1 := d.create_mcp_tool(simple_fn, {}) or { panic(err) }
//     assert tool1.name == 'get_user'
//     expected_desc1 := 'Get user by ID\nReturns user information'
//     assert tool1.description == expected_desc1
//     assert tool1.input_schema.typ == 'object'
//     assert tool1.input_schema.properties.len == 2
//     assert tool1.input_schema.properties['id'].typ == 'integer'
//     assert tool1.input_schema.properties['include_details'].typ == 'boolean'
//     assert tool1.input_schema.required.len == 2
//     assert 'id' in tool1.input_schema.required
//     assert 'include_details' in tool1.input_schema.required

//     // Test case 2: Method with receiver
//     method_fn := '// Update user profile
//     pub fn (u User) update_profile(name string, age int) bool {
//         // Implementation
//         return true
//     }'

//     tool2 := d.create_mcp_tool(method_fn, {}) or { panic(err) }
//     assert tool2.name == 'update_profile'
//     assert tool2.description == 'Update user profile'
//     assert tool2.input_schema.properties.len == 2
//     assert tool2.input_schema.properties['name'].typ == 'string'
//     assert tool2.input_schema.properties['age'].typ == 'integer'

//     // Test case 3: Function with complex types
//     complex_fn := '// Create new configuration
//     // Sets up system configuration
//     fn create_config(name string, settings Config) !Config {
//         // Implementation
//     }'

//     config_struct := 'pub struct Config {
//         server_url string
//         max_retries int
//         timeout float
//     }'

//     tool3 := d.create_mcp_tool(complex_fn, {
//         'Config': config_struct
//     }) or { panic(err) }
//     assert tool3.name == 'create_config'
//     expected_desc3 := 'Create new configuration\nSets up system configuration'
//     assert tool3.description == expected_desc3
//     assert tool3.input_schema.properties.len == 2
//     assert tool3.input_schema.properties['name'].typ == 'string'
//     assert tool3.input_schema.properties['settings'].typ == 'object'

//     // Test case 4: Function with no parameters
//     no_params_fn := '// Initialize system
//     pub fn initialize() {
//         // Implementation
//     }'

//     tool4 := d.create_mcp_tool(no_params_fn, {}) or { panic(err) }
//     assert tool4.name == 'initialize'
//     assert tool4.description == 'Initialize system'
//     assert tool4.input_schema.properties.len == 0
//     assert tool4.input_schema.required.len == 0

//     println('test_create_mcp_tool passed')
// }

// fn test_create_mcp_tool_code() {
//     d := Developer{}

//     // Test with the complex function that has struct parameters and return type
//     module_path := "${os.dir(@FILE)}/testdata/mock_module"
//     function_name := 'test_function'

//     code := d.create_mcp_tool_code(function_name, module_path) or {
//         panic('Failed to create MCP tool code: ${err}')
//     }

//     // Print the code instead of panic for debugging
//     println('Generated code:')
//     println('----------------------------------------')
//     println(code)
//     println('----------------------------------------')

//     // Verify the generated code contains the expected elements
//     assert code.contains('test_function_tool')
//     assert code.contains('TestConfig')
//     assert code.contains('TestResult')

//     // Test with a simple function that has primitive types
//     simple_function_name := 'simple_function'
//     simple_code := d.create_mcp_tool_code(simple_function_name, module_path) or {
//         panic('Failed to create MCP tool code for simple function: ${err}')
//     }

//     // Verify the simple function code
//     assert simple_code.contains('simple_function_tool')
//     assert simple_code.contains('name string')
//     assert simple_code.contains('count int')

//     // println('test_create_mcp_tool_code passed')
// }
@@ -1,108 +0,0 @@
module developer

import freeflowuniverse.herolib.mcp
import x.json2 as json { Any }
// import json

const create_mcp_tool_code_tool = mcp.Tool{
    name: 'create_mcp_tool_code'
    description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
returns an MCP Tool code in v for attaching the function to the mcp server'
    input_schema: mcp.ToolInputSchema{
        typ: 'object'
        properties: {
            'function_name': mcp.ToolProperty{
                typ: 'string'
                items: mcp.ToolItems{
                    typ: ''
                    enum: []
                }
                enum: []
            }
            'module_path': mcp.ToolProperty{
                typ: 'string'
                items: mcp.ToolItems{
                    typ: ''
                    enum: []
                }
                enum: []
            }
        }
        required: ['function_name', 'module_path']
    }
}

pub fn (d &Developer) create_mcp_tool_code_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
    function_name := arguments['function_name'].str()
    module_path := arguments['module_path'].str()
    result := d.create_mcp_tool_code(function_name, module_path) or {
        return mcp.error_tool_call_result(err)
    }
    return mcp.ToolCallResult{
        is_error: false
        content: result_to_mcp_tool_contents[string](result)
    }
}

// Tool definition for the create_mcp_tool function
const create_mcp_tool_tool = mcp.Tool{
    name: 'create_mcp_tool'
    description: 'Parses a V language function string and returns an MCP Tool struct. This tool analyzes function signatures, extracts parameters, and generates the appropriate MCP Tool representation.'
    input_schema: mcp.ToolInputSchema{
        typ: 'object'
        properties: {
            'function': mcp.ToolProperty{
                typ: 'string'
            }
            'types': mcp.ToolProperty{
                typ: 'object'
            }
        }
        required: ['function']
    }
}

pub fn (d &Developer) create_mcp_tool_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
    function := arguments['function'].str()
    types := json.decode[map[string]string](arguments['types'].str())!
    result := d.create_mcp_tool(function, types) or { return mcp.error_tool_call_result(err) }
    return mcp.ToolCallResult{
        is_error: false
        content: result_to_mcp_tool_contents[string](result.str())
    }
}

// Tool definition for the create_mcp_tool_handler function
const create_mcp_tool_handler_tool = mcp.Tool{
    name: 'create_mcp_tool_handler'
    description: 'Generates a tool handler for the create_mcp_tool function. This tool handler accepts function string and types map and returns an MCP ToolCallResult.'
    input_schema: mcp.ToolInputSchema{
        typ: 'object'
        properties: {
            'function': mcp.ToolProperty{
                typ: 'string'
            }
            'types': mcp.ToolProperty{
                typ: 'object'
            }
            'result': mcp.ToolProperty{
                typ: 'string'
            }
        }
        required: ['function', 'result']
    }
}

// Tool handler for the create_mcp_tool_handler function
pub fn (d &Developer) create_mcp_tool_handler_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
    function := arguments['function'].str()
    types := json.decode[map[string]string](arguments['types'].str())!
    result_ := arguments['result'].str()
    result := d.create_mcp_tool_handler(function, types, result_) or {
        return mcp.error_tool_call_result(err)
    }
    return mcp.ToolCallResult{
        is_error: false
        content: result_to_mcp_tool_contents[string](result)
    }
}
@@ -1,31 +0,0 @@
module developer

import freeflowuniverse.herolib.mcp.logger
import freeflowuniverse.herolib.mcp
import freeflowuniverse.herolib.schemas.jsonrpc

// pub fn new_mcp_server(d &Developer) !&mcp.Server {
//     logger.info('Creating new Developer MCP server')

//     // Initialize the server with the empty handlers map
//     mut server := mcp.new_server(mcp.MemoryBackend{
//         tools: {
//             'create_mcp_tool': create_mcp_tool_tool
//             'create_mcp_tool_handler': create_mcp_tool_handler_tool
//             'create_mcp_tool_code': create_mcp_tool_code_tool
//         }
//         tool_handlers: {
//             'create_mcp_tool': d.create_mcp_tool_tool_handler
//             'create_mcp_tool_handler': d.create_mcp_tool_handler_tool_handler
//             'create_mcp_tool_code': d.create_mcp_tool_code_tool_handler
//         }
//     }, mcp.ServerParams{
//         config: mcp.ServerConfiguration{
//             server_info: mcp.ServerInfo{
//                 name: 'developer'
//                 version: '1.0.0'
//             }
//         }
//     })!
//     return server
// }
@@ -1,10 +0,0 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.mcp.developer
import freeflowuniverse.herolib.mcp.logger

mut server := developer.new_mcp_server(&developer.Developer{})!
server.start() or {
    logger.fatal('Error starting server: ${err}')
    exit(1)
}
@@ -1,11 +0,0 @@
pub fn (d &Developer) @{function.name}_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
    @{decode_stmts}
    result := d.@{function.name}(@{function.params.map(it.name).join(',')})
    or {
        return error_tool_call_result(err)
    }
    return mcp.ToolCallResult{
        is_error: false
        content: result_to_mcp_tool_content[@{result.symbol()}](result)
    }
}
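// Illustrative sketch of how this template might render for the mock module's
// simple_function(name string, count int) string shown further below; the expansion is
// assumed from the template fields above, not actual generated output:
// pub fn (d &Developer) simple_function_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
//     name := arguments["name"].str()
//     count := arguments["count"].int()
//     result := d.simple_function(name,count) or {
//         return error_tool_call_result(err)
//     }
//     return mcp.ToolCallResult{
//         is_error: false
//         content: result_to_mcp_tool_content[string](result)
//     }
// }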
TOSORT/developer/testdata/mock_module/mock.v (vendored, 38 changes)
@@ -1,38 +0,0 @@
module mock_module

// TestConfig represents a configuration for testing
pub struct TestConfig {
pub:
    name    string
    enabled bool
    count   int
    value   float64
}

// TestResult represents the result of a test operation
pub struct TestResult {
pub:
    success bool
    message string
    code    int
}

// test_function is a simple function for testing the MCP tool code generation
// It takes a config and returns a result
pub fn test_function(config TestConfig) !TestResult {
    // This is just a mock implementation for testing purposes
    if config.name == '' {
        return error('Name cannot be empty')
    }

    return TestResult{
        success: config.enabled
        message: 'Test completed for ${config.name}'
        code: if config.enabled { 0 } else { 1 }
    }
}

// simple_function is a function with primitive types for testing
pub fn simple_function(name string, count int) string {
    return '${name} count: ${count}'
}
@@ -1,34 +0,0 @@
module developer

import freeflowuniverse.herolib.mcp

const get_function_from_file_tool = mcp.Tool{
    name: 'get_function_from_file'
    description: 'get_function_from_file parses a V file and extracts a specific function block including its comments
ARGS:
file_path string - path to the V file
function_name string - name of the function to extract
RETURNS: string - the function block including comments, or empty string if not found'
    input_schema: mcp.ToolInputSchema{
        typ: 'object'
        properties: {
            'file_path': mcp.ToolProperty{
                typ: 'string'
                items: mcp.ToolItems{
                    typ: ''
                    enum: []
                }
                enum: []
            }
            'function_name': mcp.ToolProperty{
                typ: 'string'
                items: mcp.ToolItems{
                    typ: ''
                    enum: []
                }
                enum: []
            }
        }
        required: ['file_path', 'function_name']
    }
}
@@ -51,7 +51,7 @@ fn do() ! {
mut cmd := Command{
    name: 'hero'
    description: 'Your HERO toolset.'
    version: '1.0.22'
    version: '1.0.25'
}

// herocmds.cmd_run_add_flags(mut cmd)
examples/aiexamples/groq.vsh (executable file, 71 changes)
@@ -0,0 +1,71 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

module main

import freeflowuniverse.herolib.clients.openai
import os

fn test1(mut client openai.OpenAI) ! {
    instruction := '
You are a template language converter. You convert Pug templates to Jet templates.

The target template language, Jet, is defined as follows:
'

    // Create a chat completion request
    res := client.chat_completion(
        msgs: openai.Messages{
            messages: [
                openai.Message{
                    role: .user
                    content: 'What are the key differences between Groq and other AI inference providers?'
                },
            ]
        }
    )!

    // Print the response
    println('\nGroq AI Response:')
    println('==================')
    println(res.choices[0].message.content)
    println('\nUsage Statistics:')
    println('Prompt tokens: ${res.usage.prompt_tokens}')
    println('Completion tokens: ${res.usage.completion_tokens}')
    println('Total tokens: ${res.usage.total_tokens}')
}

fn test2(mut client openai.OpenAI) ! {
    // Create a chat completion request
    res := client.chat_completion(
        model: 'deepseek-r1-distill-llama-70b'
        msgs: openai.Messages{
            messages: [
                openai.Message{
                    role: .user
                    content: 'A story of 10 lines?'
                },
            ]
        }
    )!

    println('\nGroq AI Response:')
    println('==================')
    println(res.choices[0].message.content)
    println('\nUsage Statistics:')
    println('Prompt tokens: ${res.usage.prompt_tokens}')
    println('Completion tokens: ${res.usage.completion_tokens}')
    println('Total tokens: ${res.usage.total_tokens}')
}

println("
TO USE:
export AIKEY='gsk_...'
export AIURL='https://api.groq.com/openai/v1'
export AIMODEL='llama-3.3-70b-versatile'
")

mut client := openai.get(name: 'test')!
println(client)

// test1(mut client)!
test2(mut client)!
examples/aiexamples/jetconvertor.vsh (executable file, 7 changes)
@@ -0,0 +1,7 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.mcp.aitools

// aitools.convert_pug("/root/code/github/freeflowuniverse/herolauncher/pkg/herolauncher/web/templates/admin")!

aitools.convert_pug('/root/code/github/freeflowuniverse/herolauncher/pkg/zaz/webui/templates')!
@@ -1,69 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.jina
import freeflowuniverse.herolib.osal
import os

// Example of using the Jina client

fn main() {
    // Set environment variable for testing
    // In production, you would set this in your environment
    // osal.env_set(key: 'JINAKEY', value: 'your-api-key')

    // Check if JINAKEY environment variable exists
    if !osal.env_exists('JINAKEY') {
        println('JINAKEY environment variable not set. Please set it before running this example.')
        exit(1)
    }

    // Create a Jina client instance
    mut client := jina.get(name: 'default')!

    println('Jina client initialized successfully.')

    // Example: Create embeddings
    model := 'jina-embeddings-v3'
    texts := ['Hello, world!', 'How are you doing?']

    println('Creating embeddings for texts: ${texts}')
    result := client.create_embeddings(texts, model, 'retrieval.query')!

    println('Embeddings created successfully.')
    println('Model: ${result['model']}')
    println('Data count: ${result['data'].arr().len}')

    // Example: List classifiers
    println('\nListing classifiers:')
    classifiers := client.list_classifiers() or {
        println('Failed to list classifiers: ${err}')
        return
    }

    println('Classifiers retrieved successfully.')

    // Example: Create a classifier
    println('\nTraining a classifier:')
    examples := [
        jina.TrainingExample{
            text: 'This movie was great!'
            label: 'positive'
        },
        jina.TrainingExample{
            text: 'I did not like this movie.'
            label: 'negative'
        },
        jina.TrainingExample{
            text: 'The movie was okay.'
            label: 'neutral'
        },
    ]

    training_result := client.train(examples, model, 'private') or {
        println('Failed to train classifier: ${err}')
        return
    }

    println('Classifier trained successfully.')
    println('Classifier ID: ${training_result['classifier_id']}')
}
examples/aiexamples/qdrant.vsh (executable file, 128 changes)
@@ -0,0 +1,128 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.qdrant
import freeflowuniverse.herolib.installers.db.qdrant_installer
import freeflowuniverse.herolib.core.httpconnection
import rand
import os

println('Starting Qdrant example script')

// Print environment information
println('Current directory: ${os.getwd()}')
println('Home directory: ${os.home_dir()}')

mut i := qdrant_installer.get()!
i.install()!

// 1. Get the qdrant client
println('Getting Qdrant client...')
mut qdrant_client := qdrant.get()!
println('Qdrant client URL: ${qdrant_client.url}')

// Check if Qdrant server is running
println('Checking Qdrant server health...')
health := qdrant_client.health_check() or {
    println('Error checking health: ${err}')
    false
}
println('Qdrant server health: ${health}')

// Get service info
println('Getting Qdrant service info...')
service_info := qdrant_client.get_service_info() or {
    println('Error getting service info: ${err}')
    exit(1)
}
println('Qdrant service info: ${service_info}')

// 2. Generate collection name
collection_name := 'collection_' + rand.string(4)
println('Generated collection name: ${collection_name}')

// 3. Create a new collection
println('Creating collection...')
created_collection := qdrant_client.create_collection(
    collection_name: collection_name
    size: 15
    distance: 'Cosine'
) or {
    println('Error creating collection: ${err}')
    exit(1)
}
println('Created Collection: ${created_collection}')

// 4. Get the created collection
println('Getting collection...')
get_collection := qdrant_client.get_collection(
    collection_name: collection_name
) or {
    println('Error getting collection: ${err}')
    exit(1)
}
println('Get Collection: ${get_collection}')

// 5. List all collections
println('Listing collections...')
list_collection := qdrant_client.list_collections() or {
    println('Error listing collections: ${err}')
    exit(1)
}
println('List Collection: ${list_collection}')

// 6. Check collection existence
println('Checking collection existence...')
collection_existence := qdrant_client.is_collection_exists(
    collection_name: collection_name
) or {
    println('Error checking collection existence: ${err}')
    exit(1)
}
println('Collection Existence: ${collection_existence}')

// 7. Retrieve points
println('Retrieving points...')
collection_points := qdrant_client.retrieve_points(
    collection_name: collection_name
    ids: [
        0,
        3,
        100,
    ]
) or {
    println('Error retrieving points: ${err}')
    exit(1)
}
println('Collection Points: ${collection_points}')

// 8. Upsert points
println('Upserting points...')
upsert_points := qdrant_client.upsert_points(
    collection_name: collection_name
    points: [
        qdrant.Point{
            payload: {
                'key': 'value'
            }
            vector: [1.0, 2.0, 3.0]
        },
        qdrant.Point{
            payload: {
                'key': 'value'
            }
            vector: [4.0, 5.0, 6.0]
        },
        qdrant.Point{
            payload: {
                'key': 'value'
            }
            vector: [7.0, 8.0, 9.0]
        },
    ]
) or {
    println('Error upserting points: ${err}')
    exit(1)
}
println('Upsert Points: ${upsert_points}')

println('Qdrant example script completed successfully')
@@ -1 +0,0 @@
export GROQ_API_KEY="your-groq-api-key-here"
@@ -1,64 +0,0 @@
# Groq AI Client Example

This example demonstrates how to use Groq's AI API with the herolib OpenAI client. Groq provides API compatibility with OpenAI's client libraries, allowing you to leverage Groq's fast inference speeds with minimal changes to your existing code.

## Prerequisites

- V programming language installed
- A Groq API key (get one from [Groq's website](https://console.groq.com/keys))

## Setup

1. Copy the `.env.example` file to `.env`:

   ```bash
   cp .env.example .env
   ```

2. Edit the `.env` file and replace `your-groq-api-key-here` with your actual Groq API key.

3. Load the environment variables:

   ```bash
   source .env
   ```

## Running the Example

Execute the script with:

```bash
v run groq_client.vsh
```

Or make it executable first:

```bash
chmod +x groq_client.vsh
./groq_client.vsh
```

## How It Works

The example uses the existing OpenAI client from herolib but configures it to use Groq's API endpoint (a minimal sketch follows this list):

1. It retrieves the Groq API key from the environment variables
2. Configures the OpenAI client with the Groq API key
3. Overrides the default OpenAI URL with Groq's API URL (`https://api.groq.com/openai/v1`)
4. Sends a chat completion request to Groq's API
5. Displays the response
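A minimal sketch of that flow, condensed from the `groq_client.vsh` script shown later in this diff (the client fields and the `chat_completion` call are taken from that script and are not re-verified against the current client API; the model name is just one of the examples listed below):

```v
import freeflowuniverse.herolib.clients.openai
import os

// Point the herolib OpenAI client at Groq's OpenAI-compatible endpoint.
mut client := openai.OpenAI{
    name: 'groq'
    api_key: os.getenv('GROQ_API_KEY')
    server_url: 'https://api.groq.com/openai/v1'
}

// Send one chat message and print the reply.
res := client.chat_completion('mixtral-8x7b-32768', openai.Messages{
    messages: [
        openai.Message{
            role: .user
            content: 'Hello from Groq!'
        },
    ]
})!
println(res.choices[0].message.content)
```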
## Supported Models

Groq supports various models including:

- llama2-70b-4096
- mixtral-8x7b-32768
- gemma-7b-it

For a complete and up-to-date list of supported models, refer to the [Groq API documentation](https://console.groq.com/docs/models).

## Notes

- The example uses the `gpt_3_5_turbo` enum from the OpenAI client, but Groq will automatically map this to an appropriate model on their end.
- For production use, you may want to explicitly specify one of Groq's supported models.
@@ -1,46 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

module main

import freeflowuniverse.herolib.clients.openai
import os

fn main() {
    // Get API key from environment variable
    key := os.getenv('GROQ_API_KEY')
    if key == '' {
        println('Error: GROQ_API_KEY environment variable not set')
        println('Please set it by running: source .env')
        exit(1)
    }

    // Get the configured client
    mut client := openai.OpenAI{
        name: 'groq'
        api_key: key
        server_url: 'https://api.groq.com/openai/v1'
    }

    // Define the model and message for chat completion
    // Note: Use a model that Groq supports, like llama2-70b-4096 or mixtral-8x7b-32768
    model := 'qwen-2.5-coder-32b'

    // Create a chat completion request
    res := client.chat_completion(model, openai.Messages{
        messages: [
            openai.Message{
                role: .user
                content: 'What are the key differences between Groq and other AI inference providers?'
            },
        ]
    })!

    // Print the response
    println('\nGroq AI Response:')
    println('==================')
    println(res.choices[0].message.content)
    println('\nUsage Statistics:')
    println('Prompt tokens: ${res.usage.prompt_tokens}')
    println('Completion tokens: ${res.usage.completion_tokens}')
    println('Total tokens: ${res.usage.total_tokens}')
}
@@ -1,85 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.qdrant
import freeflowuniverse.herolib.core.httpconnection
import rand

// 1. Get the qdrant client
mut qdrant_client := qdrant.get()!

// 2. Generate collection name
collection_name := 'collection_' + rand.string(4)

// 2. Create a new collection
created_collection := qdrant_client.create_collection(
    collection_name: collection_name
    size: 15
    distance: 'Cosine'
)!

println('Created Collection: ${created_collection}')

// 3. Get the created collection
get_collection := qdrant_client.get_collection(
    collection_name: collection_name
)!

println('Get Collection: ${get_collection}')

// 4. Delete the created collection
// deleted_collection := qdrant_client.delete_collection(
//     collection_name: collection_name
// )!

// println('Deleted Collection: ${deleted_collection}')

// 5. List all collections
list_collection := qdrant_client.list_collections()!
println('List Collection: ${list_collection}')

// 6. Check collection existence
collection_existence := qdrant_client.is_collection_exists(
    collection_name: collection_name
)!
println('Collection Existence: ${collection_existence}')

// 7. Retrieve points
collection_points := qdrant_client.retrieve_points(
    collection_name: collection_name
    ids: [
        0,
        3,
        100,
    ]
)!

println('Collection Points: ${collection_points}')

// 8. Upsert points
upsert_points := qdrant_client.upsert_points(
    collection_name: collection_name
    points: [
        qdrant.Point{
            payload: {
                'key': 'value'
            }
            vector: [1.0, 2.0, 3.0]
        },
        qdrant.Point{
            payload: {
                'key': 'value'
            }
            vector: [4.0, 5.0, 6.0]
        },
        qdrant.Point{
            payload: {
                'key': 'value'
            }
            vector: [7.0, 8.0, 9.0]
        },
    ]
)!

println('Upsert Points: ${upsert_points}')
@@ -6,3 +6,4 @@ mut db := qdrant_installer.get()!
db.install()!
db.start()!
db.destroy()!
examples/webdav/.gitignore (vendored, normal file, 1 change)
@@ -0,0 +1 @@
webdav_vfs
@@ -1,69 +0,0 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.vfs.webdav
import cli { Command, Flag }
import os

fn main() {
    mut cmd := Command{
        name: 'webdav'
        description: 'Vlang Webdav Server'
    }

    mut app := Command{
        name: 'webdav'
        description: 'Vlang Webdav Server'
        execute: fn (cmd Command) ! {
            port := cmd.flags.get_int('port')!
            directory := cmd.flags.get_string('directory')!
            user := cmd.flags.get_string('user')!
            password := cmd.flags.get_string('password')!

            mut server := webdav.new_app(
                root_dir: directory
                server_port: port
                user_db: {
                    user: password
                }
            )!

            server.run()
            return
        }
    }

    app.add_flag(Flag{
        flag: .int
        name: 'port'
        abbrev: 'p'
        description: 'server port'
        default_value: ['8000']
    })

    app.add_flag(Flag{
        flag: .string
        required: true
        name: 'directory'
        abbrev: 'd'
        description: 'server directory'
    })

    app.add_flag(Flag{
        flag: .string
        required: true
        name: 'user'
        abbrev: 'u'
        description: 'username'
    })

    app.add_flag(Flag{
        flag: .string
        required: true
        name: 'password'
        abbrev: 'pw'
        description: 'user password'
    })

    app.setup()
    app.parse(os.args)
}
@@ -8,8 +8,8 @@ import log
const database_path = os.join_path(os.dir(@FILE), 'database')

mut metadata_db := ourdb.new(path: os.join_path(database_path, 'metadata'))!
mut data_db := ourdb.new(path: os.join_path(database_path, 'data'))!
mut metadata_db := ourdb.new(path: os.join_path(database_path, 'metadata'), reset: true)!
mut data_db := ourdb.new(path: os.join_path(database_path, 'data', reset: true))!
mut vfs := vfs_db.new(mut metadata_db, mut data_db)!
mut server := webdav.new_server(
    vfs: vfs
examples/webtools/docusaurus_example.vsh (executable file, 8 changes)
@@ -0,0 +1,8 @@
#!/usr/bin/env -S v -n -w -gc none -cg -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.web.docusaurus

// Create a new docusaurus factory
mut docs := docusaurus.new(
    build_path: '/tmp/docusaurus_build'
)!
examples/webtools/docusaurus_example_complete.vsh (executable file, 238 changes)
@@ -0,0 +1,238 @@
#!/usr/bin/env -S v -n -w -gc none -cg -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.web.docusaurus
import freeflowuniverse.herolib.core.pathlib
import freeflowuniverse.herolib.core.playbook
import os

fn main() {
    println('Starting Docusaurus Example with HeroScript')

    // Define the HeroScript that configures our Docusaurus site
    hero_script := '
!!docusaurus.config
    name:"my-documentation"
    title:"My Documentation Site"
    tagline:"Documentation made simple with V and Docusaurus"
    url:"https://docs.example.com"
    url_home:"docs/"
    base_url:"/"
    favicon:"img/favicon.png"
    image:"img/hero.png"
    copyright:"© 2025 Example Organization"

!!docusaurus.config_meta
    description:"Comprehensive documentation for our amazing project"
    image:"https://docs.example.com/img/social-card.png"
    title:"My Documentation | Official Docs"

!!docusaurus.ssh_connection
    name:"production"
    host:"example.com"
    login:"deploy"
    port:22
    key_path:"~/.ssh/id_rsa"

!!docusaurus.build_dest
    ssh_name:"production"
    path:"/var/www/docs"

!!docusaurus.navbar
    title:"My Project"

!!docusaurus.navbar_item
    label:"Documentation"
    href:"/docs"
    position:"left"

!!docusaurus.navbar_item
    label:"API"
    href:"/api"
    position:"left"

!!docusaurus.navbar_item
    label:"GitHub"
    href:"https://github.com/example/repo"
    position:"right"

!!docusaurus.footer
    style:"dark"

!!docusaurus.footer_item
    title:"Documentation"
    label:"Introduction"
    to:"/docs"

!!docusaurus.footer_item
    title:"Documentation"
    label:"API Reference"
    to:"/api"

!!docusaurus.footer_item
    title:"Community"
    label:"GitHub"
    href:"https://github.com/example/repo"

!!docusaurus.footer_item
    title:"Community"
    label:"Discord"
    href:"https://discord.gg/example"

!!docusaurus.footer_item
    title:"More"
    label:"Blog"
    href:"https://blog.example.com"

!!docusaurus.import_source
    url:"https://github.com/example/external-docs"
    dest:"external"
    replace:"PROJECT_NAME:My Project, VERSION:1.0.0"
'

    mut docs := docusaurus.new(
        build_path: os.join_path(os.home_dir(), 'hero/var/docusaurus_demo1')
        update: true // Update the templates
        heroscript: hero_script
    ) or {
        eprintln('Error creating docusaurus factory with inline script: ${err}')
        exit(1)
    }

    // Create a site directory if it doesn't exist
    site_path := os.join_path(os.home_dir(), 'hero/var/docusaurus_demo_src')
    os.mkdir_all(site_path) or {
        eprintln('Error creating site directory: ${err}')
        exit(1)
    }

    // Get or create a site using the factory
    println('Creating site...')
    mut site := docs.get(
        name: 'my-documentation'
        path: site_path
        init: true // Create if it doesn't exist
        // Note: The site will use the config from the previously processed HeroScript
    ) or {
        eprintln('Error creating site: ${err}')
        exit(1)
    }

    // Generate a sample markdown file for the docs
    println('Creating sample markdown content...')
    mut docs_dir := pathlib.get_dir(path: os.join_path(site_path, 'docs'), create: true) or {
        eprintln('Error creating docs directory: ${err}')
        exit(1)
    }

    // Create intro.md file
    mut intro_file := docs_dir.file_get_new('intro.md') or {
        eprintln('Error creating intro file: ${err}')
        exit(1)
    }

    intro_content := '---
title: Introduction
slug: /
sidebar_position: 1
---

# Welcome to My Documentation

This is a sample documentation site created with Docusaurus and HeroLib V using HeroScript configuration.

## Features

- Easy to use
- Markdown support
- Customizable
- Search functionality

## Getting Started

Follow these steps to get started:

1. Installation
2. Configuration
3. Adding content
4. Deployment
'
    intro_file.write(intro_content) or {
        eprintln('Error writing to intro file: ${err}')
        exit(1)
    }

    // Create quick-start.md file
    mut quickstart_file := docs_dir.file_get_new('quick-start.md') or {
        eprintln('Error creating quickstart file: ${err}')
        exit(1)
    }

    quickstart_content := '---
title: Quick Start
sidebar_position: 2
---

# Quick Start Guide

This guide will help you get up and running quickly.

## Installation

```bash
$ npm install my-project
```

## Basic Usage

```javascript
import { myFunction } from "my-project";

// Use the function
const result = myFunction();
console.log(result);
```
'
    quickstart_file.write(quickstart_content) or {
        eprintln('Error writing to quickstart file: ${err}')
        exit(1)
    }

    // Generate the site
    println('Generating site...')
    site.generate() or {
        eprintln('Error generating site: ${err}')
        exit(1)
    }

    println('Site generated successfully!')

    // Choose which operation to perform:

    // Option 1: Run in development mode
    // This will start a development server in a screen session
    println('Starting development server...')
    site.dev() or {
        eprintln('Error starting development server: ${err}')
        exit(1)
    }

    // Option 2: Build for production (uncomment to use)
    /*
    println('Building site for production...')
    site.build() or {
        eprintln('Error building site: ${err}')
        exit(1)
    }
    println('Site built successfully!')
    */

    // Option 3: Build and publish to the remote server (uncomment to use)
    /*
    println('Building and publishing site...')
    site.build_publish() or {
        eprintln('Error publishing site: ${err}')
        exit(1)
    }
    println('Site published successfully!')
    */
}
@@ -4,7 +4,7 @@ set -e
|
||||
|
||||
os_name="$(uname -s)"
|
||||
arch_name="$(uname -m)"
|
||||
version='1.0.22'
|
||||
version='1.0.25'
|
||||
|
||||
|
||||
# Base URL for GitHub releases
|
||||
|
||||
106
install_v.sh
106
install_v.sh
@@ -60,6 +60,22 @@ command_exists() {
|
||||
command -v "$1" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
# Function to run commands with sudo if needed
|
||||
function run_sudo() {
|
||||
# Check if we're already root
|
||||
if [ "$(id -u)" -eq 0 ]; then
|
||||
# We are root, run the command directly
|
||||
"$@"
|
||||
# Check if sudo is installed
|
||||
elif command_exists sudo; then
|
||||
# Use sudo to run the command
|
||||
sudo "$@"
|
||||
else
|
||||
# No sudo available, try to run directly
|
||||
"$@"
|
||||
fi
|
||||
}
|
||||
|
||||
export DIR_BASE="$HOME"
|
||||
export DIR_BUILD="/tmp"
|
||||
export DIR_CODE="$DIR_BASE/code"
|
||||
@@ -93,7 +109,7 @@ function package_install {
|
||||
local command_name="$1"
|
||||
if [[ "${OSNAME}" == "ubuntu" ]]; then
|
||||
if is_github_actions; then
|
||||
sudo apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential
|
||||
run_sudo apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential
|
||||
else
|
||||
apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential
|
||||
fi
|
||||
@@ -167,8 +183,8 @@ function os_update {
|
||||
fi
|
||||
export TERM=xterm
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
sudo dpkg --configure -a
|
||||
sudo apt update -y
|
||||
run_sudo dpkg --configure -a
|
||||
run_sudo apt update -y
|
||||
if is_github_actions; then
|
||||
echo "** IN GITHUB ACTIONS, DON'T DO UPDATE"
|
||||
else
|
||||
@@ -242,8 +258,11 @@ function hero_lib_get {
|
||||
}
|
||||
|
||||
function install_secp256k1 {
|
||||
|
||||
echo "Installing secp256k1..."
|
||||
if [[ "${OSNAME}" == "darwin"* ]]; then
|
||||
# Attempt installation only if not already found
|
||||
echo "Attempting secp256k1 installation via Homebrew..."
|
||||
brew install secp256k1
|
||||
elif [[ "${OSNAME}" == "ubuntu" ]]; then
|
||||
# Install build dependencies
|
||||
@@ -260,7 +279,7 @@ function install_secp256k1 {
|
||||
./configure
|
||||
make -j 5
|
||||
if is_github_actions; then
|
||||
sudo make install
|
||||
run_sudo make install
|
||||
else
|
||||
make install
|
||||
fi
|
||||
@@ -281,16 +300,16 @@ remove_all() {
|
||||
# Set reset to true to use existing reset functionality
|
||||
RESET=true
|
||||
# Call reset functionality
|
||||
sudo rm -rf ~/code/v
|
||||
sudo rm -rf ~/_code/v
|
||||
sudo rm -rf ~/.config/v-analyzer
|
||||
run_sudo rm -rf ~/code/v
|
||||
run_sudo rm -rf ~/_code/v
|
||||
run_sudo rm -rf ~/.config/v-analyzer
|
||||
if command_exists v; then
|
||||
echo "Removing V from system..."
|
||||
sudo rm -f $(which v)
|
||||
run_sudo rm -f $(which v)
|
||||
fi
|
||||
if command_exists v-analyzer; then
|
||||
echo "Removing v-analyzer from system..."
|
||||
sudo rm -f $(which v-analyzer)
|
||||
run_sudo rm -f $(which v-analyzer)
|
||||
fi
|
||||
|
||||
# Remove v-analyzer path from rc files
|
||||
@@ -317,8 +336,6 @@ remove_all() {
|
||||
# Function to check if a service is running and start it if needed
|
||||
check_and_start_redis() {
|
||||
|
||||
|
||||
|
||||
# Normal service management for non-container environments
|
||||
if [[ "${OSNAME}" == "ubuntu" ]] || [[ "${OSNAME}" == "debian" ]]; then
|
||||
|
||||
@@ -326,12 +343,12 @@ check_and_start_redis() {
|
||||
if is_github_actions; then
|
||||
|
||||
# Import Redis GPG key
|
||||
curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
|
||||
curl -fsSL https://packages.redis.io/gpg | run_sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
|
||||
# Add Redis repository
|
||||
echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list
|
||||
echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | run_sudo tee /etc/apt/sources.list.d/redis.list
|
||||
# Install Redis
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y redis
|
||||
run_sudo apt-get update
|
||||
run_sudo apt-get install -y redis
|
||||
|
||||
# Start Redis
|
||||
redis-server --daemonize yes
|
||||
@@ -366,7 +383,7 @@ check_and_start_redis() {
|
||||
echo "redis is already running."
|
||||
else
|
||||
echo "redis is not running. Starting it..."
|
||||
sudo systemctl start "redis"
|
||||
run_sudo systemctl start "redis"
|
||||
if systemctl is-active --quiet "redis"; then
|
||||
echo "redis started successfully."
|
||||
else
|
||||
@@ -411,7 +428,7 @@ check_and_start_redis() {
|
||||
echo "redis is already running."
|
||||
else
|
||||
echo "redis is not running. Starting it..."
|
||||
sudo systemctl start "redis"
|
||||
run_sudo systemctl start "redis"
|
||||
fi
|
||||
else
|
||||
echo "Service management for redis is not implemented for platform: $OSNAME"
|
||||
@@ -421,16 +438,47 @@ check_and_start_redis() {
|
||||
|
||||
v-install() {
|
||||
|
||||
# Check if v is already installed and in PATH
|
||||
if command_exists v; then
|
||||
echo "V is already installed and in PATH."
|
||||
# Optionally, verify the installation location or version if needed
|
||||
# For now, just exit the function assuming it's okay
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
# Only clone and install if directory doesn't exist
|
||||
if [ ! -d ~/code/v ]; then
|
||||
echo "Installing V..."
|
||||
# Note: The original check was for ~/code/v, but the installation happens in ~/_code/v.
|
||||
if [ ! -d ~/_code/v ]; then
|
||||
echo "Cloning V..."
|
||||
mkdir -p ~/_code
|
||||
cd ~/_code
|
||||
git clone --depth=1 https://github.com/vlang/v
|
||||
cd v
|
||||
make
|
||||
sudo ./v symlink
|
||||
if ! git clone --depth=1 https://github.com/vlang/v; then
|
||||
echo "❌ Failed to clone V. Cleaning up..."
|
||||
rm -rf "$V_DIR"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Only clone and install if directory doesn't exist
|
||||
# Note: The original check was for ~/code/v, but the installation happens in ~/_code/v.
|
||||
# Adjusting the check to the actual installation directory.
|
||||
echo "Building V..."
|
||||
cd ~/_code/v
|
||||
make
|
||||
# Verify the build produced the executable
|
||||
if [ ! -x ~/_code/v/v ]; then
|
||||
echo "Error: V build failed, executable ~/_code/v/v not found or not executable."
|
||||
exit 1
|
||||
fi
|
||||
# Check if the built executable can report its version
|
||||
if ! ~/_code/v/v -version > /dev/null 2>&1; then
|
||||
echo "Error: Built V executable (~/_code/v/v) failed to report version."
|
||||
exit 1
|
||||
fi
|
||||
echo "V built successfully. Creating symlink..."
|
||||
run_sudo ./v symlink
|
||||
|
||||
# Verify v is in path
|
||||
if ! command_exists v; then
|
||||
@@ -446,9 +494,12 @@ v-install() {
|
||||
|
||||
v-analyzer() {
|
||||
|
||||
set -ex
|
||||
|
||||
# Install v-analyzer if requested
|
||||
if [ "$INSTALL_ANALYZER" = true ]; then
|
||||
echo "Installing v-analyzer..."
|
||||
cd /tmp
|
||||
v download -RD https://raw.githubusercontent.com/vlang/v-analyzer/main/install.vsh
|
||||
|
||||
# Check if v-analyzer bin directory exists
|
||||
@@ -517,10 +568,7 @@ if [ "$RESET" = true ] || ! command_exists v; then
|
||||
|
||||
v-install
|
||||
|
||||
# Only install v-analyzer if not in GitHub Actions environment
|
||||
if ! is_github_actions; then
|
||||
v-analyzer
|
||||
fi
|
||||
|
||||
|
||||
fi
|
||||
|
||||
@@ -534,6 +582,10 @@ fi
|
||||
|
||||
|
||||
if [ "$INSTALL_ANALYZER" = true ]; then
|
||||
# Only install v-analyzer if not in GitHub Actions environment
|
||||
if ! is_github_actions; then
|
||||
v-analyzer
|
||||
fi
|
||||
echo "Run 'source ~/.bashrc' or 'source ~/.zshrc' to update PATH for v-analyzer"
|
||||
fi
|
||||
|
||||
|
||||
BIN
jina.so.dylib
BIN
jina.so.dylib
Binary file not shown.
123
lib/ai/escalayer/README.md
Normal file
123
lib/ai/escalayer/README.md
Normal file
@@ -0,0 +1,123 @@
|
||||
# Escalayer
|
||||
|
||||
Escalayer is a module for executing AI tasks with automatic escalation to more powerful models when needed. It provides a framework for creating complex AI workflows by breaking them down into sequential unit tasks.
|
||||
|
||||
## Overview
|
||||
|
||||
Escalayer allows you to:
|
||||
|
||||
1. Create complex AI tasks composed of multiple sequential unit tasks
|
||||
2. Execute each unit task with a cheap AI model first
|
||||
3. Automatically retry with a more powerful model if the task fails
|
||||
4. Process and validate AI responses with custom callback functions
|
||||
|
||||
## Architecture
|
||||
|
||||
The module is organized into the following components:
|
||||
|
||||
- **Task**: Represents a complete AI task composed of multiple sequential unit tasks
|
||||
- **UnitTask**: Represents a single step in the task with prompt generation and response processing
|
||||
- **ModelConfig**: Defines the configuration for an AI model
|
||||
- **OpenRouter Integration**: Uses OpenRouter to access a wide range of AI models
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Example
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
|
||||
|
||||
fn main() {
|
||||
// Create a new task
|
||||
mut task := escalayer.new_task(
|
||||
name: 'rhai_wrapper_creator'
|
||||
description: 'Create Rhai wrappers for Rust functions'
|
||||
)
|
||||
|
||||
// Define the unit tasks
|
||||
task.new_unit_task(
|
||||
name: 'separate_functions'
|
||||
prompt_function: separate_functions
|
||||
callback_function: process_functions
|
||||
)
|
||||
|
||||
// Initiate the task
|
||||
result := task.initiate('path/to/rust/file.rs') or {
|
||||
println('Task failed: ${err}')
|
||||
return
|
||||
}
|
||||
|
||||
println('Task completed successfully')
|
||||
println(result)
|
||||
}
|
||||
|
||||
// Define the prompt function
|
||||
fn separate_functions(input string) string {
|
||||
return 'Read rust file and separate it into functions ${input}'
|
||||
}
|
||||
|
||||
// Define the callback function
|
||||
fn process_functions(response string)! string {
|
||||
// Process the AI response
|
||||
// Return error if processing fails
|
||||
if response.contains('error') {
|
||||
return error('Failed to process functions: Invalid response format')
|
||||
}
|
||||
return response
|
||||
}
|
||||
```
|
||||
|
||||
### Advanced Configuration
|
||||
|
||||
You can configure each unit task with different models, retry counts, and other parameters:
|
||||
|
||||
```v
|
||||
// Configure with custom parameters
|
||||
task.new_unit_task(
|
||||
name: 'create_wrappers'
|
||||
prompt_function: create_wrappers
|
||||
callback_function: process_wrappers
|
||||
retry_count: 2
|
||||
base_model: escalayer.ModelConfig{
|
||||
name: 'claude-3-haiku-20240307'
|
||||
provider: 'anthropic'
|
||||
temperature: 0.5
|
||||
max_tokens: 4000
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
1. When you call `task.initiate(input)`, the first unit task is executed with its prompt function.
|
||||
2. The prompt is sent to the base AI model.
|
||||
3. The response is processed by the callback function.
|
||||
4. If the callback returns an error, the task is retried with the same model.
|
||||
5. After a specified number of retries, the task escalates to a more powerful model.
|
||||
6. Once a unit task succeeds, its result is passed as input to the next unit task.
|
||||
7. This process continues until all unit tasks are completed (a simplified sketch of this loop follows below).
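The sketch below is illustrative only: it fakes the model call so the retry-and-escalate control flow can be run on its own, and the names `call_model` and `callback` are placeholders rather than part of the escalayer API (the real loop lives in `unit_task.v`).

```v
module main

// Illustrative stand-ins; not part of the escalayer API.
struct ModelConfig {
	name string
}

// Fake model call so the control flow can run without an API key.
fn call_model(model ModelConfig, prompt string) string {
	return 'answer from ${model.name} for: ${prompt}'
}

// Fake callback: rejects whatever the cheap model produces.
fn callback(response string) !string {
	if response.contains('cheap') {
		return error('invalid response format')
	}
	return response
}

fn main() {
	base := ModelConfig{
		name: 'cheap-model'
	}
	powerful := ModelConfig{
		name: 'powerful-model'
	}
	retry_count := 2
	prompt := 'Summarize the input'
	mut last_error := ''

	for attempt in 0 .. retry_count * 2 {
		// Escalate once the base model has used up its retries.
		model := if attempt < retry_count { base } else { powerful }
		// Prepend the previous error so the model can correct itself.
		current := if last_error == '' {
			prompt
		} else {
			'Previous attempt failed with error: ${last_error}\n\n${prompt}'
		}
		response := call_model(model, current)
		result := callback(response) or {
			last_error = err.msg()
			continue
		}
		println('succeeded with ${model.name}: ${result}')
		return
	}
	println('all attempts failed: ${last_error}')
}
```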
|
||||
|
||||
## Environment Setup
|
||||
|
||||
Escalayer uses OpenRouter for AI model access. Set the following environment variable:
|
||||
|
||||
```
|
||||
OPENROUTER_API_KEY=your_api_key_here
|
||||
```
|
||||
|
||||
You can get an API key from [OpenRouter](https://openrouter.ai/).
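For example, in a POSIX shell (the key value shown is a placeholder):

```bash
export OPENROUTER_API_KEY="sk-or-your-key-here"
```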
|
||||
|
||||
## Original Requirements
|
||||
|
||||
This module was designed based on the following requirements:
|
||||
|
||||
- Create a system for executing AI tasks with a retry mechanism
|
||||
- Escalate to more powerful models if cheaper models fail
|
||||
- Use OpenAI client over OpenRouter for AI calls
|
||||
- Break down complex tasks into sequential unit tasks
|
||||
- Each unit task has a function that generates a prompt and a callback that processes the response
|
||||
- Retry if the callback returns an error, with the error message prepended to the input string
|
||||
|
||||
For a detailed architecture overview, see [escalayer_architecture.md](./escalayer_architecture.md).
|
||||
|
||||
For a complete example, see [example.v](../servers/rhai).
|
||||
40
lib/ai/escalayer/escalayer.v
Normal file
40
lib/ai/escalayer/escalayer.v
Normal file
@@ -0,0 +1,40 @@
|
||||
module escalayer
|
||||
|
||||
import freeflowuniverse.herolib.clients.openai
|
||||
|
||||
// TaskParams defines the parameters for creating a new task
|
||||
@[params]
|
||||
pub struct TaskParams {
|
||||
pub:
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// Create a new task
|
||||
pub fn new_task(params TaskParams) &Task {
|
||||
return &Task{
|
||||
name: params.name
|
||||
description: params.description
|
||||
unit_tasks: []
|
||||
current_result: ''
|
||||
}
|
||||
}
|
||||
|
||||
// Default model configurations
|
||||
pub fn default_base_model() ModelConfig {
|
||||
return ModelConfig{
|
||||
name: 'qwen2.5-7b-instruct'
|
||||
provider: 'openai'
|
||||
temperature: 0.7
|
||||
max_tokens: 2000
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_retry_model() ModelConfig {
|
||||
return ModelConfig{
|
||||
name: 'gpt-4'
|
||||
provider: 'openai'
|
||||
temperature: 0.7
|
||||
max_tokens: 4000
|
||||
}
|
||||
}
|
||||
342
lib/ai/escalayer/escalayer_architecture.md
Normal file
342
lib/ai/escalayer/escalayer_architecture.md
Normal file
@@ -0,0 +1,342 @@
|
||||
# Escalayer Architecture
|
||||
|
||||
This document outlines the architecture for the Escalayer module, which provides a framework for executing AI tasks with automatic escalation to more powerful models when needed.
|
||||
|
||||
## 1. Module Structure
|
||||
|
||||
```
|
||||
lib/mcp/aitools/escalayer/
|
||||
├── escalayer.v # Main module file with public API
|
||||
├── task.v # Task implementation
|
||||
├── unit_task.v # Unit task implementation
|
||||
├── models.v # Model definitions and configurations
|
||||
├── openrouter.v # OpenRouter API client
|
||||
└── README.md # Documentation
|
||||
```
|
||||
|
||||
## 2. Core Components
|
||||
|
||||
### 2.1 Data Structures
|
||||
|
||||
```mermaid
|
||||
classDiagram
|
||||
class Task {
|
||||
+string name
|
||||
+string description
|
||||
+[]UnitTask unit_tasks
|
||||
+string current_result
|
||||
+new_unit_task(params UnitTaskParams) UnitTask
|
||||
+initiate(input string)! string
|
||||
}
|
||||
|
||||
class UnitTask {
|
||||
+string name
|
||||
+Function prompt_function
|
||||
+Function callback_function
|
||||
+ModelConfig base_model
|
||||
+ModelConfig retry_model
|
||||
+int retry_count
|
||||
+execute(input string)! string
|
||||
}
|
||||
|
||||
class ModelConfig {
|
||||
+string name
|
||||
+string provider
|
||||
+float temperature
|
||||
+int max_tokens
|
||||
}
|
||||
|
||||
Task "1" *-- "many" UnitTask : contains
|
||||
UnitTask "1" *-- "1" ModelConfig : base_model
|
||||
UnitTask "1" *-- "1" ModelConfig : retry_model
|
||||
```
|
||||
|
||||
### 2.2 Component Descriptions
|
||||
|
||||
#### Task
|
||||
- Represents a complete AI task composed of multiple sequential unit tasks
|
||||
- Manages the flow of data between unit tasks
|
||||
- Tracks overall task progress and results
|
||||
|
||||
#### UnitTask
|
||||
- Represents a single step in the task
|
||||
- Contains a prompt function that generates the AI prompt
|
||||
- Contains a callback function that processes the AI response
|
||||
- Manages retries and model escalation
|
||||
|
||||
#### ModelConfig
|
||||
- Defines the configuration for an AI model
|
||||
- Includes model name, provider, and parameters like temperature and max tokens
|
||||
|
||||
#### OpenRouter Client
|
||||
- Handles communication with the OpenRouter API
|
||||
- Sends prompts to AI models and receives responses
|
||||
|
||||
## 3. Implementation Details
|
||||
|
||||
### 3.1 escalayer.v (Main Module)
|
||||
|
||||
```v
|
||||
module escalayer
|
||||
|
||||
import freeflowuniverse.herolib.clients.openai
|
||||
|
||||
// TaskParams defines the parameters for creating a new task
|
||||
@[params]
|
||||
pub struct TaskParams {
|
||||
pub:
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// Create a new task
|
||||
pub fn new_task(params TaskParams) &Task {
|
||||
return &Task{
|
||||
name: params.name
|
||||
description: params.description
|
||||
unit_tasks: []
|
||||
current_result: ''
|
||||
}
|
||||
}
|
||||
|
||||
// Default model configurations
|
||||
pub fn default_base_model() ModelConfig {
|
||||
return ModelConfig{
|
||||
name: 'gpt-3.5-turbo'
|
||||
provider: 'openai'
|
||||
temperature: 0.7
|
||||
max_tokens: 20000
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_retry_model() ModelConfig {
|
||||
return ModelConfig{
|
||||
name: 'gpt-4'
|
||||
provider: 'openai'
|
||||
temperature: 0.7
|
||||
max_tokens: 40000
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3.2 task.v
|
||||
|
||||
```v
|
||||
module escalayer
|
||||
|
||||
// Task represents a complete AI task composed of multiple sequential unit tasks
|
||||
pub struct Task {
|
||||
pub mut:
|
||||
name string
|
||||
description string
|
||||
unit_tasks []UnitTask
|
||||
current_result string
|
||||
}
|
||||
|
||||
// UnitTaskParams defines the parameters for creating a new unit task
|
||||
struct UnitTaskParams {
|
||||
name string
|
||||
prompt_function fn(string) string
|
||||
callback_function fn(string)! string
|
||||
base_model ?ModelConfig
|
||||
retry_model ?ModelConfig
|
||||
retry_count ?int
|
||||
}
|
||||
|
||||
// Add a new unit task to the task
|
||||
pub fn (mut t Task) new_unit_task(params UnitTaskParams) &UnitTask {
|
||||
}
|
||||
|
||||
// Initiate the task execution
|
||||
pub fn (mut t Task) initiate(input string)! string {
}
```
|
||||
|
||||
### 3.3 unit_task.v
|
||||
|
||||
```v
|
||||
module escalayer
|
||||
|
||||
import freeflowuniverse.herolib.clients.openai
|
||||
|
||||
// UnitTask represents a single step in the task
|
||||
pub struct UnitTask {
|
||||
pub mut:
|
||||
name string
|
||||
prompt_function fn(string) string
|
||||
callback_function fn(string)! string
|
||||
base_model ModelConfig
|
||||
retry_model ModelConfig
|
||||
retry_count int
|
||||
}
|
||||
|
||||
// Execute the unit task
|
||||
pub fn (mut ut UnitTask) execute(input string)! string {
|
||||
}
|
||||
```
|
||||
|
||||
### 3.4 models.v
|
||||
|
||||
```v
|
||||
module escalayer
|
||||
|
||||
// ModelConfig defines the configuration for an AI model
|
||||
pub struct ModelConfig {
|
||||
pub mut:
|
||||
name string
|
||||
provider string
|
||||
temperature f32
|
||||
max_tokens int
|
||||
}
|
||||
|
||||
// Call an AI model using OpenRouter
|
||||
fn call_ai_model(prompt string, model ModelConfig)! string {
|
||||
// Get OpenAI client (configured for OpenRouter)
|
||||
mut client := get_openrouter_client()!
|
||||
|
||||
// Create the message for the AI
|
||||
mut m := openai.Messages{
|
||||
messages: [
|
||||
openai.Message{
|
||||
role: .system
|
||||
content: 'You are a helpful assistant.'
|
||||
},
|
||||
openai.Message{
|
||||
role: .user
|
||||
content: prompt
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
// Call the AI model
|
||||
res := client.chat_completion(
|
||||
msgs: m,
|
||||
model: model.name,
|
||||
temperature: model.temperature,
|
||||
max_completion_tokens: model.max_tokens
|
||||
)!
|
||||
|
||||
// Extract the response content
|
||||
if res.choices.len > 0 {
|
||||
return res.choices[0].message.content
|
||||
}
|
||||
|
||||
return error('No response from AI model')
|
||||
}
|
||||
```
|
||||
|
||||
### 3.5 openrouter.v
|
||||
|
||||
```v
|
||||
module escalayer
|
||||
|
||||
import freeflowuniverse.herolib.clients.openai
|
||||
import os
|
||||
|
||||
// Get an OpenAI client configured for OpenRouter
|
||||
fn get_openrouter_client()! &openai.OpenAI {
|
||||
// Get API key from environment variable
|
||||
api_key := os.getenv('OPENROUTER_API_KEY')
|
||||
if api_key == '' {
|
||||
return error('OPENROUTER_API_KEY environment variable not set')
|
||||
}
|
||||
|
||||
// Create OpenAI client with OpenRouter base URL
|
||||
mut client := openai.new(
|
||||
api_key: api_key,
|
||||
base_url: 'https://openrouter.ai/api/v1'
|
||||
)!
|
||||
|
||||
return client
|
||||
}
|
||||
```
|
||||
|
||||
## 4. Usage Example
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
|
||||
|
||||
fn main() {
|
||||
// Create a new task
|
||||
mut task := escalayer.new_task(
|
||||
name: 'rhai_wrapper_creator'
|
||||
description: 'Create Rhai wrappers for Rust functions'
|
||||
)
|
||||
|
||||
// Define the unit tasks
|
||||
task.new_unit_task(
|
||||
name: 'separate_functions'
|
||||
prompt_function: separate_functions
|
||||
callback_function: process_functions
|
||||
)
|
||||
|
||||
task.new_unit_task(
|
||||
name: 'create_wrappers'
|
||||
prompt_function: create_wrappers
|
||||
callback_function: process_wrappers
|
||||
retry_count: 2
|
||||
)
|
||||
|
||||
task.new_unit_task(
|
||||
name: 'create_tests'
|
||||
prompt_function: create_tests
|
||||
callback_function: process_tests
|
||||
base_model: escalayer.ModelConfig{
|
||||
name: 'claude-3-haiku-20240307'
|
||||
provider: 'anthropic'
|
||||
temperature: 0.5
|
||||
max_tokens: 4000
|
||||
}
|
||||
)
|
||||
|
||||
// Initiate the task
|
||||
result := task.initiate('path/to/rust/file.rs') or {
|
||||
println('Task failed: ${err}')
|
||||
return
|
||||
}
|
||||
|
||||
println('Task completed successfully')
|
||||
println(result)
|
||||
}
|
||||
|
||||
// Define the prompt functions
|
||||
fn separate_functions(input string) string {
|
||||
return 'Read rust file and separate it into functions ${input}'
|
||||
}
|
||||
|
||||
fn create_wrappers(input string) string {
|
||||
return 'Create rhai wrappers for rust functions ${input}'
|
||||
}
|
||||
|
||||
fn create_tests(input string) string {
|
||||
return 'Create tests for rhai wrappers ${input}'
|
||||
}
|
||||
|
||||
// Define the callback functions
|
||||
fn process_functions(response string)! string {
|
||||
// Process the AI response
|
||||
// Return error if processing fails
|
||||
if response.contains('error') {
|
||||
return error('Failed to process functions: Invalid response format')
|
||||
}
|
||||
return response
|
||||
}
|
||||
|
||||
fn process_wrappers(response string)! string {
|
||||
// Process the AI response
|
||||
// Return error if processing fails
|
||||
if !response.contains('fn') {
|
||||
return error('Failed to process wrappers: No functions found')
|
||||
}
|
||||
return response
|
||||
}
|
||||
|
||||
fn process_tests(response string)! string {
|
||||
// Process the AI response
|
||||
// Return error if processing fails
|
||||
if !response.contains('test') {
|
||||
return error('Failed to process tests: No tests found')
|
||||
}
|
||||
return response
|
||||
}
|
||||
```
|
||||
62
lib/ai/escalayer/models.v
Normal file
62
lib/ai/escalayer/models.v
Normal file
@@ -0,0 +1,62 @@
|
||||
module escalayer
|
||||
|
||||
import freeflowuniverse.herolib.clients.openai
|
||||
|
||||
// ModelConfig defines the configuration for an AI model
|
||||
pub struct ModelConfig {
|
||||
pub mut:
|
||||
name string
|
||||
provider string
|
||||
temperature f32
|
||||
max_tokens int
|
||||
}
|
||||
|
||||
// Create model configs
|
||||
const claude_3_sonnet = ModelConfig{
|
||||
name: 'anthropic/claude-3.7-sonnet'
|
||||
provider: 'anthropic'
|
||||
temperature: 0.7
|
||||
max_tokens: 25000
|
||||
}
|
||||
|
||||
const gpt4 = ModelConfig{
|
||||
name: 'gpt-4'
|
||||
provider: 'openai'
|
||||
temperature: 0.7
|
||||
max_tokens: 25000
|
||||
}
|
||||
|
||||
// Call an AI model using OpenRouter
|
||||
fn call_ai_model(prompt string, model ModelConfig) !string {
|
||||
// Get OpenAI client (configured for OpenRouter)
|
||||
mut client := get_openrouter_client()!
|
||||
|
||||
// Create the message for the AI
|
||||
mut m := openai.Messages{
|
||||
messages: [
|
||||
openai.Message{
|
||||
role: .system
|
||||
content: 'You are a helpful assistant.'
|
||||
},
|
||||
openai.Message{
|
||||
role: .user
|
||||
content: prompt
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
// Call the AI model
|
||||
res := client.chat_completion(
|
||||
msgs: m
|
||||
model: model.name
|
||||
temperature: model.temperature
|
||||
max_completion_tokens: model.max_tokens
|
||||
)!
|
||||
|
||||
// Extract the response content
|
||||
if res.choices.len > 0 {
|
||||
return res.choices[0].message.content
|
||||
}
|
||||
|
||||
return error('No response from AI model')
|
||||
}
|
||||
22
lib/ai/escalayer/openrouter.v
Normal file
22
lib/ai/escalayer/openrouter.v
Normal file
@@ -0,0 +1,22 @@
|
||||
module escalayer
|
||||
|
||||
import freeflowuniverse.herolib.clients.openai
|
||||
import freeflowuniverse.herolib.osal
|
||||
import os
|
||||
|
||||
// Get an OpenAI client configured for OpenRouter
|
||||
fn get_openrouter_client() !&openai.OpenAI {
|
||||
osal.env_set(key: 'OPENROUTER_API_KEY', value: '')
|
||||
// Get API key from environment variable
|
||||
api_key := os.getenv('OPENROUTER_API_KEY')
|
||||
if api_key == '' {
|
||||
return error('OPENROUTER_API_KEY environment variable not set')
|
||||
}
|
||||
|
||||
// Create OpenAI client with OpenRouter base URL
|
||||
mut client := openai.get(
|
||||
name: 'openrouter'
|
||||
)!
|
||||
|
||||
return client
|
||||
}
|
||||
65
lib/ai/escalayer/task.v
Normal file
65
lib/ai/escalayer/task.v
Normal file
@@ -0,0 +1,65 @@
|
||||
module escalayer
|
||||
|
||||
import log
|
||||
|
||||
// Task represents a complete AI task composed of multiple sequential unit tasks
|
||||
pub struct Task {
|
||||
pub mut:
|
||||
name string
|
||||
description string
|
||||
unit_tasks []UnitTask
|
||||
current_result string
|
||||
}
|
||||
|
||||
// UnitTaskParams defines the parameters for creating a new unit task
|
||||
@[params]
|
||||
pub struct UnitTaskParams {
|
||||
pub:
|
||||
name string
|
||||
prompt_function fn (string) string
|
||||
callback_function fn (string) !string
|
||||
base_model ?ModelConfig
|
||||
retry_model ?ModelConfig
|
||||
retry_count ?int
|
||||
}
|
||||
|
||||
// Add a new unit task to the task
|
||||
pub fn (mut t Task) new_unit_task(params UnitTaskParams) &UnitTask {
|
||||
mut unit_task := UnitTask{
|
||||
name: params.name
|
||||
prompt_function: params.prompt_function
|
||||
callback_function: params.callback_function
|
||||
base_model: if base_model := params.base_model {
|
||||
base_model
|
||||
} else {
|
||||
default_base_model()
|
||||
}
|
||||
retry_model: if retry_model := params.retry_model {
|
||||
retry_model
|
||||
} else {
|
||||
default_retry_model()
|
||||
}
|
||||
retry_count: if retry_count := params.retry_count { retry_count } else { 3 }
|
||||
}
|
||||
|
||||
t.unit_tasks << unit_task
|
||||
return &t.unit_tasks[t.unit_tasks.len - 1]
|
||||
}
|
||||
|
||||
// Initiate the task execution
|
||||
pub fn (mut t Task) initiate(input string) !string {
|
||||
mut current_input := input
|
||||
|
||||
for i, mut unit_task in t.unit_tasks {
|
||||
log.error('Executing unit task ${i + 1}/${t.unit_tasks.len}: ${unit_task.name}')
|
||||
|
||||
// Execute the unit task with the current input
|
||||
result := unit_task.execute(current_input)!
|
||||
|
||||
// Update the current input for the next unit task
|
||||
current_input = result
|
||||
t.current_result = result
|
||||
}
|
||||
|
||||
return t.current_result
|
||||
}
|
||||
71
lib/ai/escalayer/unit_task.v
Normal file
71
lib/ai/escalayer/unit_task.v
Normal file
@@ -0,0 +1,71 @@
|
||||
module escalayer
|
||||
|
||||
import log
|
||||
import freeflowuniverse.herolib.clients.openai
|
||||
|
||||
// UnitTask represents a single step in the task
|
||||
pub struct UnitTask {
|
||||
pub mut:
|
||||
name string
|
||||
prompt_function fn (string) string
|
||||
callback_function fn (string) !string
|
||||
base_model ModelConfig
|
||||
retry_model ModelConfig
|
||||
retry_count int
|
||||
}
|
||||
|
||||
// Execute the unit task
|
||||
pub fn (mut ut UnitTask) execute(input string) !string {
|
||||
// Generate the prompt using the prompt function
|
||||
prompt := ut.prompt_function(input)
|
||||
|
||||
// Try with the base model first
|
||||
mut current_model := ut.base_model
|
||||
mut attempts := 0
|
||||
mut max_attempts := ut.retry_count + 1 // +1 for the initial attempt
|
||||
mut absolute_max_attempts := 1 // Hard limit on total attempts
|
||||
mut last_error := ''
|
||||
|
||||
for attempts < max_attempts && attempts < absolute_max_attempts {
|
||||
attempts++
|
||||
|
||||
// If we've exhausted retries with the base model, switch to the retry model
|
||||
if attempts > ut.retry_count {
|
||||
log.error('Escalating to more powerful model: ${ut.retry_model.name}')
|
||||
current_model = ut.retry_model
|
||||
// Calculate remaining attempts but don't exceed absolute max
|
||||
max_attempts = attempts + ut.retry_count
|
||||
if max_attempts > absolute_max_attempts {
|
||||
max_attempts = absolute_max_attempts
|
||||
}
|
||||
}
|
||||
|
||||
log.error('Attempt ${attempts} with model ${current_model.name}')
|
||||
|
||||
// Prepare the prompt with error feedback if this is a retry
|
||||
mut current_prompt := prompt
|
||||
if last_error != '' {
|
||||
current_prompt = 'Previous attempt failed with error: ${last_error}\n\n${prompt}'
|
||||
}
|
||||
|
||||
// Call the AI model
|
||||
response := call_ai_model(current_prompt, current_model) or {
|
||||
log.error('AI call failed: ${err}')
|
||||
last_error = err.str()
|
||||
continue // Try again
|
||||
}
|
||||
|
||||
// Process the response with the callback function
|
||||
result := ut.callback_function(response) or {
|
||||
// If callback returns an error, retry with the error message
|
||||
log.error('Callback returned error: ${err}')
|
||||
last_error = err.str()
|
||||
continue // Try again
|
||||
}
|
||||
|
||||
// If we get here, the callback was successful
|
||||
return result
|
||||
}
|
||||
|
||||
return error('Failed to execute unit task after ${attempts} attempts. Last error: ${last_error}')
|
||||
}
|
||||
68
lib/ai/mcp/README.md
Normal file
68
lib/ai/mcp/README.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# Model Context Protocol (MCP) Implementation
|
||||
|
||||
This module provides a V language implementation of the [Model Context Protocol (MCP)](https://spec.modelcontextprotocol.io/specification/2024-11-05/) specification. MCP is a protocol designed to standardize communication between AI models and their context providers.
|
||||
|
||||
## Overview
|
||||
|
||||
The MCP module serves as a core library for building MCP-compliant servers in V. Its main purpose is to provide all the boilerplate MCP functionality, so implementers only need to define and register their specific handlers. The module handles the Standard Input/Output (stdio) transport as described in the [MCP transport specification](https://modelcontextprotocol.io/docs/concepts/transports), enabling standardized communication between AI models and their context providers.
|
||||
|
||||
The module implements all the required MCP protocol methods (resources/list, tools/list, prompts/list, etc.) and manages the underlying JSON-RPC communication, allowing developers to focus solely on implementing their specific tools and handlers. The module itself is not a standalone server but rather a framework that can be used to build different MCP server implementations. The subdirectories within this module (such as `baobab` and `developer`) contain specific implementations of MCP servers that utilize this core framework.
|
||||
|
||||
## To test
|
||||
|
||||
```
|
||||
curl -fsSL https://bun.sh/install | bash
|
||||
source /root/.bashrc
|
||||
```
|
||||
|
||||
## Key Components
|
||||
|
||||
- **Server**: The main MCP server struct that handles JSON-RPC requests and responses
|
||||
- **Backend Interface**: Abstraction for different backend implementations (memory-based by default)
|
||||
- **Model Configuration**: Structures representing client and server capabilities according to the MCP specification
|
||||
- **Protocol Handlers**: Implementation of MCP protocol handlers for resources, prompts, tools, and initialization
|
||||
- **Factory**: Functions to create and configure an MCP server with custom backends and handlers
|
||||
|
||||
## Features
|
||||
|
||||
- Complete implementation of the MCP protocol version 2024-11-05
|
||||
- Handles all boilerplate protocol methods (resources/list, tools/list, prompts/list, etc.)
|
||||
- JSON-RPC based communication layer with automatic request/response handling
|
||||
- Support for client-server capability negotiation
|
||||
- Pluggable backend system for different storage and processing needs
|
||||
- Generic type conversion utilities for MCP tool content
|
||||
- Comprehensive error handling
|
||||
- Logging capabilities
|
||||
- Minimal implementation requirements for server developers
|
||||
|
||||
## Usage
|
||||
|
||||
To create a new MCP server using the core module:
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.ai.mcp
|
||||
import freeflowuniverse.herolib.schemas.jsonrpc
|
||||
|
||||
// Create a backend (memory-based or custom implementation)
|
||||
backend := mcp.MemoryBackend{
|
||||
tools: {
|
||||
'my_tool': my_tool_definition
|
||||
}
|
||||
tool_handlers: {
|
||||
'my_tool': my_tool_handler
|
||||
}
|
||||
}
|
||||
|
||||
// Create and configure the server
|
||||
mut server := mcp.new_server(backend, mcp.ServerParams{
|
||||
config: mcp.ServerConfiguration{
|
||||
server_info: mcp.ServerInfo{
|
||||
name: 'my_mcp_server'
|
||||
version: '1.0.0'
|
||||
}
|
||||
}
|
||||
})!
|
||||
|
||||
// Start the server
|
||||
server.start()!
|
||||
```
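The snippet above references `my_tool_definition` and `my_tool_handler` without defining them. A minimal sketch of what they could look like, modeled on the `Tool`, `ToolCallResult`, and `ToolHandler` shapes used elsewhere in this module (the echo behaviour and the `text` parameter are purely illustrative):

```v
import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2

// Hypothetical tool definition: a single required string input.
const my_tool_definition = mcp.Tool{
	name: 'my_tool'
	description: 'Echoes the provided text back to the caller'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'text': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
		}
		required: ['text']
	}
}

// Hypothetical handler matching the ToolHandler signature used by MemoryBackend.
fn my_tool_handler(arguments map[string]json2.Any) !mcp.ToolCallResult {
	text := arguments['text'].str()
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](text)
	}
}
```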
|
||||
3
lib/ai/mcp/README2.md
Normal file
3
lib/ai/mcp/README2.md
Normal file
@@ -0,0 +1,3 @@
If logic is implemented in the mcp module, then use a structure with `logic` and `mcp` folders: the logic resides in `/logic` and the MCP-related code (such as tool and prompt handlers and server code) in `/mcp`.
|
||||
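For illustration, a hypothetical module `mymodule` following this convention would look like:

```
lib/ai/mcp/mymodule/
├── logic/          # plain V logic, no MCP dependencies
│   └── mymodule.v
└── mcp/            # tool and prompt handlers plus the server factory
    ├── tools.v
    └── server.v
```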
32
lib/ai/mcp/backend_interface.v
Normal file
32
lib/ai/mcp/backend_interface.v
Normal file
@@ -0,0 +1,32 @@
|
||||
module mcp
|
||||
|
||||
import x.json2
|
||||
|
||||
interface Backend {
|
||||
// Resource methods
|
||||
resource_exists(uri string) !bool
|
||||
resource_get(uri string) !Resource
|
||||
resource_list() ![]Resource
|
||||
resource_subscribed(uri string) !bool
|
||||
resource_contents_get(uri string) ![]ResourceContent
|
||||
resource_templates_list() ![]ResourceTemplate
|
||||
|
||||
// Prompt methods
|
||||
prompt_exists(name string) !bool
|
||||
prompt_get(name string) !Prompt
|
||||
prompt_call(name string, arguments []string) ![]PromptMessage
|
||||
prompt_list() ![]Prompt
|
||||
prompt_messages_get(name string, arguments map[string]string) ![]PromptMessage
|
||||
|
||||
// Tool methods
|
||||
tool_exists(name string) !bool
|
||||
tool_get(name string) !Tool
|
||||
tool_list() ![]Tool
|
||||
tool_call(name string, arguments map[string]json2.Any) !ToolCallResult
|
||||
|
||||
// Sampling methods
|
||||
sampling_create_message(params map[string]json2.Any) !SamplingCreateMessageResult
|
||||
mut:
|
||||
resource_subscribe(uri string) !
|
||||
resource_unsubscribe(uri string) !
|
||||
}
|
||||
183
lib/ai/mcp/backend_memory.v
Normal file
183
lib/ai/mcp/backend_memory.v
Normal file
@@ -0,0 +1,183 @@
|
||||
module mcp
|
||||
|
||||
import x.json2
|
||||
|
||||
pub struct MemoryBackend {
|
||||
pub mut:
|
||||
// Resource related fields
|
||||
resources map[string]Resource
|
||||
subscriptions []string // list of subscribed resource uri's
|
||||
resource_contents map[string][]ResourceContent
|
||||
resource_templates map[string]ResourceTemplate
|
||||
|
||||
// Prompt related fields
|
||||
prompts map[string]Prompt
|
||||
prompt_messages map[string][]PromptMessage
|
||||
prompt_handlers map[string]PromptHandler
|
||||
|
||||
// Tool related fields
|
||||
tools map[string]Tool
|
||||
tool_handlers map[string]ToolHandler
|
||||
|
||||
// Sampling related fields
|
||||
sampling_handler SamplingHandler
|
||||
}
|
||||
|
||||
pub type ToolHandler = fn (arguments map[string]json2.Any) !ToolCallResult
|
||||
|
||||
pub type PromptHandler = fn (arguments []string) ![]PromptMessage
|
||||
|
||||
pub type SamplingHandler = fn (params map[string]json2.Any) !SamplingCreateMessageResult
|
||||
|
||||
fn (b &MemoryBackend) resource_exists(uri string) !bool {
|
||||
return uri in b.resources
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) resource_get(uri string) !Resource {
|
||||
return b.resources[uri] or { return error('resource not found') }
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) resource_list() ![]Resource {
|
||||
return b.resources.values()
|
||||
}
|
||||
|
||||
fn (mut b MemoryBackend) resource_subscribe(uri string) ! {
|
||||
if uri !in b.subscriptions {
|
||||
b.subscriptions << uri
|
||||
}
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) resource_subscribed(uri string) !bool {
|
||||
return uri in b.subscriptions
|
||||
}
|
||||
|
||||
fn (mut b MemoryBackend) resource_unsubscribe(uri string) ! {
|
||||
b.subscriptions = b.subscriptions.filter(it != uri)
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) resource_contents_get(uri string) ![]ResourceContent {
|
||||
return b.resource_contents[uri] or { return error('resource contents not found') }
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) resource_templates_list() ![]ResourceTemplate {
|
||||
return b.resource_templates.values()
|
||||
}
|
||||
|
||||
// Prompt related methods
|
||||
|
||||
fn (b &MemoryBackend) prompt_exists(name string) !bool {
|
||||
return name in b.prompts
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) prompt_get(name string) !Prompt {
|
||||
return b.prompts[name] or { return error('prompt not found') }
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) prompt_list() ![]Prompt {
|
||||
return b.prompts.values()
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) prompt_messages_get(name string, arguments map[string]string) ![]PromptMessage {
|
||||
// Get the base messages for this prompt
|
||||
base_messages := b.prompt_messages[name] or { return error('prompt messages not found') }
|
||||
|
||||
// Apply arguments to the messages
|
||||
mut messages := []PromptMessage{}
|
||||
|
||||
for msg in base_messages {
|
||||
mut content := msg.content
|
||||
|
||||
// If the content is text, replace argument placeholders
|
||||
if content.typ == 'text' {
|
||||
mut text := content.text
|
||||
|
||||
// Replace each argument in the text
|
||||
for arg_name, arg_value in arguments {
|
||||
text = text.replace('{{${arg_name}}}', arg_value)
|
||||
}
|
||||
|
||||
content = PromptContent{
|
||||
typ: content.typ
|
||||
text: text
|
||||
data: content.data
|
||||
mimetype: content.mimetype
|
||||
resource: content.resource
|
||||
}
|
||||
}
|
||||
|
||||
messages << PromptMessage{
|
||||
role: msg.role
|
||||
content: content
|
||||
}
|
||||
}
|
||||
|
||||
return messages
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) prompt_call(name string, arguments []string) ![]PromptMessage {
|
||||
// Get the prompt handler
handler := b.prompt_handlers[name] or { return error('prompt handler not found') }
|
||||
|
||||
// Call the handler with the provided arguments
|
||||
return handler(arguments) or { panic(err) }
|
||||
}
|
||||
|
||||
// Tool related methods
|
||||
|
||||
fn (b &MemoryBackend) tool_exists(name string) !bool {
|
||||
return name in b.tools
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) tool_get(name string) !Tool {
|
||||
return b.tools[name] or { return error('tool not found') }
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) tool_list() ![]Tool {
|
||||
return b.tools.values()
|
||||
}
|
||||
|
||||
fn (b &MemoryBackend) tool_call(name string, arguments map[string]json2.Any) !ToolCallResult {
|
||||
// Get the tool handler
|
||||
handler := b.tool_handlers[name] or { return error('tool handler not found') }
|
||||
|
||||
// Call the handler with the provided arguments
|
||||
return handler(arguments) or {
|
||||
// If the handler throws an error, return it as a tool error
|
||||
return ToolCallResult{
|
||||
is_error: true
|
||||
content: [
|
||||
ToolContent{
|
||||
typ: 'text'
|
||||
text: 'Error: ${err.msg()}'
|
||||
},
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sampling related methods
|
||||
|
||||
fn (b &MemoryBackend) sampling_create_message(params map[string]json2.Any) !SamplingCreateMessageResult {
|
||||
// Check if a sampling handler is registered
|
||||
if isnil(b.sampling_handler) {
|
||||
// Return a default implementation that just echoes back a message
|
||||
// indicating that no sampling handler is registered
|
||||
return SamplingCreateMessageResult{
|
||||
model: 'default'
|
||||
stop_reason: 'endTurn'
|
||||
role: 'assistant'
|
||||
content: MessageContent{
|
||||
typ: 'text'
|
||||
text: 'Sampling is not configured on this server. Please register a sampling handler.'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Call the sampling handler with the provided parameters
|
||||
return b.sampling_handler(params)!
|
||||
}
|
||||
|
||||
// register_sampling_handler registers a handler for sampling requests
|
||||
pub fn (mut b MemoryBackend) register_sampling_handler(handler SamplingHandler) {
|
||||
b.sampling_handler = handler
|
||||
}
|
||||
3
lib/ai/mcp/baobab/README.md
Normal file
3
lib/ai/mcp/baobab/README.md
Normal file
@@ -0,0 +1,3 @@
## Baobab MCP

The Base Object and Actor Backend (BaObAB) MCP server provides tools to easily generate BaObAB modules from a given OpenAPI or OpenRPC schema.
|
||||
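As an illustration, a `tools/call` request sent to the server over its stdio transport has the following shape (the path is a placeholder; the same request format appears in the tests below):

```json
{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"generate_module_from_openapi","arguments":{"openapi_path":"/path/to/openapi.yaml"}}}
```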
172
lib/ai/mcp/baobab/baobab_tools.v
Normal file
172
lib/ai/mcp/baobab/baobab_tools.v
Normal file
@@ -0,0 +1,172 @@
|
||||
module baobab
|
||||
|
||||
import freeflowuniverse.herolib.ai.mcp
|
||||
import freeflowuniverse.herolib.schemas.jsonschema
|
||||
import freeflowuniverse.herolib.core.code
|
||||
import x.json2 as json { Any }
|
||||
import freeflowuniverse.herolib.baobab.generator
|
||||
import freeflowuniverse.herolib.baobab.specification
|
||||
|
||||
// generate_methods_file MCP Tool
|
||||
//
|
||||
|
||||
const generate_methods_file_tool = mcp.Tool{
|
||||
name: 'generate_methods_file'
|
||||
description: 'Generates a methods file with methods for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
|
||||
input_schema: jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: {
|
||||
'source': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: {
|
||||
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'string'
|
||||
})
|
||||
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'string'
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
required: ['source']
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (d &Baobab) generate_methods_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
|
||||
source := json.decode[generator.Source](arguments['source'].str())!
|
||||
result := generator.generate_methods_file_str(source) or {
|
||||
return mcp.error_tool_call_result(err)
|
||||
}
|
||||
return mcp.ToolCallResult{
|
||||
is_error: false
|
||||
content: mcp.result_to_mcp_tool_contents[string](result)
|
||||
}
|
||||
}
|
||||
|
||||
// generate_module_from_openapi MCP Tool
|
||||
const generate_module_from_openapi_tool = mcp.Tool{
|
||||
name: 'generate_module_from_openapi'
|
||||
description: 'Generates a complete Baobab module from an OpenAPI specification'
|
||||
input_schema: jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: {
|
||||
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'string'
|
||||
})
|
||||
}
|
||||
required: ['openapi_path']
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (d &Baobab) generate_module_from_openapi_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
|
||||
openapi_path := arguments['openapi_path'].str()
|
||||
result := generator.generate_module_from_openapi(openapi_path) or {
|
||||
return mcp.error_tool_call_result(err)
|
||||
}
|
||||
return mcp.ToolCallResult{
|
||||
is_error: false
|
||||
content: mcp.result_to_mcp_tool_contents[string](result)
|
||||
}
|
||||
}
|
||||
|
||||
// generate_methods_interface_file MCP Tool
|
||||
const generate_methods_interface_file_tool = mcp.Tool{
|
||||
name: 'generate_methods_interface_file'
|
||||
description: 'Generates a methods interface file with method interfaces for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
|
||||
input_schema: jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: {
|
||||
'source': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: {
|
||||
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'string'
|
||||
})
|
||||
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'string'
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
required: ['source']
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (d &Baobab) generate_methods_interface_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
|
||||
source := json.decode[generator.Source](arguments['source'].str())!
|
||||
result := generator.generate_methods_interface_file_str(source) or {
|
||||
return mcp.error_tool_call_result(err)
|
||||
}
|
||||
return mcp.ToolCallResult{
|
||||
is_error: false
|
||||
content: mcp.result_to_mcp_tool_contents[string](result)
|
||||
}
|
||||
}
|
||||
|
||||
// generate_model_file MCP Tool
|
||||
const generate_model_file_tool = mcp.Tool{
|
||||
name: 'generate_model_file'
|
||||
description: 'Generates a model file with data structures for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
|
||||
input_schema: jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: {
|
||||
'source': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: {
|
||||
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'string'
|
||||
})
|
||||
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'string'
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
required: ['source']
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (d &Baobab) generate_model_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
|
||||
source := json.decode[generator.Source](arguments['source'].str())!
|
||||
result := generator.generate_model_file_str(source) or {
|
||||
return mcp.error_tool_call_result(err)
|
||||
}
|
||||
return mcp.ToolCallResult{
|
||||
is_error: false
|
||||
content: mcp.result_to_mcp_tool_contents[string](result)
|
||||
}
|
||||
}
|
||||
|
||||
// generate_methods_example_file MCP Tool
|
||||
const generate_methods_example_file_tool = mcp.Tool{
|
||||
name: 'generate_methods_example_file'
|
||||
description: 'Generates a methods example file with example implementations for a backend corresponding to those specified in an OpenAPI or OpenRPC specification'
|
||||
input_schema: jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: {
|
||||
'source': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: {
|
||||
'openapi_path': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'string'
|
||||
})
|
||||
'openrpc_path': jsonschema.SchemaRef(jsonschema.Schema{
|
||||
typ: 'string'
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
required: ['source']
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (d &Baobab) generate_methods_example_file_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
|
||||
source := json.decode[generator.Source](arguments['source'].str())!
|
||||
result := generator.generate_methods_example_file_str(source) or {
|
||||
return mcp.error_tool_call_result(err)
|
||||
}
|
||||
return mcp.ToolCallResult{
|
||||
is_error: false
|
||||
content: mcp.result_to_mcp_tool_contents[string](result)
|
||||
}
|
||||
}
|
||||
101
lib/ai/mcp/baobab/baobab_tools_test.v
Normal file
101
lib/ai/mcp/baobab/baobab_tools_test.v
Normal file
@@ -0,0 +1,101 @@
|
||||
module baobab
|
||||
|
||||
import freeflowuniverse.herolib.ai.mcp
|
||||
import freeflowuniverse.herolib.schemas.jsonrpc
|
||||
import json
|
||||
import x.json2
|
||||
import os
|
||||
|
||||
// This file contains tests for the Baobab tools implementation.
|
||||
// It tests the tools' ability to handle tool calls and return expected results.
|
||||
|
||||
// test_generate_module_from_openapi_tool tests the generate_module_from_openapi tool definition
|
||||
fn test_generate_module_from_openapi_tool() {
|
||||
// Verify the tool definition
|
||||
assert generate_module_from_openapi_tool.name == 'generate_module_from_openapi', 'Tool name should be "generate_module_from_openapi"'
|
||||
|
||||
// Verify the input schema
|
||||
assert generate_module_from_openapi_tool.input_schema.typ == 'object', 'Input schema type should be "object"'
|
||||
assert 'openapi_path' in generate_module_from_openapi_tool.input_schema.properties, 'Input schema should have "openapi_path" property'
|
||||
assert generate_module_from_openapi_tool.input_schema.properties['openapi_path'].typ == 'string', 'openapi_path property should be of type "string"'
|
||||
assert 'openapi_path' in generate_module_from_openapi_tool.input_schema.required, 'openapi_path should be a required property'
|
||||
}
|
||||
|
||||
// test_generate_module_from_openapi_tool_handler_error tests the error handling of the generate_module_from_openapi tool handler
|
||||
fn test_generate_module_from_openapi_tool_handler_error() {
|
||||
// Create arguments with a non-existent file path
|
||||
mut arguments := map[string]json2.Any{}
|
||||
arguments['openapi_path'] = json2.Any('non_existent_file.yaml')
|
||||
|
||||
// Call the handler
|
||||
result := generate_module_from_openapi_tool_handler(arguments) or {
|
||||
// If the handler returns an error, that's expected
|
||||
assert err.msg().contains(''), 'Error message should not be empty'
|
||||
return
|
||||
}
|
||||
|
||||
// If we get here, the handler should have returned an error result
|
||||
assert result.is_error, 'Result should indicate an error'
|
||||
assert result.content.len > 0, 'Error content should not be empty'
|
||||
assert result.content[0].typ == 'text', 'Error content should be of type "text"'
|
||||
assert result.content[0].text.contains('failed to open file'), 'Error content should contain "failed to open file", instead ${result.content[0].text}'
|
||||
}
|
||||
|
||||
// test_mcp_tool_call_integration tests the integration of the tool with the MCP server
|
||||
fn test_mcp_tool_call_integration() {
|
||||
// Create a new MCP server
|
||||
mut server := new_mcp_server() or {
|
||||
assert false, 'Failed to create MCP server: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Create a temporary OpenAPI file for testing
|
||||
temp_dir := os.temp_dir()
|
||||
temp_file := os.join_path(temp_dir, 'test_openapi.yaml')
|
||||
os.write_file(temp_file, 'openapi: 3.0.0\ninfo:\n title: Test API\n version: 1.0.0\npaths:\n /test:\n get:\n summary: Test endpoint\n responses:\n "200":\n description: OK') or {
|
||||
assert false, 'Failed to create temporary file: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Sample tool call request
|
||||
tool_call_request := '{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"generate_module_from_openapi","arguments":{"openapi_path":"${temp_file}"}}}'
|
||||
|
||||
// Process the request through the handler
|
||||
response := server.handler.handle(tool_call_request) or {
|
||||
// Clean up the temporary file
|
||||
os.rm(temp_file) or {}
|
||||
|
||||
// If the handler returns an error, that's expected in this test environment
|
||||
// since we might not have all dependencies set up
|
||||
return
|
||||
}
|
||||
|
||||
// Clean up the temporary file
|
||||
os.rm(temp_file) or {}
|
||||
|
||||
// Decode the response to verify its structure
|
||||
decoded_response := jsonrpc.decode_response(response) or {
|
||||
// In a test environment, we might get an error due to missing dependencies
|
||||
// This is acceptable for this test
|
||||
return
|
||||
}
|
||||
|
||||
// If we got a successful response, verify it
|
||||
if !decoded_response.is_error() {
|
||||
// Parse the result to verify its contents
|
||||
result_json := decoded_response.result() or {
|
||||
assert false, 'Failed to get result: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Decode the result to check the content
|
||||
result_map := json2.raw_decode(result_json) or {
|
||||
assert false, 'Failed to decode result: ${err}'
|
||||
return
|
||||
}.as_map()
|
||||
|
||||
// Verify the result structure
|
||||
assert 'isError' in result_map, 'Result should have isError field'
|
||||
assert 'content' in result_map, 'Result should have content field'
|
||||
}
|
||||
}
|
||||
22
lib/ai/mcp/baobab/command.v
Normal file
22
lib/ai/mcp/baobab/command.v
Normal file
@@ -0,0 +1,22 @@
|
||||
module baobab
|
||||
|
||||
import cli
|
||||
|
||||
pub const command = cli.Command{
|
||||
sort_flags: true
|
||||
name: 'baobab'
|
||||
// execute: cmd_mcpgen
|
||||
description: 'baobab command'
|
||||
commands: [
|
||||
cli.Command{
|
||||
name: 'start'
|
||||
execute: cmd_start
|
||||
description: 'start the Baobab server'
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
fn cmd_start(cmd cli.Command) ! {
|
||||
mut server := new_mcp_server(&Baobab{})!
|
||||
server.start()!
|
||||
}
|
||||
128
lib/ai/mcp/baobab/mcp_test.v
Normal file
128
lib/ai/mcp/baobab/mcp_test.v
Normal file
@@ -0,0 +1,128 @@
|
||||
module baobab
|
||||
|
||||
import freeflowuniverse.herolib.ai.mcp
|
||||
import freeflowuniverse.herolib.schemas.jsonrpc
|
||||
import json
|
||||
import x.json2
|
||||
|
||||
// This file contains tests for the Baobab MCP server implementation.
|
||||
// It tests the server's ability to initialize and handle tool calls.
|
||||
|
||||
// test_new_mcp_server tests the creation of a new MCP server for the Baobab module
|
||||
fn test_new_mcp_server() {
|
||||
// Create a new MCP server
|
||||
mut server := new_mcp_server() or {
|
||||
assert false, 'Failed to create MCP server: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Verify server info
|
||||
assert server.server_info.name == 'developer', 'Server name should be "developer"'
|
||||
assert server.server_info.version == '1.0.0', 'Server version should be 1.0.0'
|
||||
|
||||
// Verify server capabilities
|
||||
assert server.capabilities.prompts.list_changed == true, 'Prompts capability should have list_changed set to true'
|
||||
assert server.capabilities.resources.subscribe == true, 'Resources capability should have subscribe set to true'
|
||||
assert server.capabilities.resources.list_changed == true, 'Resources capability should have list_changed set to true'
|
||||
assert server.capabilities.tools.list_changed == true, 'Tools capability should have list_changed set to true'
|
||||
}
|
||||
|
||||
// test_mcp_server_initialize tests the initialize handler with a sample initialize request
|
||||
fn test_mcp_server_initialize() {
|
||||
// Create a new MCP server
|
||||
mut server := new_mcp_server() or {
|
||||
assert false, 'Failed to create MCP server: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Sample initialize request from the MCP specification
|
||||
initialize_request := '{"jsonrpc":"2.0","id":0,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{"sampling":{},"roots":{"listChanged":true}},"clientInfo":{"name":"mcp-inspector","version":"0.0.1"}}}'
|
||||
|
||||
// Process the request through the handler
|
||||
response := server.handler.handle(initialize_request) or {
|
||||
assert false, 'Handler failed to process request: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Decode the response to verify its structure
|
||||
decoded_response := jsonrpc.decode_response(response) or {
|
||||
assert false, 'Failed to decode response: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Verify that the response is not an error
|
||||
assert !decoded_response.is_error(), 'Response should not be an error'
|
||||
|
||||
// Parse the result to verify its contents
|
||||
result_json := decoded_response.result() or {
|
||||
assert false, 'Failed to get result: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Decode the result into an ServerConfiguration struct
|
||||
result := json.decode(mcp.ServerConfiguration, result_json) or {
|
||||
assert false, 'Failed to decode result: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Verify the protocol version matches what was requested
|
||||
assert result.protocol_version == '2024-11-05', 'Protocol version should match the request'
|
||||
|
||||
// Verify server info
|
||||
assert result.server_info.name == 'baobab', 'Server name should be "baobab"'
|
||||
}
|
||||
|
||||
// test_tools_list tests the tools/list handler to verify tool registration
|
||||
fn test_tools_list() {
|
||||
// Create a new MCP server
|
||||
mut server := new_mcp_server(&Baobab{}) or {
|
||||
assert false, 'Failed to create MCP server: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Sample tools/list request
|
||||
tools_list_request := '{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{"cursor":""}}'
|
||||
|
||||
// Process the request through the handler
|
||||
response := server.handler.handle(tools_list_request) or {
|
||||
assert false, 'Handler failed to process request: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Decode the response to verify its structure
|
||||
decoded_response := jsonrpc.decode_response(response) or {
|
||||
assert false, 'Failed to decode response: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Verify that the response is not an error
|
||||
assert !decoded_response.is_error(), 'Response should not be an error'
|
||||
|
||||
// Parse the result to verify its contents
|
||||
result_json := decoded_response.result() or {
|
||||
assert false, 'Failed to get result: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Decode the result into a map to check the tools
|
||||
result_map := json2.raw_decode(result_json) or {
|
||||
assert false, 'Failed to decode result: ${err}'
|
||||
return
|
||||
}.as_map()
|
||||
|
||||
// Verify that the tools array exists and contains the expected tool
|
||||
tools := result_map['tools'].arr()
|
||||
assert tools.len > 0, 'Tools list should not be empty'
|
||||
|
||||
// Find the generate_module_from_openapi tool
|
||||
mut found_tool := false
|
||||
for tool in tools {
|
||||
tool_map := tool.as_map()
|
||||
if tool_map['name'].str() == 'generate_module_from_openapi' {
|
||||
found_tool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
assert found_tool, 'generate_module_from_openapi tool should be registered'
|
||||
}
|
||||
38
lib/ai/mcp/baobab/server.v
Normal file
@@ -0,0 +1,38 @@
module baobab

import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.ai.mcp.logger
import freeflowuniverse.herolib.schemas.jsonrpc

@[heap]
pub struct Baobab {}

pub fn new_mcp_server(v &Baobab) !&mcp.Server {
	logger.info('Creating new Baobab MCP server')

	// Initialize the server with the empty handlers map
	mut server := mcp.new_server(mcp.MemoryBackend{
		tools:         {
			'generate_module_from_openapi':    generate_module_from_openapi_tool
			'generate_methods_file':           generate_methods_file_tool
			'generate_methods_interface_file': generate_methods_interface_file_tool
			'generate_model_file':             generate_model_file_tool
			'generate_methods_example_file':   generate_methods_example_file_tool
		}
		tool_handlers: {
			'generate_module_from_openapi':    v.generate_module_from_openapi_tool_handler
			'generate_methods_file':           v.generate_methods_file_tool_handler
			'generate_methods_interface_file': v.generate_methods_interface_file_tool_handler
			'generate_model_file':             v.generate_model_file_tool_handler
			'generate_methods_example_file':   v.generate_methods_example_file_tool_handler
		}
	}, mcp.ServerParams{
		config: mcp.ServerConfiguration{
			server_info: mcp.ServerInfo{
				name:    'baobab'
				version: '1.0.0'
			}
		}
	})!
	return server
}
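Each entry in the `tools` and `tool_handlers` maps above pairs an `mcp.Tool` definition with a method on `Baobab`; the real `generate_*` pairs live elsewhere in this changeset. The following is a minimal sketch of the shape such a pair takes, modelled on the pattern used in `mcpgen_tools.v`; the tool name, schema and handler body are hypothetical.

```v
module baobab

import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2 { Any }

// Hypothetical tool: echoes its single string argument back as text content.
const example_echo_tool = mcp.Tool{
	name:         'example_echo'
	description:  'echo the given text back to the caller'
	input_schema: jsonschema.Schema{
		typ:        'object'
		properties: {
			'text': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
		}
		required:   ['text']
	}
}

// Handler with the map[string]Any -> ToolCallResult shape the backend expects.
pub fn (b &Baobab) example_echo_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
	text := arguments['text'].str()
	return mcp.ToolCallResult{
		is_error: false
		content:  mcp.result_to_mcp_tool_contents[string](text)
	}
}
```

Registering such a pair only requires adding the `'example_echo'` key to both maps in `new_mcp_server`.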
68
lib/ai/mcp/cmd/compile.vsh
Executable file
@@ -0,0 +1,68 @@
|
||||
#!/usr/bin/env -S v -n -cg -w -parallel-cc -enable-globals run
|
||||
|
||||
import os
|
||||
import flag
|
||||
|
||||
mut fp := flag.new_flag_parser(os.args)
|
||||
fp.application('compile.vsh')
|
||||
fp.version('v0.1.0')
|
||||
fp.description('Compile MCP binary in debug or production mode')
|
||||
fp.skip_executable()
|
||||
|
||||
prod_mode := fp.bool('prod', `p`, false, 'Build production version (optimized)')
|
||||
help_requested := fp.bool('help', `h`, false, 'Show help message')
|
||||
|
||||
if help_requested {
|
||||
println(fp.usage())
|
||||
exit(0)
|
||||
}
|
||||
|
||||
additional_args := fp.finalize() or {
|
||||
eprintln(err)
|
||||
println(fp.usage())
|
||||
exit(1)
|
||||
}
|
||||
|
||||
if additional_args.len > 0 {
|
||||
eprintln('Unexpected arguments: ${additional_args.join(' ')}')
|
||||
println(fp.usage())
|
||||
exit(1)
|
||||
}
|
||||
|
||||
// Change to the mcp directory
|
||||
mcp_dir := os.dir(os.real_path(os.executable()))
|
||||
os.chdir(mcp_dir) or { panic('Failed to change directory to ${mcp_dir}: ${err}') }
|
||||
|
||||
// Set MCPPATH based on OS
|
||||
mut mcppath := '/usr/local/bin/mcp'
|
||||
if os.user_os() == 'macos' {
|
||||
mcppath = os.join_path(os.home_dir(), 'hero/bin/mcp')
|
||||
}
|
||||
|
||||
// Set compilation command based on OS and mode
|
||||
compile_cmd := if prod_mode {
|
||||
'v -enable-globals -w -n -prod mcp.v'
|
||||
} else {
|
||||
'v -w -cg -gc none -cc tcc -d use_openssl -enable-globals mcp.v'
|
||||
}
|
||||
|
||||
println('Building MCP in ${if prod_mode { 'production' } else { 'debug' }} mode...')
|
||||
|
||||
if os.system(compile_cmd) != 0 {
|
||||
panic('Failed to compile mcp.v with command: ${compile_cmd}')
|
||||
}
|
||||
|
||||
// Make executable
|
||||
os.chmod('mcp', 0o755) or { panic('Failed to make mcp binary executable: ${err}') }
|
||||
|
||||
// Ensure destination directory exists
|
||||
os.mkdir_all(os.dir(mcppath)) or { panic('Failed to create directory ${os.dir(mcppath)}: ${err}') }
|
||||
|
||||
// Copy to destination paths
|
||||
os.cp('mcp', mcppath) or { panic('Failed to copy mcp binary to ${mcppath}: ${err}') }
|
||||
os.cp('mcp', '/tmp/mcp') or { panic('Failed to copy mcp binary to /tmp/mcp: ${err}') }
|
||||
|
||||
// Clean up
|
||||
os.rm('mcp') or { panic('Failed to remove temporary mcp binary: ${err}') }
|
||||
|
||||
println('**MCP COMPILE OK**')
|
||||
93
lib/ai/mcp/cmd/mcp.v
Normal file
@@ -0,0 +1,93 @@
|
||||
module main
|
||||
|
||||
import os
|
||||
import cli { Command, Flag }
|
||||
import freeflowuniverse.herolib.osal
|
||||
// import freeflowuniverse.herolib.ai.mcp.vcode
|
||||
// import freeflowuniverse.herolib.ai.mcp.mcpgen
|
||||
// import freeflowuniverse.herolib.ai.mcp.baobab
|
||||
import freeflowuniverse.herolib.ai.mcp.rhai.mcp as rhai_mcp
|
||||
import freeflowuniverse.herolib.ai.mcp.rust
|
||||
|
||||
fn main() {
|
||||
do() or { panic(err) }
|
||||
}
|
||||
|
||||
pub fn do() ! {
|
||||
mut cmd_mcp := Command{
|
||||
name: 'mcp'
|
||||
usage: '
|
||||
## Manage your MCPs
|
||||
|
||||
example:
|
||||
|
||||
mcp
|
||||
'
|
||||
description: 'create, run and inspect MCP servers'
|
||||
required_args: 0
|
||||
}
|
||||
|
||||
// cmd_run_add_flags(mut cmd_publisher)
|
||||
|
||||
cmd_mcp.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'debug'
|
||||
abbrev: 'd'
|
||||
description: 'show debug output'
|
||||
})
|
||||
|
||||
cmd_mcp.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'verbose'
|
||||
abbrev: 'v'
|
||||
description: 'show verbose output'
|
||||
})
|
||||
|
||||
mut cmd_inspector := Command{
|
||||
sort_flags: true
|
||||
name: 'inspector'
|
||||
execute: cmd_inspector_execute
|
||||
description: 'start the MCP inspector'
|
||||
}
|
||||
|
||||
cmd_inspector.add_flag(Flag{
|
||||
flag: .string
|
||||
required: false
|
||||
name: 'name'
|
||||
abbrev: 'n'
|
||||
description: 'name of the MCP'
|
||||
})
|
||||
|
||||
cmd_inspector.add_flag(Flag{
|
||||
flag: .bool
|
||||
required: false
|
||||
name: 'open'
|
||||
abbrev: 'o'
|
||||
description: 'open inspector'
|
||||
})
|
||||
|
||||
cmd_mcp.add_command(rhai_mcp.command)
|
||||
cmd_mcp.add_command(rust.command)
|
||||
// cmd_mcp.add_command(baobab.command)
|
||||
// cmd_mcp.add_command(vcode.command)
|
||||
cmd_mcp.add_command(cmd_inspector)
|
||||
// cmd_mcp.add_command(vcode.command)
|
||||
cmd_mcp.setup()
|
||||
cmd_mcp.parse(os.args)
|
||||
}
|
||||
|
||||
fn cmd_inspector_execute(cmd Command) ! {
|
||||
open := cmd.flags.get_bool('open') or { false }
|
||||
if open {
|
||||
osal.exec(cmd: 'open http://localhost:5173')!
|
||||
}
|
||||
name := cmd.flags.get_string('name') or { '' }
|
||||
if name.len > 0 {
|
||||
println('starting inspector for MCP ${name}')
|
||||
osal.exec(cmd: 'npx @modelcontextprotocol/inspector mcp ${name} start')!
|
||||
} else {
|
||||
osal.exec(cmd: 'npx @modelcontextprotocol/inspector')!
|
||||
}
|
||||
}
|
||||
49
lib/ai/mcp/factory.v
Normal file
@@ -0,0 +1,49 @@
module mcp

import time
import os
import log
import x.json2
import freeflowuniverse.herolib.schemas.jsonrpc

@[params]
pub struct ServerParams {
pub:
	handlers map[string]jsonrpc.ProcedureHandler
	config   ServerConfiguration
}

// new_server creates a new MCP server
pub fn new_server(backend Backend, params ServerParams) !&Server {
	mut server := &Server{
		ServerConfiguration: params.config
		backend:             backend
	}

	// Create a handler with the core MCP procedures registered
	handler := jsonrpc.new_handler(jsonrpc.Handler{
		procedures: {
			//...params.handlers,
			// Core handlers
			'initialize':                server.initialize_handler
			'notifications/initialized': initialized_notification_handler
			// Resource handlers
			'resources/list':            server.resources_list_handler
			'resources/read':            server.resources_read_handler
			'resources/templates/list':  server.resources_templates_list_handler
			'resources/subscribe':       server.resources_subscribe_handler
			// Prompt handlers
			'prompts/list':              server.prompts_list_handler
			'prompts/get':               server.prompts_get_handler
			'completion/complete':       server.prompts_get_handler
			// Tool handlers
			'tools/list':                server.tools_list_handler
			'tools/call':                server.tools_call_handler
			// Sampling handlers
			'sampling/createMessage':    server.sampling_create_message_handler
		}
	})!

	server.handler = *handler
	return server
}
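`new_server` embeds the supplied configuration, stores the backend and wires the core MCP procedures into a JSON-RPC handler table. A minimal sketch of standing up a server this way, assuming an empty `MemoryBackend` and a hypothetical server name; the concrete backends in this changeset populate the tool maps instead.

```v
module main

import freeflowuniverse.herolib.ai.mcp

fn main() {
	// Minimal sketch: a server with an empty MemoryBackend and a hypothetical name.
	mut server := mcp.new_server(mcp.MemoryBackend{}, mcp.ServerParams{
		config: mcp.ServerConfiguration{
			server_info: mcp.ServerInfo{
				name:    'example'
				version: '0.0.1'
			}
		}
	}) or { panic(err) }
	// start() serves the protocol over stdio, as the command files in this changeset do.
	server.start() or { panic(err) }
}
```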
52
lib/ai/mcp/generics.v
Normal file
@@ -0,0 +1,52 @@
module mcp

pub fn result_to_mcp_tool_contents[T](result T) []ToolContent {
	return [result_to_mcp_tool_content[T](result)]
}

pub fn result_to_mcp_tool_content[T](result T) ToolContent {
	$if T is string {
		return ToolContent{
			typ:  'text'
			text: result.str()
		}
	} $else $if T is int {
		return ToolContent{
			typ:    'number'
			number: result.int()
		}
	} $else $if T is bool {
		return ToolContent{
			typ:     'boolean'
			boolean: result.bool()
		}
	} $else $if result is $array {
		mut items := []ToolContent{}
		for item in result {
			items << result_to_mcp_tool_content(item)
		}
		return ToolContent{
			typ:   'array'
			items: items
		}
	} $else $if T is $struct {
		mut properties := map[string]ToolContent{}
		$for field in T.fields {
			properties[field.name] = result_to_mcp_tool_content(result.$(field.name))
		}
		return ToolContent{
			typ:        'object'
			properties: properties
		}
	} $else {
		panic('Unsupported type: ${typeof(result)}')
	}
}

pub fn array_to_mcp_tool_contents[U](array []U) []ToolContent {
	mut contents := []ToolContent{}
	for item in array {
		contents << result_to_mcp_tool_content(item)
	}
	return contents
}
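`result_to_mcp_tool_content` resolves the `ToolContent` variant at compile time from the V type of the result. A small sketch of what that yields for a string and for a struct; the `Stats` type is hypothetical and exists only to illustrate the `$struct` branch.

```v
// Hypothetical struct, used only to illustrate the $struct branch above.
struct Stats {
	files  int
	errors int
}

fn example_result_to_tool_content() {
	// A plain string becomes a single 'text' content item.
	text_contents := result_to_mcp_tool_contents[string]('compilation finished')
	assert text_contents[0].typ == 'text'

	// A struct becomes an 'object' item with one property per field.
	stats := result_to_mcp_tool_content[Stats](Stats{
		files:  3
		errors: 0
	})
	assert stats.typ == 'object'
	assert stats.properties['files'].number == 3
}
```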
27
lib/ai/mcp/handler_initialize.v
Normal file
@@ -0,0 +1,27 @@
module mcp

import time
import os
import log
import x.json2
import freeflowuniverse.herolib.schemas.jsonrpc

// initialize_handler handles the initialize request according to the MCP specification
fn (mut s Server) initialize_handler(data string) !string {
	// Decode the request with ClientConfiguration parameters
	request := jsonrpc.decode_request_generic[ClientConfiguration](data)!
	s.client_config = request.params

	// Create a success response with the result
	response := jsonrpc.new_response_generic[ServerConfiguration](request.id, s.ServerConfiguration)
	return response.encode()
}

// initialized_notification_handler handles the initialized notification
// This notification is sent by the client after successful initialization
fn initialized_notification_handler(data string) !string {
	// This is a notification, so no response is expected
	// Just log that we received the notification
	log.info('Received initialized notification')
	return ''
}
103
lib/ai/mcp/handler_initialize_test.v
Normal file
@@ -0,0 +1,103 @@
|
||||
module mcp
|
||||
|
||||
import freeflowuniverse.herolib.schemas.jsonrpc
|
||||
import json
|
||||
|
||||
// This file contains tests for the MCP initialize handler implementation.
|
||||
// It tests the handler's ability to process initialize requests according to the MCP specification.
|
||||
|
||||
// test_initialize_handler tests the initialize handler with a sample initialize request
|
||||
fn test_initialize_handler() {
|
||||
mut server := Server{}
|
||||
|
||||
// Sample initialize request from the MCP specification
|
||||
initialize_request := '{"jsonrpc":"2.0","id":0,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{"sampling":{},"roots":{"listChanged":true}},"clientInfo":{"name":"mcp-inspector","version":"0.0.1"}}}'
|
||||
|
||||
// Call the initialize handler directly
|
||||
response := server.initialize_handler(initialize_request) or {
|
||||
assert false, 'Initialize handler failed: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Decode the response to verify its structure
|
||||
decoded_response := jsonrpc.decode_response(response) or {
|
||||
assert false, 'Failed to decode response: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Verify that the response is not an error
|
||||
assert !decoded_response.is_error(), 'Response should not be an error'
|
||||
|
||||
// Parse the result to verify its contents
|
||||
result_json := decoded_response.result() or {
|
||||
assert false, 'Failed to get result: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Decode the result into an ServerConfiguration struct
|
||||
result := json.decode(ServerConfiguration, result_json) or {
|
||||
assert false, 'Failed to decode result: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Verify the protocol version matches what was requested
|
||||
assert result.protocol_version == '2024-11-05', 'Protocol version should match the request'
|
||||
|
||||
// Verify server capabilities
|
||||
assert result.capabilities.prompts.list_changed == true, 'Prompts capability should have list_changed set to true'
|
||||
assert result.capabilities.resources.subscribe == true, 'Resources capability should have subscribe set to true'
|
||||
assert result.capabilities.resources.list_changed == true, 'Resources capability should have list_changed set to true'
|
||||
assert result.capabilities.tools.list_changed == true, 'Tools capability should have list_changed set to true'
|
||||
|
||||
// Verify server info
|
||||
assert result.server_info.name == 'HeroLibMCPServer', 'Server name should be HeroLibMCPServer'
|
||||
assert result.server_info.version == '1.0.0', 'Server version should be 1.0.0'
|
||||
}
|
||||
|
||||
// test_initialize_handler_with_handler tests the initialize handler through the JSONRPC handler
|
||||
fn test_initialize_handler_with_handler() {
|
||||
mut server := Server{}
|
||||
|
||||
// Create a handler with just the initialize procedure
|
||||
handler := jsonrpc.new_handler(jsonrpc.Handler{
|
||||
procedures: {
|
||||
'initialize': server.initialize_handler
|
||||
}
|
||||
}) or {
|
||||
assert false, 'Failed to create handler: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Sample initialize request from the MCP specification
|
||||
initialize_request := '{"jsonrpc":"2.0","id":0,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{"sampling":{},"roots":{"listChanged":true}},"clientInfo":{"name":"mcp-inspector","version":"0.0.1"}}}'
|
||||
|
||||
// Process the request through the handler
|
||||
response := handler.handle(initialize_request) or {
|
||||
assert false, 'Handler failed to process request: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Decode the response to verify its structure
|
||||
decoded_response := jsonrpc.decode_response(response) or {
|
||||
assert false, 'Failed to decode response: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Verify that the response is not an error
|
||||
assert !decoded_response.is_error(), 'Response should not be an error'
|
||||
|
||||
// Parse the result to verify its contents
|
||||
result_json := decoded_response.result() or {
|
||||
assert false, 'Failed to get result: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Decode the result into an ServerConfiguration struct
|
||||
result := json.decode(ServerConfiguration, result_json) or {
|
||||
assert false, 'Failed to decode result: ${err}'
|
||||
return
|
||||
}
|
||||
|
||||
// Verify the protocol version matches what was requested
|
||||
assert result.protocol_version == '2024-11-05', 'Protocol version should match the request'
|
||||
}
|
||||
135
lib/ai/mcp/handler_prompts.v
Normal file
@@ -0,0 +1,135 @@
|
||||
module mcp
|
||||
|
||||
import time
|
||||
import os
|
||||
import log
|
||||
import x.json2
|
||||
import json
|
||||
import freeflowuniverse.herolib.schemas.jsonrpc
|
||||
|
||||
// Prompt related structs
|
||||
|
||||
pub struct Prompt {
|
||||
pub:
|
||||
name string
|
||||
description string
|
||||
arguments []PromptArgument
|
||||
}
|
||||
|
||||
pub struct PromptArgument {
|
||||
pub:
|
||||
name string
|
||||
description string
|
||||
required bool
|
||||
}
|
||||
|
||||
pub struct PromptMessage {
|
||||
pub:
|
||||
role string
|
||||
content PromptContent
|
||||
}
|
||||
|
||||
pub struct PromptContent {
|
||||
pub:
|
||||
typ string @[json: 'type']
|
||||
text string
|
||||
data string
|
||||
mimetype string @[json: 'mimeType']
|
||||
resource ResourceContent
|
||||
}
|
||||
|
||||
// Prompt List Handler
|
||||
|
||||
pub struct PromptListParams {
|
||||
pub:
|
||||
cursor string
|
||||
}
|
||||
|
||||
pub struct PromptListResult {
|
||||
pub:
|
||||
prompts []Prompt
|
||||
next_cursor string @[json: 'nextCursor']
|
||||
}
|
||||
|
||||
// prompts_list_handler handles the prompts/list request
|
||||
// This request is used to retrieve a list of available prompts
|
||||
fn (mut s Server) prompts_list_handler(data string) !string {
|
||||
// Decode the request with cursor parameter
|
||||
request := jsonrpc.decode_request_generic[PromptListParams](data)!
|
||||
cursor := request.params.cursor
|
||||
|
||||
// TODO: Implement pagination logic using the cursor
|
||||
// For now, return all prompts
|
||||
|
||||
// Create a success response with the result
|
||||
response := jsonrpc.new_response_generic[PromptListResult](request.id, PromptListResult{
|
||||
prompts: s.backend.prompt_list()!
|
||||
next_cursor: '' // Empty if no more pages
|
||||
})
|
||||
return response.encode()
|
||||
}
|
||||
|
||||
// Prompt Get Handler
|
||||
|
||||
pub struct PromptGetParams {
|
||||
pub:
|
||||
name string
|
||||
arguments map[string]string
|
||||
}
|
||||
|
||||
pub struct PromptGetResult {
|
||||
pub:
|
||||
description string
|
||||
messages []PromptMessage
|
||||
}
|
||||
|
||||
// prompts_get_handler handles the prompts/get request
|
||||
// This request is used to retrieve a specific prompt with arguments
|
||||
fn (mut s Server) prompts_get_handler(data string) !string {
|
||||
// Decode the request with name and arguments parameters
|
||||
request_map := json2.raw_decode(data)!.as_map()
|
||||
params_map := request_map['params'].as_map()
|
||||
|
||||
if !s.backend.prompt_exists(params_map['name'].str())! {
|
||||
return jsonrpc.new_error_response(request_map['id'].int(), prompt_not_found(params_map['name'].str())).encode()
|
||||
}
|
||||
|
||||
// Get the prompt by name
|
||||
prompt := s.backend.prompt_get(params_map['name'].str())!
|
||||
|
||||
// Validate required arguments
|
||||
for arg in prompt.arguments {
|
||||
if arg.required && params_map['arguments'].as_map()[arg.name].str() == '' {
|
||||
return jsonrpc.new_error_response(request_map['id'].int(), missing_required_argument(arg.name)).encode()
|
||||
}
|
||||
}
|
||||
|
||||
messages := s.backend.prompt_call(params_map['name'].str(), params_map['arguments'].as_map().values().map(it.str()))!
|
||||
|
||||
// // Get the prompt messages with arguments applied
|
||||
// messages := s.backend.prompt_messages_get(request.params.name, request.params.arguments)!
|
||||
|
||||
// Create a success response with the result
|
||||
response := jsonrpc.new_response_generic[PromptGetResult](request_map['id'].int(),
|
||||
PromptGetResult{
|
||||
description: prompt.description
|
||||
messages: messages
|
||||
})
|
||||
return response.encode()
|
||||
}
|
||||
|
||||
// Prompt Notification Handlers
|
||||
|
||||
// send_prompts_list_changed_notification sends a notification when the list of prompts changes
|
||||
pub fn (mut s Server) send_prompts_list_changed_notification() ! {
|
||||
// Check if the client supports this notification
|
||||
if !s.client_config.capabilities.roots.list_changed {
|
||||
return
|
||||
}
|
||||
|
||||
// Create a notification
|
||||
notification := jsonrpc.new_blank_notification('notifications/prompts/list_changed')
|
||||
s.send(json.encode(notification))
|
||||
// Send the notification to all connected clients
|
||||
log.info('Sending prompts list changed notification: ${json.encode(notification)}')
|
||||
}
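`prompts_get_handler` first checks that the prompt exists, then validates every required argument before asking the backend to render the messages. A hedged sketch of driving it through the server's handler table, in the same style as the tests in this changeset; the prompt name `summarize` and its `topic` argument are hypothetical.

```v
// Hedged sketch (inside module mcp): exercising prompts/get through the
// JSON-RPC handler. The prompt name and argument are hypothetical; real
// prompts come from the configured backend.
fn example_prompts_get(mut server Server) ! {
	request := '{"jsonrpc":"2.0","id":7,"method":"prompts/get","params":{"name":"summarize","arguments":{"topic":"mcp"}}}'
	response := server.handler.handle(request)!
	// On success the result carries the prompt description plus the rendered
	// PromptMessage list; unknown names come back as a prompt_not_found error.
	println(response)
}
```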
|
||||
186
lib/ai/mcp/handler_resources.v
Normal file
@@ -0,0 +1,186 @@
|
||||
module mcp
|
||||
|
||||
import time
|
||||
import os
|
||||
import log
|
||||
import x.json2
|
||||
import json
|
||||
import freeflowuniverse.herolib.schemas.jsonrpc
|
||||
|
||||
pub struct Resource {
|
||||
pub:
|
||||
uri string
|
||||
name string
|
||||
description string
|
||||
mimetype string @[json: 'mimeType']
|
||||
}
|
||||
|
||||
// Resource List Handler
|
||||
|
||||
pub struct ResourceListParams {
|
||||
pub:
|
||||
cursor string
|
||||
}
|
||||
|
||||
pub struct ResourceListResult {
|
||||
pub:
|
||||
resources []Resource
|
||||
next_cursor string @[json: 'nextCursor']
|
||||
}
|
||||
|
||||
// resources_list_handler handles the resources/list request
|
||||
// This request is used to retrieve a list of available resources
|
||||
fn (mut s Server) resources_list_handler(data string) !string {
|
||||
// Decode the request with cursor parameter
|
||||
request := jsonrpc.decode_request_generic[ResourceListParams](data)!
|
||||
cursor := request.params.cursor
|
||||
|
||||
// TODO: Implement pagination logic using the cursor
|
||||
// For now, return all resources
|
||||
|
||||
// Create a success response with the result
|
||||
response := jsonrpc.new_response_generic[ResourceListResult](request.id, ResourceListResult{
|
||||
resources: s.backend.resource_list()!
|
||||
next_cursor: '' // Empty if no more pages
|
||||
})
|
||||
return response.encode()
|
||||
}
|
||||
|
||||
// Resource Read Handler
|
||||
|
||||
pub struct ResourceReadParams {
|
||||
pub:
|
||||
uri string
|
||||
}
|
||||
|
||||
pub struct ResourceReadResult {
|
||||
pub:
|
||||
contents []ResourceContent
|
||||
}
|
||||
|
||||
pub struct ResourceContent {
|
||||
pub:
|
||||
uri string
|
||||
mimetype string @[json: 'mimeType']
|
||||
text string
|
||||
blob string // Base64-encoded binary data
|
||||
}
|
||||
|
||||
// resources_read_handler handles the resources/read request
|
||||
// This request is used to retrieve the contents of a resource
|
||||
fn (mut s Server) resources_read_handler(data string) !string {
|
||||
// Decode the request with uri parameter
|
||||
request := jsonrpc.decode_request_generic[ResourceReadParams](data)!
|
||||
|
||||
if !s.backend.resource_exists(request.params.uri)! {
|
||||
return jsonrpc.new_error_response(request.id, resource_not_found(request.params.uri)).encode()
|
||||
}
|
||||
|
||||
// Get the resource contents by URI
|
||||
resource_contents := s.backend.resource_contents_get(request.params.uri)!
|
||||
|
||||
// Create a success response with the result
|
||||
response := jsonrpc.new_response_generic[ResourceReadResult](request.id, ResourceReadResult{
|
||||
contents: resource_contents
|
||||
})
|
||||
return response.encode()
|
||||
}
|
||||
|
||||
// Resource Templates Handler
|
||||
|
||||
pub struct ResourceTemplatesListResult {
|
||||
pub:
|
||||
resource_templates []ResourceTemplate @[json: 'resourceTemplates']
|
||||
}
|
||||
|
||||
pub struct ResourceTemplate {
|
||||
pub:
|
||||
uri_template string @[json: 'uriTemplate']
|
||||
name string
|
||||
description string
|
||||
mimetype string @[json: 'mimeType']
|
||||
}
|
||||
|
||||
// resources_templates_list_handler handles the resources/templates/list request
|
||||
// This request is used to retrieve a list of available resource templates
|
||||
fn (mut s Server) resources_templates_list_handler(data string) !string {
|
||||
// Decode the request
|
||||
request := jsonrpc.decode_request(data)!
|
||||
|
||||
// Create a success response with the result
|
||||
response := jsonrpc.new_response_generic[ResourceTemplatesListResult](request.id,
|
||||
ResourceTemplatesListResult{
|
||||
resource_templates: s.backend.resource_templates_list()!
|
||||
})
|
||||
return response.encode()
|
||||
}
|
||||
|
||||
// Resource Subscription Handler
|
||||
|
||||
pub struct ResourceSubscribeParams {
|
||||
pub:
|
||||
uri string
|
||||
}
|
||||
|
||||
pub struct ResourceSubscribeResult {
|
||||
pub:
|
||||
subscribed bool
|
||||
}
|
||||
|
||||
// resources_subscribe_handler handles the resources/subscribe request
|
||||
// This request is used to subscribe to changes for a specific resource
|
||||
fn (mut s Server) resources_subscribe_handler(data string) !string {
|
||||
request := jsonrpc.decode_request_generic[ResourceSubscribeParams](data)!
|
||||
|
||||
if !s.backend.resource_exists(request.params.uri)! {
|
||||
return jsonrpc.new_error_response(request.id, resource_not_found(request.params.uri)).encode()
|
||||
}
|
||||
|
||||
s.backend.resource_subscribe(request.params.uri)!
|
||||
|
||||
response := jsonrpc.new_response_generic[ResourceSubscribeResult](request.id, ResourceSubscribeResult{
|
||||
subscribed: true
|
||||
})
|
||||
return response.encode()
|
||||
}
|
||||
|
||||
// Resource Notification Handlers
|
||||
|
||||
// send_resources_list_changed_notification sends a notification when the list of resources changes
|
||||
pub fn (mut s Server) send_resources_list_changed_notification() ! {
|
||||
// Check if the client supports this notification
|
||||
if !s.client_config.capabilities.roots.list_changed {
|
||||
return
|
||||
}
|
||||
|
||||
// Create a notification
|
||||
notification := jsonrpc.new_blank_notification('notifications/resources/list_changed')
|
||||
s.send(json.encode(notification))
|
||||
// Send the notification to all connected clients
|
||||
// In a real implementation, this would use a WebSocket or other transport
|
||||
log.info('Sending resources list changed notification: ${json.encode(notification)}')
|
||||
}
|
||||
|
||||
pub struct ResourceUpdatedParams {
|
||||
pub:
|
||||
uri string
|
||||
}
|
||||
|
||||
// send_resource_updated_notification sends a notification when a subscribed resource is updated
|
||||
pub fn (mut s Server) send_resource_updated_notification(uri string) ! {
|
||||
// Check if the client is subscribed to this resource
|
||||
if !s.backend.resource_subscribed(uri)! {
|
||||
return
|
||||
}
|
||||
|
||||
// Create a notification
|
||||
notification := jsonrpc.new_notification[ResourceUpdatedParams]('notifications/resources/updated',
|
||||
ResourceUpdatedParams{
|
||||
uri: uri
|
||||
})
|
||||
|
||||
s.send(json.encode(notification))
|
||||
// Send the notification to all connected clients
|
||||
// In a real implementation, this would use a WebSocket or other transport
|
||||
log.info('Sending resource updated notification: ${json.encode(notification)}')
|
||||
}
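`resources_read_handler` resolves a URI through the backend and wraps whatever it returns in a `ResourceReadResult`. A hedged sketch of a read request driven through the handler table; the `file://` URI is hypothetical.

```v
// Hedged sketch (inside module mcp): reading a resource by URI through the
// handler table. The file:// URI below is hypothetical.
fn example_resources_read(mut server Server) ! {
	request := '{"jsonrpc":"2.0","id":3,"method":"resources/read","params":{"uri":"file:///tmp/example.v"}}'
	response := server.handler.handle(request)!
	// On success the result decodes into ResourceReadResult: a list of
	// ResourceContent entries carrying either text or a base64 blob.
	println(response)
}
```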
|
||||
145
lib/ai/mcp/handler_sampling.v
Normal file
@@ -0,0 +1,145 @@
|
||||
module mcp
|
||||
|
||||
import time
|
||||
import os
|
||||
import log
|
||||
import x.json2
|
||||
import json
|
||||
import freeflowuniverse.herolib.schemas.jsonrpc
|
||||
|
||||
// Sampling related structs
|
||||
|
||||
pub struct MessageContent {
|
||||
pub:
|
||||
typ string @[json: 'type']
|
||||
text string
|
||||
data string
|
||||
mimetype string @[json: 'mimeType']
|
||||
}
|
||||
|
||||
pub struct Message {
|
||||
pub:
|
||||
role string
|
||||
content MessageContent
|
||||
}
|
||||
|
||||
pub struct ModelHint {
|
||||
pub:
|
||||
name string
|
||||
}
|
||||
|
||||
pub struct ModelPreferences {
|
||||
pub:
|
||||
hints []ModelHint
|
||||
cost_priority f32 @[json: 'costPriority']
|
||||
speed_priority f32 @[json: 'speedPriority']
|
||||
intelligence_priority f32 @[json: 'intelligencePriority']
|
||||
}
|
||||
|
||||
pub struct SamplingCreateMessageParams {
|
||||
pub:
|
||||
messages []Message
|
||||
model_preferences ModelPreferences @[json: 'modelPreferences']
|
||||
system_prompt string @[json: 'systemPrompt']
|
||||
include_context string @[json: 'includeContext']
|
||||
temperature f32
|
||||
max_tokens int @[json: 'maxTokens']
|
||||
stop_sequences []string @[json: 'stopSequences']
|
||||
metadata map[string]json2.Any
|
||||
}
|
||||
|
||||
pub struct SamplingCreateMessageResult {
|
||||
pub:
|
||||
model string
|
||||
stop_reason string @[json: 'stopReason']
|
||||
role string
|
||||
content MessageContent
|
||||
}
|
||||
|
||||
// sampling_create_message_handler handles the sampling/createMessage request
|
||||
// This request is used to request LLM completions through the client
|
||||
fn (mut s Server) sampling_create_message_handler(data string) !string {
|
||||
// Decode the request
|
||||
request_map := json2.raw_decode(data)!.as_map()
|
||||
id := request_map['id'].int()
|
||||
params_map := request_map['params'].as_map()
|
||||
|
||||
// Validate required parameters
|
||||
if 'messages' !in params_map {
|
||||
return jsonrpc.new_error_response(id, missing_required_argument('messages')).encode()
|
||||
}
|
||||
|
||||
if 'maxTokens' !in params_map {
|
||||
return jsonrpc.new_error_response(id, missing_required_argument('maxTokens')).encode()
|
||||
}
|
||||
|
||||
// Call the backend to handle the sampling request
|
||||
result := s.backend.sampling_create_message(params_map) or {
|
||||
return jsonrpc.new_error_response(id, sampling_error(err.msg())).encode()
|
||||
}
|
||||
|
||||
// Create a success response with the result
|
||||
response := jsonrpc.new_response(id, json.encode(result))
|
||||
return response.encode()
|
||||
}
|
||||
|
||||
// Helper function to convert JSON messages to our Message struct format
|
||||
fn parse_messages(messages_json json2.Any) ![]Message {
|
||||
messages_arr := messages_json.arr()
|
||||
mut result := []Message{cap: messages_arr.len}
|
||||
|
||||
for msg_json in messages_arr {
|
||||
msg_map := msg_json.as_map()
|
||||
|
||||
if 'role' !in msg_map {
|
||||
return error('Missing role in message')
|
||||
}
|
||||
|
||||
if 'content' !in msg_map {
|
||||
return error('Missing content in message')
|
||||
}
|
||||
|
||||
role := msg_map['role'].str()
|
||||
content_map := msg_map['content'].as_map()
|
||||
|
||||
if 'type' !in content_map {
|
||||
return error('Missing type in message content')
|
||||
}
|
||||
|
||||
typ := content_map['type'].str()
|
||||
mut text := ''
|
||||
mut data := ''
|
||||
mut mimetype := ''
|
||||
|
||||
if typ == 'text' {
|
||||
if 'text' !in content_map {
|
||||
return error('Missing text in text content')
|
||||
}
|
||||
text = content_map['text'].str()
|
||||
} else if typ == 'image' {
|
||||
if 'data' !in content_map {
|
||||
return error('Missing data in image content')
|
||||
}
|
||||
data = content_map['data'].str()
|
||||
|
||||
if 'mimeType' !in content_map {
|
||||
return error('Missing mimeType in image content')
|
||||
}
|
||||
mimetype = content_map['mimeType'].str()
|
||||
} else {
|
||||
return error('Unsupported content type: ${typ}')
|
||||
}
|
||||
|
||||
result << Message{
|
||||
role: role
|
||||
content: MessageContent{
|
||||
typ: typ
|
||||
text: text
|
||||
data: data
|
||||
mimetype: mimetype
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
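`parse_messages` accepts an array of messages where each entry carries a `role` plus a typed `content` object, either `text` or `image`. A small sketch of input it accepts, decoded with `x.json2` as the handlers above do.

```v
import x.json2

// Hedged sketch (inside module mcp): the message shape parse_messages accepts.
fn example_parse_messages() ! {
	raw := '[{"role":"user","content":{"type":"text","text":"What changed in this module?"}}]'
	messages := parse_messages(json2.raw_decode(raw)!)!
	assert messages[0].role == 'user'
	assert messages[0].content.typ == 'text'
}
```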
|
||||
151
lib/ai/mcp/handler_tools.v
Normal file
@@ -0,0 +1,151 @@
|
||||
module mcp
|
||||
|
||||
import time
|
||||
import os
|
||||
import log
|
||||
import x.json2
|
||||
import json
|
||||
import freeflowuniverse.herolib.schemas.jsonrpc
|
||||
import freeflowuniverse.herolib.schemas.jsonschema
|
||||
|
||||
// Tool related structs
|
||||
|
||||
pub struct Tool {
|
||||
pub:
|
||||
name string
|
||||
description string
|
||||
input_schema jsonschema.Schema @[json: 'inputSchema']
|
||||
}
|
||||
|
||||
pub struct ToolProperty {
|
||||
pub:
|
||||
typ string @[json: 'type']
|
||||
items ToolItems
|
||||
enum []string
|
||||
}
|
||||
|
||||
pub struct ToolItems {
|
||||
pub:
|
||||
typ string @[json: 'type']
|
||||
enum []string
|
||||
properties map[string]ToolProperty
|
||||
}
|
||||
|
||||
pub struct ToolContent {
|
||||
pub:
|
||||
typ string @[json: 'type']
|
||||
text string
|
||||
number int
|
||||
boolean bool
|
||||
properties map[string]ToolContent
|
||||
items []ToolContent
|
||||
}
|
||||
|
||||
// Tool List Handler
|
||||
|
||||
pub struct ToolListParams {
|
||||
pub:
|
||||
cursor string
|
||||
}
|
||||
|
||||
pub struct ToolListResult {
|
||||
pub:
|
||||
tools []Tool
|
||||
next_cursor string @[json: 'nextCursor']
|
||||
}
|
||||
|
||||
// tools_list_handler handles the tools/list request
|
||||
// This request is used to retrieve a list of available tools
|
||||
fn (mut s Server) tools_list_handler(data string) !string {
|
||||
// Decode the request with cursor parameter
|
||||
request := jsonrpc.decode_request_generic[ToolListParams](data)!
|
||||
cursor := request.params.cursor
|
||||
|
||||
// TODO: Implement pagination logic using the cursor
|
||||
// For now, return all tools
|
||||
	// Create a success response with the result
	response := jsonrpc.new_response(request.id, json.encode(ToolListResult{
		tools:       s.backend.tool_list()!
		next_cursor: '' // Empty if no more pages
	}))
	return response.encode()
|
||||
}
|
||||
|
||||
// Tool Call Handler
|
||||
|
||||
pub struct ToolCallParams {
|
||||
pub:
|
||||
name string
|
||||
arguments map[string]json2.Any
|
||||
meta map[string]json2.Any @[json: '_meta']
|
||||
}
|
||||
|
||||
pub struct ToolCallResult {
|
||||
pub:
|
||||
is_error bool @[json: 'isError']
|
||||
content []ToolContent
|
||||
}
|
||||
|
||||
// tools_call_handler handles the tools/call request
|
||||
// This request is used to call a specific tool with arguments
|
||||
fn (mut s Server) tools_call_handler(data string) !string {
|
||||
// Decode the request with name and arguments parameters
|
||||
request_map := json2.raw_decode(data)!.as_map()
|
||||
params_map := request_map['params'].as_map()
|
||||
tool_name := params_map['name'].str()
|
||||
if !s.backend.tool_exists(tool_name)! {
|
||||
return jsonrpc.new_error_response(request_map['id'].int(), tool_not_found(tool_name)).encode()
|
||||
}
|
||||
|
||||
arguments := params_map['arguments'].as_map()
|
||||
// Get the tool by name
|
||||
tool := s.backend.tool_get(tool_name)!
|
||||
|
||||
// Validate arguments against the input schema
|
||||
// TODO: Implement proper JSON Schema validation
|
||||
for req in tool.input_schema.required {
|
||||
if req !in arguments {
|
||||
return jsonrpc.new_error_response(request_map['id'].int(), missing_required_argument(req)).encode()
|
||||
}
|
||||
}
|
||||
|
||||
log.info('Calling tool: ${tool_name} with arguments: ${arguments}')
|
||||
// Call the tool with the provided arguments
|
||||
result := s.backend.tool_call(tool_name, arguments)!
|
||||
|
||||
log.info('Received result from tool: ${tool_name} with result: ${result}')
|
||||
// Create a success response with the result
|
||||
response := jsonrpc.new_response_generic[ToolCallResult](request_map['id'].int(),
|
||||
result)
|
||||
return response.encode()
|
||||
}
|
||||
|
||||
// Tool Notification Handlers
|
||||
|
||||
// send_tools_list_changed_notification sends a notification when the list of tools changes
|
||||
pub fn (mut s Server) send_tools_list_changed_notification() ! {
|
||||
// Check if the client supports this notification
|
||||
if !s.client_config.capabilities.roots.list_changed {
|
||||
return
|
||||
}
|
||||
|
||||
// Create a notification
|
||||
notification := jsonrpc.new_blank_notification('notifications/tools/list_changed')
|
||||
s.send(json.encode(notification))
|
||||
// Send the notification to all connected clients
|
||||
log.info('Sending tools list changed notification: ${json.encode(notification)}')
|
||||
}
|
||||
|
||||
pub fn error_tool_call_result(err IError) ToolCallResult {
|
||||
return ToolCallResult{
|
||||
is_error: true
|
||||
content: [ToolContent{
|
||||
typ: 'text'
|
||||
text: err.msg()
|
||||
}]
|
||||
}
|
||||
}
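`tools_call_handler` verifies the tool exists, checks the schema's required arguments, and forwards the call to the backend, wrapping the outcome in a `ToolCallResult`. A hedged sketch of a call driven through the handler table; the tool name assumes the `mcpgen` backend defined later in this changeset, and the argument values are hypothetical.

```v
// Hedged sketch (inside module mcp): a tools/call round trip. The tool name
// assumes the mcpgen backend from this changeset; argument values are hypothetical.
fn example_tools_call(mut server Server) ! {
	request := '{"jsonrpc":"2.0","id":5,"method":"tools/call","params":{"name":"create_mcp_tool_code","arguments":{"function_name":"load","module_path":"/tmp/mymodule"}}}'
	response := server.handler.handle(request)!
	// A successful call wraps the handler output in ToolCallResult, e.g.
	// {"isError":false,"content":[{"type":"text","text":"..."}]}
	println(response)
}
```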
|
||||
92
lib/ai/mcp/mcpgen/README.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# MCP Generator
|
||||
|
||||
An implementation of the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) server for V language operations. This server uses the Standard Input/Output (stdio) transport as described in the [MCP documentation](https://modelcontextprotocol.io/docs/concepts/transports).
|
||||
|
||||
## Features
|
||||
|
||||
The server supports the following operations:
|
||||
|
||||
1. **test** - Run V tests on a file or directory
|
||||
2. **run** - Execute V code from a file or directory
|
||||
3. **compile** - Compile V code from a file or directory
|
||||
4. **vet** - Run V vet on a file or directory
|
||||
|
||||
## Usage
|
||||
|
||||
### Building the Server
|
||||
|
||||
```bash
|
||||
v -gc none -stats -enable-globals -n -w -cg -g -cc tcc /Users/despiegk/code/github/freeflowuniverse/herolib/lib/mcp/v_do
|
||||
```
|
||||
|
||||
### Using the Server
|
||||
|
||||
The server communicates using the MCP protocol over stdio. To send a request, use the following format:
|
||||
|
||||
```
|
||||
Content-Length: <length>
|
||||
|
||||
{"jsonrpc":"2.0","id":"<request-id>","method":"<method-name>","params":{"fullpath":"<path-to-file-or-directory>"}}
|
||||
```
|
||||
|
||||
Where:
|
||||
- `<length>` is the length of the JSON message in bytes
|
||||
- `<request-id>` is a unique identifier for the request
|
||||
- `<method-name>` is one of: `test`, `run`, `compile`, or `vet`
|
||||
- `<path-to-file-or-directory>` is the absolute path to the V file or directory to process
|
||||
|
||||
### Example
|
||||
|
||||
Request:
|
||||
```
|
||||
Content-Length: 85
|
||||
|
||||
{"jsonrpc":"2.0","id":"1","method":"test","params":{"fullpath":"/path/to/file.v"}}
|
||||
```
|
||||
|
||||
Response:
|
||||
```
|
||||
Content-Length: 245
|
||||
|
||||
{"jsonrpc":"2.0","id":"1","result":{"output":"Command: v -gc none -stats -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc test /path/to/file.v\nExit code: 0\nOutput:\nAll tests passed!"}}
|
||||
```
|
||||
|
||||
## Methods
|
||||
|
||||
### test
|
||||
|
||||
Runs V tests on the specified file or directory.
|
||||
|
||||
Command used:
|
||||
```
|
||||
v -gc none -stats -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc test ${fullpath}
|
||||
```
|
||||
|
||||
If a directory is specified, it will run tests on all `.v` files in the directory (non-recursive).
|
||||
|
||||
### run
|
||||
|
||||
Executes the specified V file or all V files in a directory.
|
||||
|
||||
Command used:
|
||||
```
|
||||
v -gc none -stats -enable-globals -n -w -cg -g -cc tcc run ${fullpath}
|
||||
```
|
||||
|
||||
### compile
|
||||
|
||||
Compiles the specified V file or all V files in a directory.
|
||||
|
||||
Command used:
|
||||
```
|
||||
cd /tmp && v -gc none -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc ${fullpath}
|
||||
```
|
||||
|
||||
### vet
|
||||
|
||||
Runs V vet on the specified file or directory.
|
||||
|
||||
Command used:
|
||||
```
|
||||
v vet -v -w ${fullpath}
|
||||
```
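Since the transport is plain stdio, a client only has to prefix each JSON-RPC message with the `Content-Length` header described above. A hedged V sketch of framing and sending the example request; it assumes the compiled server binary is reachable on the PATH as `mcp` and mirrors the framing shown in this README.

```v
import os

fn main() {
	body := '{"jsonrpc":"2.0","id":"1","method":"test","params":{"fullpath":"/path/to/file.v"}}'
	// Frame the message as described above: header, blank line, JSON body.
	framed := 'Content-Length: ${body.len}\n\n${body}'

	// Assumption: the server binary is installed on the PATH as `mcp`.
	mut p := os.new_process(os.find_abs_path_of_executable('mcp') or { panic(err) })
	p.set_redirect_stdio()
	p.run()
	p.stdin_write(framed)
	println(p.stdout_read())
}
```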
|
||||
22
lib/ai/mcp/mcpgen/command.v
Normal file
@@ -0,0 +1,22 @@
module mcpgen

import cli

pub const command = cli.Command{
	sort_flags:  true
	name:        'mcpgen'
	// execute: cmd_mcpgen
	description: 'MCP generator command'
	commands:    [
		cli.Command{
			name:        'start'
			execute:     cmd_start
			description: 'start the MCP server'
		},
	]
}

fn cmd_start(cmd cli.Command) ! {
	mut server := new_mcp_server(&MCPGen{})!
	server.start()!
}
281
lib/ai/mcp/mcpgen/mcpgen.v
Normal file
@@ -0,0 +1,281 @@
|
||||
module mcpgen
|
||||
|
||||
import freeflowuniverse.herolib.core.code
|
||||
import freeflowuniverse.herolib.ai.mcp
|
||||
import freeflowuniverse.herolib.schemas.jsonschema
|
||||
import freeflowuniverse.herolib.schemas.jsonschema.codegen
|
||||
import os
|
||||
|
||||
pub struct FunctionPointer {
|
||||
name string // name of function
|
||||
module_path string // path to module
|
||||
}
|
||||
|
||||
// create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
|
||||
// returns an MCP Tool code in v for attaching the function to the mcp server
|
||||
// function_pointers: A list of function pointers to generate tools for
|
||||
pub fn (d &MCPGen) create_mcp_tools_code(function_pointers []FunctionPointer) !string {
|
||||
mut str := ''
|
||||
|
||||
for function_pointer in function_pointers {
|
||||
str += d.create_mcp_tool_code(function_pointer.name, function_pointer.module_path)!
|
||||
}
|
||||
|
||||
return str
|
||||
}
|
||||
|
||||
// create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
|
||||
// returns an MCP Tool code in v for attaching the function to the mcp server
|
||||
pub fn (d &MCPGen) create_mcp_tool_code(function_name string, module_path string) !string {
|
||||
if !os.exists(module_path) {
|
||||
return error('Module path does not exist: ${module_path}')
|
||||
}
|
||||
|
||||
function := code.get_function_from_module(module_path, function_name) or {
|
||||
return error('Failed to get function ${function_name} from module ${module_path}\n${err}')
|
||||
}
|
||||
|
||||
mut types := map[string]string{}
|
||||
for param in function.params {
|
||||
// Check if the type is an Object (struct)
|
||||
if param.typ is code.Object {
|
||||
types[param.typ.symbol()] = code.get_type_from_module(module_path, param.typ.symbol())!
|
||||
}
|
||||
}
|
||||
|
||||
// Get the result type if it's a struct
|
||||
mut result_ := ''
|
||||
if function.result.typ is code.Result {
|
||||
result_type := (function.result.typ as code.Result).typ
|
||||
if result_type is code.Object {
|
||||
result_ = code.get_type_from_module(module_path, result_type.symbol())!
|
||||
}
|
||||
} else if function.result.typ is code.Object {
|
||||
result_ = code.get_type_from_module(module_path, function.result.typ.symbol())!
|
||||
}
|
||||
|
||||
tool_name := function.name
|
||||
tool := d.create_mcp_tool(function, types)!
|
||||
handler := d.create_mcp_tool_handler(function, types, result_)!
|
||||
str := $tmpl('./templates/tool_code.v.template')
|
||||
return str
|
||||
}
|
||||
|
||||
// create_mcp_tool parses a V language function string and returns an MCP Tool struct
|
||||
// function: The V function string including preceding comments
|
||||
// types: A map of struct names to their definitions for complex parameter types
|
||||
// result: The type of result of the create_mcp_tool function. Could be simply string, or struct {...}
|
||||
pub fn (d &MCPGen) create_mcp_tool_handler(function code.Function, types map[string]string, result_ string) !string {
|
||||
decode_stmts := function.params.map(argument_decode_stmt(it)).join_lines()
|
||||
|
||||
function_call := 'd.${function.name}(${function.params.map(it.name).join(',')})'
|
||||
result := code.parse_type(result_)
|
||||
str := $tmpl('./templates/tool_handler.v.template')
|
||||
return str
|
||||
}
|
||||
|
||||
pub fn argument_decode_stmt(param code.Param) string {
|
||||
return if param.typ is code.Integer {
|
||||
'${param.name} := arguments["${param.name}"].int()'
|
||||
} else if param.typ is code.Boolean {
|
||||
'${param.name} := arguments["${param.name}"].bool()'
|
||||
} else if param.typ is code.String {
|
||||
'${param.name} := arguments["${param.name}"].str()'
|
||||
} else if param.typ is code.Object {
|
||||
'${param.name} := json.decode[${param.typ.symbol()}](arguments["${param.name}"].str())!'
|
||||
} else if param.typ is code.Array {
|
||||
'${param.name} := json.decode[${param.typ.symbol()}](arguments["${param.name}"].str())!'
|
||||
} else if param.typ is code.Map {
|
||||
'${param.name} := json.decode[${param.typ.symbol()}](arguments["${param.name}"].str())!'
|
||||
} else {
|
||||
panic('Unsupported type: ${param.typ}')
|
||||
}
|
||||
}
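`argument_decode_stmt` emits one decoding line per parameter of the target function, so the generated handler can unpack its `map[string]Any` arguments. A hedged sketch of the strings it produces for a string and a struct parameter; the exact constructor fields of `code.Param` and `code.Object` are assumptions based on how they are used in this module, and `OpenAPISpec` is a hypothetical struct name.

```v
fn example_argument_decode_stmt() {
	// A plain string parameter decodes with .str().
	name_param := code.Param{
		name: 'name'
		typ:  code.String{}
	}
	assert argument_decode_stmt(name_param) == 'name := arguments["name"].str()'

	// A struct parameter is JSON-decoded into its V type.
	spec_param := code.Param{
		name: 'spec'
		typ:  code.Object{
			name: 'OpenAPISpec' // hypothetical struct name
		}
	}
	assert argument_decode_stmt(spec_param) == 'spec := json.decode[OpenAPISpec](arguments["spec"].str())!'
}
```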
|
||||
|
||||
/*
|
||||
in @generate_mcp.v , implement a create_mpc_tool_handler function that given a vlang function string and the types that map to their corresponding type definitions (for instance struct some_type: SomeType{...}), generates a vlang function such as the following:
|
||||
|
||||
ou
|
||||
pub fn (d &MCPGen) create_mcp_tool_tool_handler(arguments map[string]Any) !mcp.Tool {
|
||||
function := arguments['function'].str()
|
||||
types := json.decode[map[string]string](arguments['types'].str())!
|
||||
return d.create_mcp_tool(function, types)
|
||||
}
|
||||
*/
|
||||
|
||||
// create_mcp_tool parses a V language function string and returns an MCP Tool struct
|
||||
// function: The V function string including preceding comments
|
||||
// types: A map of struct names to their definitions for complex parameter types
|
||||
pub fn (d MCPGen) create_mcp_tool(function code.Function, types map[string]string) !mcp.Tool {
|
||||
// Create input schema for parameters
|
||||
mut properties := map[string]jsonschema.SchemaRef{}
|
||||
mut required := []string{}
|
||||
|
||||
for param in function.params {
|
||||
// Add to required parameters
|
||||
required << param.name
|
||||
|
||||
// Create property for this parameter
|
||||
mut property := jsonschema.SchemaRef{}
|
||||
|
||||
// Check if this is a complex type defined in the types map
|
||||
if param.typ.symbol() in types {
|
||||
// Parse the struct definition to create a nested schema
|
||||
struct_def := types[param.typ.symbol()]
|
||||
struct_schema := codegen.struct_to_schema(code.parse_struct(struct_def)!)
|
||||
if struct_schema is jsonschema.Schema {
|
||||
property = struct_schema
|
||||
} else {
|
||||
return error('Unsupported type: ${param.typ}')
|
||||
}
|
||||
} else {
|
||||
// Handle primitive types
|
||||
property = codegen.typesymbol_to_schema(param.typ.symbol())
|
||||
}
|
||||
|
||||
properties[param.name] = property
|
||||
}
|
||||
|
||||
// Create the input schema
|
||||
input_schema := jsonschema.Schema{
|
||||
typ: 'object'
|
||||
properties: properties
|
||||
required: required
|
||||
}
|
||||
|
||||
// Create and return the Tool
|
||||
return mcp.Tool{
|
||||
name: function.name
|
||||
description: function.description
|
||||
input_schema: input_schema
|
||||
}
|
||||
}
|
||||
|
||||
// // create_mcp_tool_input_schema creates a jsonschema.Schema for a given input type
|
||||
// // input: The input type string
|
||||
// // returns: A jsonschema.Schema for the given input type
|
||||
// // errors: Returns an error if the input type is not supported
|
||||
// pub fn (d MCPGen) create_mcp_tool_input_schema(input string) !jsonschema.Schema {
|
||||
|
||||
// // if input is a primitive type, return a mcp jsonschema.Schema with that type
|
||||
// if input == 'string' {
|
||||
// return jsonschema.Schema{
|
||||
// typ: 'string'
|
||||
// }
|
||||
// } else if input == 'int' {
|
||||
// return jsonschema.Schema{
|
||||
// typ: 'integer'
|
||||
// }
|
||||
// } else if input == 'float' {
|
||||
// return jsonschema.Schema{
|
||||
// typ: 'number'
|
||||
// }
|
||||
// } else if input == 'bool' {
|
||||
// return jsonschema.Schema{
|
||||
// typ: 'boolean'
|
||||
// }
|
||||
// }
|
||||
|
||||
// // if input is a struct, return a mcp jsonschema.Schema with typ 'object' and properties for each field in the struct
|
||||
// if input.starts_with('pub struct ') {
|
||||
// struct_name := input[11..].split(' ')[0]
|
||||
// fields := parse_struct_fields(input)
|
||||
// mut properties := map[string]jsonschema.Schema{}
|
||||
|
||||
// for field_name, field_type in fields {
|
||||
// property := jsonschema.Schema{
|
||||
// typ: d.create_mcp_tool_input_schema(field_type)!.typ
|
||||
// }
|
||||
// properties[field_name] = property
|
||||
// }
|
||||
|
||||
// return jsonschema.Schema{
|
||||
// typ: 'object',
|
||||
// properties: properties
|
||||
// }
|
||||
// }
|
||||
|
||||
// // if input is an array, return a mcp jsonschema.Schema with typ 'array' and items of the item type
|
||||
// if input.starts_with('[]') {
|
||||
// item_type := input[2..]
|
||||
|
||||
// // For array types, we create a schema with type 'array'
|
||||
// // The actual item type is determined by the primitive type
|
||||
// mut item_type_str := 'string' // default
|
||||
// if item_type == 'int' {
|
||||
// item_type_str = 'integer'
|
||||
// } else if item_type == 'float' {
|
||||
// item_type_str = 'number'
|
||||
// } else if item_type == 'bool' {
|
||||
// item_type_str = 'boolean'
|
||||
// }
|
||||
|
||||
// // Create a property for the array items
|
||||
// mut property := jsonschema.Schema{
|
||||
// typ: 'array'
|
||||
// }
|
||||
|
||||
// // Add the property to the schema
|
||||
// mut properties := map[string]jsonschema.Schema{}
|
||||
// properties['items'] = property
|
||||
|
||||
// return jsonschema.Schema{
|
||||
// typ: 'array',
|
||||
// properties: properties
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Default to string type for unknown types
|
||||
// return jsonschema.Schema{
|
||||
// typ: 'string'
|
||||
// }
|
||||
// }
|
||||
|
||||
// parse_struct_fields parses a V language struct definition string and returns a map of field names to their types
|
||||
fn parse_struct_fields(struct_def string) map[string]string {
|
||||
mut fields := map[string]string{}
|
||||
|
||||
// Find the opening and closing braces of the struct definition
|
||||
start_idx := struct_def.index('{') or { return fields }
|
||||
end_idx := struct_def.last_index('}') or { return fields }
|
||||
|
||||
// Extract the content between the braces
|
||||
struct_content := struct_def[start_idx + 1..end_idx].trim_space()
|
||||
|
||||
// Split the content by newlines to get individual field definitions
|
||||
field_lines := struct_content.split('
|
||||
')
|
||||
|
||||
for line in field_lines {
|
||||
trimmed_line := line.trim_space()
|
||||
|
||||
// Skip empty lines and comments
|
||||
if trimmed_line == '' || trimmed_line.starts_with('//') {
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle pub: or mut: prefixes
|
||||
mut field_def := trimmed_line
|
||||
if field_def.starts_with('pub:') || field_def.starts_with('mut:') {
|
||||
field_def = field_def.all_after(':').trim_space()
|
||||
}
|
||||
|
||||
// Split by whitespace to separate field name and type
|
||||
parts := field_def.split_any(' ')
|
||||
if parts.len < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
field_name := parts[0]
|
||||
field_type := parts[1..].join(' ')
|
||||
|
||||
// Handle attributes like @[json: 'name']
|
||||
if field_name.contains('@[') {
|
||||
continue
|
||||
}
|
||||
|
||||
fields[field_name] = field_type
|
||||
}
|
||||
|
||||
return fields
|
||||
}
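`parse_struct_fields` does a lightweight textual scan of the struct body, skipping comments, attributes and the `pub:`/`mut:` markers. A small sketch of what it extracts from the `FunctionPointer` struct declared at the top of this file.

```v
fn example_parse_struct_fields() {
	struct_def := 'pub struct FunctionPointer {
	name string
	module_path string
}'
	fields := parse_struct_fields(struct_def)
	assert fields['name'] == 'string'
	assert fields['module_path'] == 'string'
}
```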
|
||||
@@ -1,9 +1,6 @@
-module baobab
+module mcpgen
 
-import freeflowuniverse.herolib.mcp
-
-@[heap]
-pub struct Baobab {}
+import freeflowuniverse.herolib.ai.mcp
 
 pub fn result_to_mcp_tool_contents[T](result T) []mcp.ToolContent {
 	return [result_to_mcp_tool_content(result)]
144
lib/ai/mcp/mcpgen/mcpgen_tools.v
Normal file
@@ -0,0 +1,144 @@
module mcpgen

import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.core.code
import freeflowuniverse.herolib.schemas.jsonschema
import x.json2 as json { Any }
// import json

// create_mcp_tools_code MCP Tool
// create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
// returns an MCP Tool code in v for attaching the function to the mcp server
// function_pointers: A list of function pointers to generate tools for

const create_mcp_tools_code_tool = mcp.Tool{
	name: 'create_mcp_tools_code'
	description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
returns an MCP Tool code in v for attaching the function to the mcp server
function_pointers: A list of function pointers to generate tools for'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'function_pointers': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'array'
				items: jsonschema.Items(jsonschema.SchemaRef(jsonschema.Schema{
					typ: 'object'
					properties: {
						'name': jsonschema.SchemaRef(jsonschema.Schema{
							typ: 'string'
						})
						'module_path': jsonschema.SchemaRef(jsonschema.Schema{
							typ: 'string'
						})
					}
					required: ['name', 'module_path']
				}))
			})
		}
		required: ['function_pointers']
	}
}

pub fn (d &MCPGen) create_mcp_tools_code_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
	function_pointers := json.decode[[]FunctionPointer](arguments['function_pointers'].str())!
	result := d.create_mcp_tools_code(function_pointers) or {
		return mcp.error_tool_call_result(err)
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](result)
	}
}

const create_mcp_tool_code_tool = mcp.Tool{
	name: 'create_mcp_tool_code'
	description: 'create_mcp_tool_code receives the name of a V language function string, and the path to the module in which it exists.
returns an MCP Tool code in v for attaching the function to the mcp server'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'function_name': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
			'module_path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
		}
		required: ['function_name', 'module_path']
	}
}

pub fn (d &MCPGen) create_mcp_tool_code_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
	function_name := arguments['function_name'].str()
	module_path := arguments['module_path'].str()
	result := d.create_mcp_tool_code(function_name, module_path) or {
		return mcp.error_tool_call_result(err)
	}
	return mcp.ToolCallResult{
		is_error: false
		content: result_to_mcp_tool_contents[string](result)
	}
}

// Tool definition for the create_mcp_tool function
const create_mcp_tool_const_tool = mcp.Tool{
	name: 'create_mcp_tool_const'
	description: 'Parses a V language function string and returns an MCP Tool struct. This tool analyzes function signatures, extracts parameters, and generates the appropriate MCP Tool representation.'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'function': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
			'types': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'object'
			})
		}
		required: ['function']
	}
}

pub fn (d &MCPGen) create_mcp_tool_const_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
	function := json.decode[code.Function](arguments['function'].str())!
	types := json.decode[map[string]string](arguments['types'].str())!
	result := d.create_mcp_tool(function, types) or { return mcp.error_tool_call_result(err) }
	return mcp.ToolCallResult{
		is_error: false
		content: result_to_mcp_tool_contents[string](result.str())
	}
}

// Tool definition for the create_mcp_tool_handler function
const create_mcp_tool_handler_tool = mcp.Tool{
	name: 'create_mcp_tool_handler'
	description: 'Generates a tool handler for the create_mcp_tool function. This tool handler accepts function string and types map and returns an MCP ToolCallResult.'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'function': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
			'types': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'object'
			})
			'result': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
			})
		}
		required: ['function', 'result']
	}
}

// Tool handler for the create_mcp_tool_handler function
pub fn (d &MCPGen) create_mcp_tool_handler_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
	function := json.decode[code.Function](arguments['function'].str())!
	types := json.decode[map[string]string](arguments['types'].str())!
	result_ := arguments['result'].str()
	result := d.create_mcp_tool_handler(function, types, result_) or {
		return mcp.error_tool_call_result(err)
	}
	return mcp.ToolCallResult{
		is_error: false
		content: result_to_mcp_tool_contents[string](result)
	}
}
@@ -0,0 +1,21 @@
{
	"type": "object",
	"properties": {
		"function_pointers": {
			"type": "array",
			"items": {
				"type": "object",
				"properties": {
					"name": {
						"type": "string"
					},
					"module_path": {
						"type": "string"
					}
				},
				"required": ["name", "module_path"]
			}
		}
	},
	"required": ["function_pointers"]
}
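This schema mirrors the `create_mcp_tools_code` tool definition: a `function_pointers` array of objects, each carrying a `name` and a `module_path`. As a minimal sketch, a hypothetical arguments payload that would satisfy it, decoded the way the handler does (the `module_path` value and the local `FunctionPointer` mirror are illustrative assumptions, not taken from the changeset):

```v
import x.json2 as json

// Mirrors the two required properties of the schema; the real FunctionPointer
// struct in the module may differ.
struct FunctionPointer {
	name        string
	module_path string
}

fn main() {
	// Hypothetical payload; the module_path value is illustrative only.
	args := '[{"name": "create_mcp_tool_code", "module_path": "lib/ai/mcp/mcpgen"}]'
	pointers := json.decode[[]FunctionPointer](args) or {
		eprintln('decode failed: ${err}')
		return
	}
	println(pointers.len) // 1
}
```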
35 lib/ai/mcp/mcpgen/server.v Normal file
@@ -0,0 +1,35 @@
module mcpgen

import freeflowuniverse.herolib.ai.mcp.logger
import freeflowuniverse.herolib.ai.mcp

@[heap]
pub struct MCPGen {}

pub fn new_mcp_server(v &MCPGen) !&mcp.Server {
	logger.info('Creating new Developer MCP server')

	// Initialize the server with the empty handlers map
	mut server := mcp.new_server(mcp.MemoryBackend{
		tools: {
			'create_mcp_tool_code':    create_mcp_tool_code_tool
			'create_mcp_tool_const':   create_mcp_tool_const_tool
			'create_mcp_tool_handler': create_mcp_tool_handler_tool
			'create_mcp_tools_code':   create_mcp_tools_code_tool
		}
		tool_handlers: {
			'create_mcp_tool_code':    v.create_mcp_tool_code_tool_handler
			'create_mcp_tool_const':   v.create_mcp_tool_const_tool_handler
			'create_mcp_tool_handler': v.create_mcp_tool_handler_tool_handler
			'create_mcp_tools_code':   v.create_mcp_tools_code_tool_handler
		}
	}, mcp.ServerParams{
		config: mcp.ServerConfiguration{
			server_info: mcp.ServerInfo{
				name:    'mcpgen'
				version: '1.0.0'
			}
		}
	})!
	return server
}
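For context, the other servers in this changeset (pugconvert, rhai) are started from a small `cmd/main.v`. A sketch of the equivalent wiring for this mcpgen server follows; the import path is an assumption derived from the file location `lib/ai/mcp/mcpgen/` and may need adjusting:

```v
module main

// Assumed import path (derived from lib/ai/mcp/mcpgen); adjust if the module lives elsewhere.
import freeflowuniverse.herolib.ai.mcp.mcpgen

fn main() {
	// Create the MCP server around an MCPGen instance
	mut server := mcpgen.new_mcp_server(&mcpgen.MCPGen{}) or {
		eprintln('Failed to create MCP server: ${err}')
		return
	}
	// Start serving over stdio
	server.start() or {
		eprintln('Failed to start MCP server: ${err}')
		return
	}
}
```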
@@ -1,4 +1,5 @@
// @{tool_name} MCP Tool
// @{tool.description}

const @{tool_name}_tool = @{tool.str()}

11 lib/ai/mcp/mcpgen/templates/tool_handler.v.template Normal file
@@ -0,0 +1,11 @@
pub fn (d &MCPGen) @{function.name}_tool_handler(arguments map[string]Any) !mcp.ToolCallResult {
	@{decode_stmts}
	result := @{function_call}
	or {
		return mcp.error_tool_call_result(err)
	}
	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[@{result.symbol()}](result)
	}
}
1 lib/ai/mcp/mcpgen/templates/tools_file.v.template Normal file
@@ -0,0 +1 @@
@for import in
93 lib/ai/mcp/model_configuration.v Normal file
@@ -0,0 +1,93 @@
module mcp

import time
import os
import log
import x.json2
import freeflowuniverse.herolib.schemas.jsonrpc

const protocol_version = '2024-11-05'
// MCP server implementation using stdio transport
// Based on https://modelcontextprotocol.io/docs/concepts/transports

// ClientConfiguration represents the parameters for the initialize request
pub struct ClientConfiguration {
pub:
	protocol_version string @[json: 'protocolVersion']
	capabilities     ClientCapabilities
	client_info      ClientInfo @[json: 'clientInfo']
}

// ClientCapabilities represents the client capabilities
pub struct ClientCapabilities {
pub:
	roots        RootsCapability        // Ability to provide filesystem roots
	sampling     SamplingCapability     // Support for LLM sampling requests
	experimental ExperimentalCapability // Describes support for non-standard experimental features
}

// RootsCapability represents the roots capability
pub struct RootsCapability {
pub:
	list_changed bool @[json: 'listChanged']
}

// SamplingCapability represents the sampling capability
pub struct SamplingCapability {}

// ExperimentalCapability represents the experimental capability
pub struct ExperimentalCapability {}

// ClientInfo represents the client information
pub struct ClientInfo {
pub:
	name    string
	version string
}

// ServerConfiguration represents the server configuration
pub struct ServerConfiguration {
pub:
	protocol_version string = '2024-11-05' @[json: 'protocolVersion']
	capabilities     ServerCapabilities
	server_info      ServerInfo @[json: 'serverInfo']
}

// ServerCapabilities represents the server capabilities
pub struct ServerCapabilities {
pub:
	logging   LoggingCapability
	prompts   PromptsCapability
	resources ResourcesCapability
	tools     ToolsCapability
}

// LoggingCapability represents the logging capability
pub struct LoggingCapability {
}

// PromptsCapability represents the prompts capability
pub struct PromptsCapability {
pub:
	list_changed bool = true @[json: 'listChanged']
}

// ResourcesCapability represents the resources capability
pub struct ResourcesCapability {
pub:
	subscribe    bool = true @[json: 'subscribe']
	list_changed bool = true @[json: 'listChanged']
}

// ToolsCapability represents the tools capability
pub struct ToolsCapability {
pub:
	list_changed bool = true @[json: 'listChanged']
}

// ServerInfo represents the server information
pub struct ServerInfo {
pub:
	name    string = 'HeroLibMCPServer'
	version string = '1.0.0'
}
91 lib/ai/mcp/model_configuration_test.v Normal file
@@ -0,0 +1,91 @@
module mcp

import freeflowuniverse.herolib.schemas.jsonrpc
import json

// This file contains tests for the MCP initialize handler implementation.
// It tests the handler's ability to process initialize requests according to the MCP specification.

// test_json_serialization_deserialization tests the JSON serialization and deserialization of initialize request and response
fn test_json_serialization_deserialization() {
	// Create a sample initialize params object
	params := ClientConfiguration{
		protocol_version: '2024-11-05'
		capabilities: ClientCapabilities{
			roots: RootsCapability{
				list_changed: true
			}
			// sampling: SamplingCapability{}
		}
		client_info: ClientInfo{
			name: 'mcp-inspector'
			// version: '0.0.1'
		}
	}

	// Serialize the params to JSON
	params_json := json.encode(params)

	// Verify the JSON structure has the correct camelCase keys
	assert params_json.contains('"protocolVersion":"2024-11-05"'), 'JSON should have protocolVersion in camelCase'
	assert params_json.contains('"clientInfo":{'), 'JSON should have clientInfo in camelCase'
	assert params_json.contains('"listChanged":true'), 'JSON should have listChanged in camelCase'

	// Deserialize the JSON back to a struct
	deserialized_params := json.decode(ClientConfiguration, params_json) or {
		assert false, 'Failed to deserialize params: ${err}'
		return
	}

	// Verify the deserialized object matches the original
	assert deserialized_params.protocol_version == params.protocol_version, 'Deserialized protocol_version should match original'
	assert deserialized_params.client_info.name == params.client_info.name, 'Deserialized client_info.name should match original'
	assert deserialized_params.client_info.version == params.client_info.version, 'Deserialized client_info.version should match original'
	assert deserialized_params.capabilities.roots.list_changed == params.capabilities.roots.list_changed, 'Deserialized capabilities.roots.list_changed should match original'

	// Now test the response serialization/deserialization
	response := ServerConfiguration{
		protocol_version: '2024-11-05'
		capabilities: ServerCapabilities{
			logging: LoggingCapability{}
			prompts: PromptsCapability{
				list_changed: true
			}
			resources: ResourcesCapability{
				subscribe: true
				list_changed: true
			}
			tools: ToolsCapability{
				list_changed: true
			}
		}
		server_info: ServerInfo{
			name: 'HeroLibMCPServer'
			version: '1.0.0'
		}
	}

	// Serialize the response to JSON
	response_json := json.encode(response)

	// Verify the JSON structure has the correct camelCase keys
	assert response_json.contains('"protocolVersion":"2024-11-05"'), 'JSON should have protocolVersion in camelCase'
	assert response_json.contains('"serverInfo":{'), 'JSON should have serverInfo in camelCase'
	assert response_json.contains('"listChanged":true'), 'JSON should have listChanged in camelCase'
	assert response_json.contains('"subscribe":true'), 'JSON should have subscribe field'

	// Deserialize the JSON back to a struct
	deserialized_response := json.decode(ServerConfiguration, response_json) or {
		assert false, 'Failed to deserialize response: ${err}'
		return
	}

	// Verify the deserialized object matches the original
	assert deserialized_response.protocol_version == response.protocol_version, 'Deserialized protocol_version should match original'
	assert deserialized_response.server_info.name == response.server_info.name, 'Deserialized server_info.name should match original'
	assert deserialized_response.server_info.version == response.server_info.version, 'Deserialized server_info.version should match original'
	assert deserialized_response.capabilities.prompts.list_changed == response.capabilities.prompts.list_changed, 'Deserialized capabilities.prompts.list_changed should match original'
	assert deserialized_response.capabilities.resources.subscribe == response.capabilities.resources.subscribe, 'Deserialized capabilities.resources.subscribe should match original'
	assert deserialized_response.capabilities.resources.list_changed == response.capabilities.resources.list_changed, 'Deserialized capabilities.resources.list_changed should match original'
	assert deserialized_response.capabilities.tools.list_changed == response.capabilities.tools.list_changed, 'Deserialized capabilities.tools.list_changed should match original'
}
42 lib/ai/mcp/model_error.v Normal file
@@ -0,0 +1,42 @@
module mcp

import freeflowuniverse.herolib.schemas.jsonrpc

// resource_not_found indicates that the requested resource doesn't exist.
// This error is returned when the resource specified in the request is not found.
// Error code: -32002
pub fn resource_not_found(uri string) jsonrpc.RPCError {
	return jsonrpc.RPCError{
		code:    -32002
		message: 'Resource not found'
		data:    'The requested resource ${uri} was not found.'
	}
}

fn prompt_not_found(name string) jsonrpc.RPCError {
	return jsonrpc.RPCError{
		code:    -32602 // Invalid params
		message: 'Prompt not found: ${name}'
	}
}

fn missing_required_argument(arg_name string) jsonrpc.RPCError {
	return jsonrpc.RPCError{
		code:    -32602 // Invalid params
		message: 'Missing required argument: ${arg_name}'
	}
}

fn tool_not_found(name string) jsonrpc.RPCError {
	return jsonrpc.RPCError{
		code:    -32602 // Invalid params
		message: 'Tool not found: ${name}'
	}
}

fn sampling_error(message string) jsonrpc.RPCError {
	return jsonrpc.RPCError{
		code:    -32603 // Internal error
		message: 'Sampling error: ${message}'
	}
}
2 lib/ai/mcp/pugconvert/cmd/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
main

16 lib/ai/mcp/pugconvert/cmd/compile.sh Executable file
@@ -0,0 +1,16 @@
#!/bin/bash
set -ex

export name="mcp_pugconvert"

# Change to the directory containing this script
cd "$(dirname "$0")"

# Compile the V program
v -n -w -gc none -cc tcc -d use_openssl -enable-globals main.v

# Ensure the binary is executable
chmod +x main
mv main ~/hero/bin/${name}

echo "Compilation successful. Binary '${name}' is ready."
17 lib/ai/mcp/pugconvert/cmd/main.v Normal file
@@ -0,0 +1,17 @@
module main

import freeflowuniverse.herolib.ai.mcp.servers.pugconvert.mcp

fn main() {
	// Create a new MCP server
	mut server := mcp.new_mcp_server() or {
		eprintln('Failed to create MCP server: ${err}')
		return
	}

	// Start the server
	server.start() or {
		eprintln('Failed to start MCP server: ${err}')
		return
	}
}
203 lib/ai/mcp/pugconvert/logic/convertpug.v Normal file
@@ -0,0 +1,203 @@
module pugconvert

import freeflowuniverse.herolib.clients.openai
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.core.pathlib
import json

pub fn convert_pug(mydir string) ! {
	mut d := pathlib.get_dir(path: mydir, create: false)!
	list := d.list(regex: [r'.*\.pug$'], include_links: false, files_only: true)!
	for item in list.paths {
		convert_pug_file(item.path)!
	}
}

// extract_template parses AI response content to extract just the template
fn extract_template(raw_content string) string {
	mut content := raw_content

	// First check for </think> tag
	if content.contains('</think>') {
		content = content.split('</think>')[1].trim_space()
	}

	// Look for ```jet code block
	if content.contains('```jet') {
		parts := content.split('```jet')
		if parts.len > 1 {
			end_parts := parts[1].split('```')
			if end_parts.len > 0 {
				content = end_parts[0].trim_space()
			}
		}
	} else if content.contains('```') {
		// If no ```jet, look for regular ``` code block
		parts := content.split('```')
		if parts.len >= 2 {
			// Take the content between the first set of ```
			// This handles both ```content``` and cases where there's only an opening ```
			content = parts[1].trim_space()

			// If we only see an opening ``` but no closing, cleanup any remaining backticks
			// to avoid incomplete formatting markers
			if !content.contains('```') {
				content = content.replace('`', '')
			}
		}
	}

	return content
}

pub fn convert_pug_file(myfile string) ! {
	println(myfile)

	// Create new file path by replacing .pug extension with .jet
	jet_file := myfile.replace('.pug', '.jet')

	// Check if jet file already exists, if so skip processing
	mut jet_path_exist := pathlib.get_file(path: jet_file, create: false)!
	if jet_path_exist.exists() {
		println('Jet file already exists: ${jet_file}. Skipping conversion.')
		return
	}

	mut content_path := pathlib.get_file(path: myfile, create: false)!
	content := content_path.read()!

	mut l := loader()
	mut client := openai.get()!

	base_instruction := '
You are a template language converter. You convert Pug templates to Jet templates.

The target template language, Jet, is defined as follows:
'

	base_user_prompt := '
Convert this following Pug template to Jet:

only output the resulting template, no explanation, no steps, just the jet template
'

	// We'll retry up to 5 times if validation fails
	max_attempts := 5
	mut attempts := 0
	mut is_valid := false
	mut error_message := ''
	mut template := ''

	for attempts < max_attempts && !is_valid {
		attempts++

		mut system_content := texttools.dedent(base_instruction) + '\n' + l.jet()
		mut user_prompt := ''

		// Create different prompts for first attempt vs retries
		if attempts == 1 {
			// First attempt - convert from PUG
			user_prompt = texttools.dedent(base_user_prompt) + '\n' + content

			// Print what we're sending to the AI service
			println('Sending to OpenAI for conversion:')
			println('--------------------------------')
			println(content)
			println('--------------------------------')
		} else {
			// Retries - focus on fixing the previous errors
			println('Attempt ${attempts}: Retrying with error feedback')
			user_prompt = '
The previous Jet template conversion had the following error:
ERROR: ${error_message}

Here was the template that had errors:
```
${template}
```

The original pug input was
```
${content}
```

Please fix the template and try again. Learn from feedback and check which jet template was created.
Return only the corrected Jet template.
Dont send back more information than the fixed template, make sure its in jet format.

' // Print what we're sending for the retry

			println('Sending to OpenAI for correction:')
			println('--------------------------------')
			println(user_prompt)
			println('--------------------------------')
		}

		mut m := openai.Messages{
			messages: [
				openai.Message{
					role:    .system
					content: system_content
				},
				openai.Message{
					role:    .user
					content: user_prompt
				},
			]
		}

		// Create a chat completion request
		res := client.chat_completion(
			msgs: m
			model: 'deepseek-r1-distill-llama-70b'
			max_completion_tokens: 64000
		)!

		println('-----')

		// Print AI response before extraction
		println('Response received from AI:')
		println('--------------------------------')
		println(res.choices[0].message.content)
		println('--------------------------------')

		// Extract the template from the AI response
		template = extract_template(res.choices[0].message.content)

		println('Extracted template for ${myfile}:')
		println('--------------------------------')
		println(template)
		println('--------------------------------')

		// Validate the template
		validation_result := jetvaliditycheck(template) or {
			// If validation service is unavailable, we'll just proceed with the template
			println('Warning: Template validation service unavailable: ${err}')
			break
		}

		// Check if template is valid
		if validation_result.is_valid {
			is_valid = true
			println('Template validation successful!')
		} else {
			error_message = validation_result.error
			println('Template validation failed: ${error_message}')
		}
	}

	// Report the validation outcome
	if is_valid {
		println('Successfully converted template after ${attempts} attempt(s)')
		// Create the file and write the processed content
		println('Converted to: ${jet_file}')
		mut jet_path := pathlib.get_file(path: jet_file, create: true)!
		jet_path.write(template)!
	} else if attempts >= max_attempts {
		println('Warning: Could not validate template after ${max_attempts} attempts')
		println('Using best attempt despite validation errors: ${error_message}')
		jet_file2 := jet_file.replace('.jet', '_error.jet')
		mut jet_path2 := pathlib.get_file(path: jet_file2, create: true)!
		jet_path2.write(template)!
	}
}
85 lib/ai/mcp/pugconvert/logic/jetvalidation.v Normal file
@@ -0,0 +1,85 @@
module pugconvert

import freeflowuniverse.herolib.core.httpconnection
import json

// JetTemplateResponse is the expected response structure from the validation service
struct JetTemplateResponse {
	valid   bool
	message string
	error   string
}

// ValidationResult represents the result of a template validation
pub struct ValidationResult {
pub:
	is_valid bool
	error    string
}

// jetvaliditycheck validates a Jet template by sending it to a validation service
// The function sends the template to http://localhost:9020/checkjet for validation
// Returns a ValidationResult containing validity status and any error messages
pub fn jetvaliditycheck(jetcontent string) !ValidationResult {
	// Create HTTP connection to the validation service
	mut conn := httpconnection.HTTPConnection{
		base_url: 'http://localhost:9020'
	}

	// Prepare the request data - template content wrapped in JSON
	template_data := json.encode({
		'template': jetcontent
	})

	// Print what we're sending to the AI service
	// println('Sending to JET validation service:')
	// println('--------------------------------')
	// println(jetcontent)
	// println('--------------------------------')

	// Send the POST request to the validation endpoint
	req := httpconnection.Request{
		prefix:     'checkjet'
		data:       template_data
		dataformat: .json
	}

	// Execute the request
	result := conn.post_json_str(req) or {
		// Handle connection errors
		return ValidationResult{
			is_valid: false
			error:    'Connection error: ${err}'
		}
	}

	// Attempt to parse the response as JSON using the expected struct
	response := json.decode(JetTemplateResponse, result) or {
		// If we can't parse JSON using our struct, the server didn't return the expected format
		return ValidationResult{
			is_valid: false
			error:    'Server returned unexpected format: ${err.msg()}'
		}
	}

	// Use the structured response data
	if response.valid == false {
		error_msg := if response.error != '' {
			response.error
		} else if response.message != '' {
			response.message
		} else {
			'Unknown validation error'
		}

		return ValidationResult{
			is_valid: false
			error:    error_msg
		}
	}

	return ValidationResult{
		is_valid: true
		error:    ''
	}
}
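The validation service is expected to answer the POST to `checkjet` with a JSON object matching `JetTemplateResponse`. A minimal sketch of decoding such a response, using a hypothetical error message:

```v
module main

import json

// Mirrors the JetTemplateResponse struct above.
struct JetTemplateResponse {
	valid   bool
	message string
	error   string
}

fn main() {
	// Hypothetical response body from the validation service.
	body := '{"valid": false, "message": "", "error": "unexpected token on line 3"}'
	resp := json.decode(JetTemplateResponse, body) or {
		eprintln('unexpected format: ${err}')
		return
	}
	if !resp.valid {
		println('validation failed: ${resp.error}')
	}
}
```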
25 lib/ai/mcp/pugconvert/logic/loader.v Normal file
@@ -0,0 +1,25 @@
module pugconvert

import v.embed_file
import os

@[heap]
pub struct FileLoader {
pub mut:
	embedded_files map[string]embed_file.EmbedFileData @[skip; str: skip]
}

fn (mut loader FileLoader) load() {
	loader.embedded_files['jet'] = $embed_file('templates/jet_instructions.md')
}

fn (mut loader FileLoader) jet() string {
	c := loader.embedded_files['jet'] or { panic('bug embed') }
	return c.to_string()
}

fn loader() FileLoader {
	mut loader := FileLoader{}
	loader.load()
	return loader
}
446 lib/ai/mcp/pugconvert/logic/templates/jet_instructions.md Normal file
@@ -0,0 +1,446 @@
# Jet Template Engine Syntax Reference

## Delimiters

Template delimiters are `{{` and `}}`.
Delimiters can use `.` to output the execution context:

```jet
hello {{ . }} <!-- context = "world" => "hello world" -->
```

### Whitespace Trimming

Whitespace around delimiters can be trimmed using `{{-` and `-}}`:

```jet
foo {{- "bar" -}} baz <!-- outputs "foobarbaz" -->
```

Whitespace includes spaces, tabs, carriage returns, and newlines.

### Comments

Comments use `{* ... *}`:

```jet
{* this is a comment *}

{*
Multiline
{{ expressions }} are ignored
*}
```

---

## Variables

### Initialization

```jet
{{ foo := "bar" }}
```

### Assignment

```jet
{{ foo = "asd" }}
{{ foo = 4711 }}
```

Skip assignment but still evaluate:

```jet
{{ _ := stillRuns() }}
{{ _ = stillRuns() }}
```

---

## Expressions

### Identifiers

Identifiers resolve to values:

```jet
{{ len("hello") }}
{{ isset(foo, bar) }}
```

### Indexing

#### String

```jet
{{ s := "helloworld" }}
{{ s[1] }} <!-- 101 (ASCII of 'e') -->
```

#### Slice / Array

```jet
{{ s := slice("foo", "bar", "asd") }}
{{ s[0] }}
{{ s[2] }}
```

#### Map

```jet
{{ m := map("foo", 123, "bar", 456) }}
{{ m["foo"] }}
```

#### Struct

```jet
{{ user["Name"] }}
```

### Field Access

#### Map

```jet
{{ m.foo }}
{{ range s }}
{{ .foo }}
{{ end }}
```

#### Struct

```jet
{{ user.Name }}
{{ range users }}
{{ .Name }}
{{ end }}
```

### Slicing

```jet
{{ s := slice(6, 7, 8, 9, 10, 11) }}
{{ sevenEightNine := s[1:4] }}
```

### Arithmetic

```jet
{{ 1 + 2 * 3 - 4 }}
{{ (1 + 2) * 3 - 4.1 }}
```

### String Concatenation

```jet
{{ "HELLO" + " " + "WORLD!" }}
```

#### Logical Operators

- `&&`
- `||`
- `!`
- `==`, `!=`
- `<`, `>`, `<=`, `>=`

```jet
{{ item == true || !item2 && item3 != "test" }}
{{ item >= 12.5 || item < 6 }}
```

### Ternary Operator

```jet
<title>{{ .HasTitle ? .Title : "Title not set" }}</title>
```

### Method Calls

```jet
{{ user.Rename("Peter") }}
{{ range users }}
{{ .FullName() }}
{{ end }}
```

### Function Calls

```jet
{{ len(s) }}
{{ isset(foo, bar) }}
```

#### Prefix Syntax

```jet
{{ len: s }}
{{ isset: foo, bar }}
```

#### Pipelining

```jet
{{ "123" | len }}
{{ "FOO" | lower | len }}
{{ "hello" | repeat: 2 | len }}
```

**Escapers must be last in a pipeline:**

```jet
{{ "hello" | upper | raw }} <!-- valid -->
{{ raw: "hello" }} <!-- valid -->
{{ raw: "hello" | upper }} <!-- invalid -->
```

#### Piped Argument Slot

```jet
{{ 2 | repeat("foo", _) }}
{{ 2 | repeat("foo", _) | repeat(_, 3) }}
```

---

## Control Structures

### if

```jet
{{ if foo == "asd" }}
foo is 'asd'!
{{ end }}
```

#### if / else

```jet
{{ if foo == "asd" }}
...
{{ else }}
...
{{ end }}
```

#### if / else if

```jet
{{ if foo == "asd" }}
{{ else if foo == 4711 }}
{{ end }}
```

#### if / else if / else

```jet
{{ if foo == "asd" }}
{{ else if foo == 4711 }}
{{ else }}
{{ end }}
```

### range

#### Slices / Arrays

```jet
{{ range s }}
{{ . }}
{{ end }}

{{ range i := s }}
{{ i }}: {{ . }}
{{ end }}

{{ range i, v := s }}
{{ i }}: {{ v }}
{{ end }}
```

#### Maps

```jet
{{ range k := m }}
{{ k }}: {{ . }}
{{ end }}

{{ range k, v := m }}
{{ k }}: {{ v }}
{{ end }}
```

#### Channels

```jet
{{ range v := c }}
{{ v }}
{{ end }}
```

#### Custom Ranger

Any Go type implementing `Ranger` can be ranged over.

#### else

```jet
{{ range searchResults }}
{{ . }}
{{ else }}
No results found :(
{{ end }}
```

### try

```jet
{{ try }}
{{ foo }}
{{ end }}
```

### try / catch

```jet
{{ try }}
{{ foo }}
{{ catch }}
Fallback content
{{ end }}

{{ try }}
{{ foo }}
{{ catch err }}
{{ log(err.Error()) }}
Error: {{ err.Error() }}
{{ end }}
```

---

## Templates

### include

```jet
{{ include "./user.jet" }}

<!-- user.jet -->
<div class="user">
{{ .["name"] }}: {{ .["email"] }}
</div>
```

### return

```jet
<!-- foo.jet -->
{{ return "foo" }}

<!-- bar.jet -->
{{ foo := exec("./foo.jet") }}
Hello, {{ foo }}!
```

---

## Blocks

### block

```jet
{{ block copyright() }}
<div>© ACME, Inc. 2020</div>
{{ end }}

{{ block inputField(type="text", label, id, value="", required=false) }}
<label for="{{ id }}">{{ label }}</label>
<input type="{{ type }}" value="{{ value }}" id="{{ id }}" {{ required ? "required" : "" }} />
{{ end }}
```

### yield

```jet
{{ yield copyright() }}

{{ yield inputField(id="firstname", label="First name", required=true) }}

{{ block buff() }}
<strong>{{ . }}</strong>
{{ end }}

{{ yield buff() "Batman" }}
```

### content

```jet
{{ block link(target) }}
<a href="{{ target }}">{{ yield content }}</a>
{{ end }}

{{ yield link(target="https://example.com") content }}
Example Inc.
{{ end }}
```

```jet
{{ block header() }}
<div class="header">
{{ yield content }}
</div>
{{ content }}
<h1>Hey {{ name }}!</h1>
{{ end }}
```

### Recursion

```jet
{{ block menu() }}
<ul>
{{ range . }}
<li>{{ .Text }}{{ if len(.Children) }}{{ yield menu() .Children }}{{ end }}</li>
{{ end }}
</ul>
{{ end }}
```

### extends

```jet
<!-- content.jet -->
{{ extends "./layout.jet" }}
{{ block body() }}
<main>This content can be yielded anywhere.</main>
{{ end }}

<!-- layout.jet -->
<html>
<body>
{{ yield body() }}
</body>
</html>
```

### import

```jet
<!-- my_blocks.jet -->
{{ block body() }}
<main>This content can be yielded anywhere.</main>
{{ end }}

<!-- index.jet -->
{{ import "./my_blocks.jet" }}
<html>
<body>
{{ yield body() }}
</body>
</html>
```
54 lib/ai/mcp/pugconvert/mcp/handlers.v Normal file
@@ -0,0 +1,54 @@
module mcp

import freeflowuniverse.herolib.ai.mcp
import x.json2 as json { Any }
import freeflowuniverse.herolib.ai.mcp.aitools.pugconvert
import freeflowuniverse.herolib.core.pathlib
import os

pub fn handler(arguments map[string]Any) !mcp.ToolCallResult {
	path := arguments['path'].str()

	// Check if path exists
	if !os.exists(path) {
		return mcp.ToolCallResult{
			is_error: true
			content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' does not exist")
		}
	}

	// Determine if path is a file or directory
	is_directory := os.is_dir(path)

	mut message := ''

	if is_directory {
		// Convert all pug files in the directory
		pugconvert.convert_pug(path) or {
			return mcp.ToolCallResult{
				is_error: true
				content: mcp.result_to_mcp_tool_contents[string]('Error converting pug files in directory: ${err}')
			}
		}
		message = "Successfully converted all pug files in directory '${path}'"
	} else if path.ends_with('.pug') {
		// Convert a single pug file
		pugconvert.convert_pug_file(path) or {
			return mcp.ToolCallResult{
				is_error: true
				content: mcp.result_to_mcp_tool_contents[string]('Error converting pug file: ${err}')
			}
		}
		message = "Successfully converted pug file '${path}'"
	} else {
		return mcp.ToolCallResult{
			is_error: true
			content: mcp.result_to_mcp_tool_contents[string]("Error: Path '${path}' is not a directory or .pug file")
		}
	}

	return mcp.ToolCallResult{
		is_error: false
		content: mcp.result_to_mcp_tool_contents[string](message)
	}
}
27 lib/ai/mcp/pugconvert/mcp/mcp.v Normal file
@@ -0,0 +1,27 @@
module mcp

import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.ai.mcp.logger
import freeflowuniverse.herolib.schemas.jsonrpc

pub fn new_mcp_server() !&mcp.Server {
	logger.info('Creating new Developer MCP server')

	// Initialize the server with the empty handlers map
	mut server := mcp.new_server(mcp.MemoryBackend{
		tools: {
			'pugconvert': specs
		}
		tool_handlers: {
			'pugconvert': handler
		}
	}, mcp.ServerParams{
		config: mcp.ServerConfiguration{
			server_info: mcp.ServerInfo{
				name:    'developer'
				version: '1.0.0'
			}
		}
	})!
	return server
}
21 lib/ai/mcp/pugconvert/mcp/specifications.v Normal file
@@ -0,0 +1,21 @@
module mcp

import freeflowuniverse.herolib.ai.mcp
import x.json2 as json
import freeflowuniverse.herolib.schemas.jsonschema
import freeflowuniverse.herolib.ai.mcp.logger

const specs = mcp.Tool{
	name: 'pugconvert'
	description: 'Convert Pug template files to Jet template files'
	input_schema: jsonschema.Schema{
		typ: 'object'
		properties: {
			'path': jsonschema.SchemaRef(jsonschema.Schema{
				typ: 'string'
				description: 'Path to a .pug file or directory containing .pug files to convert'
			})
		}
		required: ['path']
	}
}
2 lib/ai/mcp/rhai/cmd/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
main

16 lib/ai/mcp/rhai/cmd/compile.sh Executable file
@@ -0,0 +1,16 @@
#!/bin/bash
set -ex

export name="mcp_rhai"

# Change to the directory containing this script
cd "$(dirname "$0")"

# Compile the V program
v -n -w -gc none -cc tcc -d use_openssl -enable-globals main.v

# Ensure the binary is executable
chmod +x main
mv main ~/hero/bin/${name}

echo "Compilation successful. Binary '${name}' is ready."
18 lib/ai/mcp/rhai/cmd/main.v Normal file
@@ -0,0 +1,18 @@
module main

import freeflowuniverse.herolib.ai.mcp.rhai.mcp
import log

fn main() {
	// Create a new MCP server
	mut server := mcp.new_mcp_server() or {
		log.error('Failed to create MCP server: ${err}')
		return
	}

	// Start the server
	server.start() or {
		log.error('Failed to start MCP server: ${err}')
		return
	}
}
532 lib/ai/mcp/rhai/example/example copy.vsh Normal file
@@ -0,0 +1,532 @@
|
||||
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
|
||||
import os
|
||||
|
||||
fn main() {
|
||||
// Get the current directory
|
||||
current_dir := os.dir(@FILE)
|
||||
|
||||
// Check if a source code path was provided as an argument
|
||||
if os.args.len < 2 {
|
||||
println('Please provide the path to the source code directory as an argument')
|
||||
println('Example: ./example.vsh /path/to/source/code/directory')
|
||||
return
|
||||
}
|
||||
|
||||
// Get the source code path from the command line arguments
|
||||
source_code_path := os.args[1]
|
||||
|
||||
// Check if the path exists and is a directory
|
||||
if !os.exists(source_code_path) {
|
||||
println('Source code path does not exist: ${source_code_path}')
|
||||
return
|
||||
}
|
||||
|
||||
if !os.is_dir(source_code_path) {
|
||||
println('Source code path is not a directory: ${source_code_path}')
|
||||
return
|
||||
}
|
||||
|
||||
// Get all Rust files in the directory
|
||||
files := os.ls(source_code_path) or {
|
||||
println('Failed to list files in directory: ${err}')
|
||||
return
|
||||
}
|
||||
|
||||
// Combine all Rust files into a single source code string
|
||||
mut source_code := ''
|
||||
for file in files {
|
||||
file_path := os.join_path(source_code_path, file)
|
||||
|
||||
// Skip directories and non-Rust files
|
||||
if os.is_dir(file_path) || !file.ends_with('.rs') {
|
||||
continue
|
||||
}
|
||||
|
||||
// Read the file content
|
||||
file_content := os.read_file(file_path) or {
|
||||
println('Failed to read file ${file_path}: ${err}')
|
||||
continue
|
||||
}
|
||||
|
||||
// Add file content to the combined source code
|
||||
source_code += '// File: ${file}\n${file_content}\n\n'
|
||||
}
|
||||
|
||||
if source_code == '' {
|
||||
println('No Rust files found in directory: ${source_code_path}')
|
||||
return
|
||||
}
|
||||
|
||||
// Read the rhaiwrapping.md file
|
||||
rhai_wrapping_md := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping.md') or {
|
||||
println('Failed to read rhaiwrapping.md: ${err}')
|
||||
return
|
||||
}
|
||||
|
||||
// Determine the crate path from the source code path
|
||||
// Extract the path relative to the src directory
|
||||
src_index := source_code_path.index('src/') or {
|
||||
println('Could not determine crate path: src/ not found in path')
|
||||
return
|
||||
}
|
||||
|
||||
mut path_parts := source_code_path[src_index + 4..].split('/')
|
||||
// Remove the last part (the file name)
|
||||
if path_parts.len > 0 {
|
||||
path_parts.delete_last()
|
||||
}
|
||||
rel_path := path_parts.join('::')
|
||||
crate_path := 'sal::${rel_path}'
|
||||
|
||||
// Create a new task
|
||||
mut task := escalayer.new_task(
|
||||
name: 'rhai_wrapper_creator.escalayer'
|
||||
description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
|
||||
)
|
||||
|
||||
// Create model configs
|
||||
sonnet_model := escalayer.ModelConfig{
|
||||
name: 'anthropic/claude-3.7-sonnet'
|
||||
provider: 'anthropic'
|
||||
temperature: 0.7
|
||||
max_tokens: 25000
|
||||
}
|
||||
|
||||
gpt4_model := escalayer.ModelConfig{
|
||||
name: 'gpt-4'
|
||||
provider: 'openai'
|
||||
temperature: 0.7
|
||||
max_tokens: 25000
|
||||
}
|
||||
|
||||
// Extract the module name from the directory path (last component)
|
||||
dir_parts := source_code_path.split('/')
|
||||
name := dir_parts[dir_parts.len - 1]
|
||||
|
||||
// Create the prompt with source code, wrapper example, and rhai_wrapping_md
|
||||
prompt_content := create_rhai_wrappers(name, source_code, os.read_file('${current_dir}/prompts/example_script.md') or {
|
||||
''
|
||||
}, os.read_file('${current_dir}/prompts/wrapper.md') or { '' }, os.read_file('${current_dir}/prompts/errors.md') or {
|
||||
''
|
||||
}, crate_path)
|
||||
|
||||
// Create a prompt function that returns the prepared content
|
||||
prompt_function := fn [prompt_content] (input string) string {
|
||||
return prompt_content
|
||||
}
|
||||
|
||||
gen := RhaiGen{
|
||||
name: name
|
||||
dir: source_code_path
|
||||
}
|
||||
|
||||
// Define a single unit task that handles everything
|
||||
task.new_unit_task(
|
||||
name: 'create_rhai_wrappers'
|
||||
prompt_function: prompt_function
|
||||
callback_function: gen.process_rhai_wrappers
|
||||
base_model: sonnet_model
|
||||
retry_model: gpt4_model
|
||||
retry_count: 1
|
||||
)
|
||||
|
||||
// Initiate the task
|
||||
result := task.initiate('') or {
|
||||
println('Task failed: ${err}')
|
||||
return
|
||||
}
|
||||
|
||||
println('Task completed successfully')
|
||||
println('The wrapper files have been generated and compiled in the target directory.')
|
||||
println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
|
||||
}
|
||||
|
||||
// Define the prompt functions
|
||||
fn separate_functions(input string) string {
|
||||
return 'Read the following Rust code and separate it into functions. Identify all the methods in the Container implementation and their purposes.\n\n${input}'
|
||||
}
|
||||
|
||||
fn create_wrappers(input string) string {
|
||||
return 'Create Rhai wrappers for the Rust functions identified in the previous step. The wrappers should follow the builder pattern and provide a clean API for use in Rhai scripts. Include error handling and type conversion.\n\n${input}'
|
||||
}
|
||||
|
||||
fn create_example(input string) string {
|
||||
return 'Create a Rhai example script that demonstrates how to use the wrapper functions. The example should be based on the provided example.rs file but adapted for Rhai syntax. Create a web server example that uses the container functions.\n\n${input}'
|
||||
}
|
||||
|
||||
// Define a Rhai wrapper generator function for Container functions
|
||||
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
|
||||
guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md') or {
|
||||
panic('Failed to read guides')
|
||||
}
|
||||
engine := $tmpl('./prompts/engine.md')
|
||||
vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md') or {
|
||||
panic('Failed to read guides')
|
||||
}
|
||||
rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md') or {
|
||||
panic('Failed to read guides')
|
||||
}
|
||||
rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md') or {
|
||||
panic('Failed to read guides')
|
||||
}
|
||||
generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
|
||||
return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
|
||||
${guides}
|
||||
${vector_vs_array}
|
||||
${example_rhai}
|
||||
${wrapper_md}
|
||||
|
||||
## Common Errors to Avoid
|
||||
${errors_md}
|
||||
${rhai_integration_fixes}
|
||||
${rhai_syntax_guide}
|
||||
|
||||
## Your Task
|
||||
|
||||
Please create a wrapper.rs file that implements Rhai wrappers for the provided Rust code, and an example.rhai script that demonstrates how to use these wrappers:
|
||||
|
||||
## Rust Code to Wrap
|
||||
|
||||
```rust
|
||||
${source_code}
|
||||
```
|
||||
|
||||
IMPORTANT NOTES:
|
||||
1. For Rhai imports, use: `use rhai::{Engine, EvalAltResult, plugin::*, Dynamic, Map, Array};` - only import what you actually use
|
||||
2. The following dependencies are available in Cargo.toml:
|
||||
- rhai = "1.21.0"
|
||||
- serde = { version = "1.0", features = ["derive"] }
|
||||
- serde_json = "1.0"
|
||||
- sal = { path = "../../../" }
|
||||
|
||||
3. For the wrapper: `use sal::${name};` this way you can access the module functions and objects with ${name}::
|
||||
|
||||
4. The generic_wrapper.rs file will be hardcoded into the package, you can use code from there.
|
||||
|
||||
```rust
|
||||
${generic_wrapper_rs}
|
||||
```
|
||||
|
||||
5. IMPORTANT: Prefer strongly typed return values over Dynamic types whenever possible. Only use Dynamic when absolutely necessary.
|
||||
- For example, return `Result<String, Box<EvalAltResult>>` instead of `Dynamic` when a function returns a string
|
||||
- Use `Result<bool, Box<EvalAltResult>>` instead of `Dynamic` when a function returns a boolean
|
||||
- Use `Result<Vec<String>, Box<EvalAltResult>>` instead of `Dynamic` when a function returns a list of strings
|
||||
|
||||
6. Your code should include public functions that can be called from Rhai scripts
|
||||
|
||||
7. Make sure to implement all necessary helper functions for type conversion
|
||||
|
||||
8. DO NOT use the #[rhai_fn] attribute - functions will be registered directly in the engine
|
||||
|
||||
9. Make sure to handle string type consistency - use String::from() for string literals when returning in match arms with format!() strings
|
||||
|
||||
10. When returning path references, convert them to owned strings (e.g., path().to_string())
|
||||
|
||||
11. For error handling, use proper Result types with Box<EvalAltResult> for the error type:
|
||||
```rust
|
||||
// INCORRECT:
|
||||
pub fn some_function(arg: &str) -> Dynamic {
|
||||
match some_operation(arg) {
|
||||
Ok(result) => Dynamic::from(result),
|
||||
Err(err) => Dynamic::from(format!("Error: {}", err))
|
||||
}
|
||||
}
|
||||
|
||||
// CORRECT:
|
||||
pub fn some_function(arg: &str) -> Result<String, Box<EvalAltResult>> {
|
||||
some_operation(arg).map_err(|err| {
|
||||
Box::new(EvalAltResult::ErrorRuntime(
|
||||
format!("Error: {}", err).into(),
|
||||
rhai::Position::NONE
|
||||
))
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
12. IMPORTANT: Format your response with the code between triple backticks as follows:
|
||||
|
||||
```rust
|
||||
// wrapper.rs
|
||||
// Your wrapper implementation here
|
||||
```
|
||||
|
||||
```rust
|
||||
// engine.rs
|
||||
// Your engine.rs implementation here
|
||||
```
|
||||
|
||||
```rhai
|
||||
// example.rhai
|
||||
// Your example Rhai script here
|
||||
```
|
||||
|
||||
13. The example.rhai script should demonstrate the use of all the wrapper functions you create
|
||||
|
||||
14. The engine.rs file should contain a register_module function that registers all the wrapper functions and types with the Rhai engine, and a create function. For example:
|
||||
|
||||
${engine}
|
||||
|
||||
MOST IMPORTANT:
|
||||
import package being wrapped as `use sal::<name>`
|
||||
your engine create function is called `create_rhai_engine`
|
||||
|
||||
```
|
||||
'
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct WrapperModule {
|
||||
pub:
|
||||
lib_rs string
|
||||
example_rs string
|
||||
engine_rs string
|
||||
cargo_toml string
|
||||
example_rhai string
|
||||
generic_wrapper_rs string
|
||||
wrapper_rs string
|
||||
}
|
||||
|
||||
// functions is a list of function names that AI should extract and pass in
|
||||
fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
|
||||
// Define project directory paths
|
||||
name := name_
|
||||
project_dir := '${base_dir}/rhai'
|
||||
|
||||
// Create the project using cargo new --lib
|
||||
if os.exists(project_dir) {
|
||||
os.rmdir_all(project_dir) or {
|
||||
return error('Failed to clean existing project directory: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Run cargo new --lib to create the project
|
||||
os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }
|
||||
|
||||
cargo_new_result := os.execute('cargo new --lib rhai')
|
||||
if cargo_new_result.exit_code != 0 {
|
||||
return error('Failed to create new library project: ${cargo_new_result.output}')
|
||||
}
|
||||
|
||||
// Create examples directory
|
||||
examples_dir := '${project_dir}/examples'
|
||||
os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
|
||||
|
||||
// Write the lib.rs file
|
||||
if wrapper.lib_rs != '' {
|
||||
os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
|
||||
return error('Failed to write lib.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the wrapper.rs file
|
||||
if wrapper.wrapper_rs != '' {
|
||||
os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
|
||||
return error('Failed to write wrapper.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the generic wrapper.rs file
|
||||
if wrapper.generic_wrapper_rs != '' {
|
||||
os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
|
||||
return error('Failed to write generic wrapper.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the example.rs file
|
||||
if wrapper.example_rs != '' {
|
||||
os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
|
||||
return error('Failed to write example.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the engine.rs file if provided
|
||||
if wrapper.engine_rs != '' {
|
||||
os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
|
||||
return error('Failed to write engine.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the Cargo.toml file
|
||||
if wrapper.cargo_toml != '' {
|
||||
os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
|
||||
return error('Failed to write Cargo.toml: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the example.rhai file if provided
|
||||
if wrapper.example_rhai != '' {
|
||||
os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
|
||||
return error('Failed to write example.rhai: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
return project_dir
|
||||
}
|
||||
|
||||
// Helper function to extract code blocks from the response
|
||||
fn extract_code_block(response string, identifier string, language string) string {
|
||||
// Find the start marker for the code block
|
||||
mut start_marker := '```${language}\n// ${identifier}'
|
||||
if language == '' {
|
||||
start_marker = '```\n// ${identifier}'
|
||||
}
|
||||
|
||||
start_index := response.index(start_marker) or {
|
||||
// Try alternative format
|
||||
mut alt_marker := '```${language}\n${identifier}'
|
||||
if language == '' {
|
||||
alt_marker = '```\n${identifier}'
|
||||
}
|
||||
|
||||
response.index(alt_marker) or { return '' }
|
||||
}
|
||||
|
||||
// Find the end marker
|
||||
end_marker := '```'
|
||||
end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
|
||||
|
||||
// Extract the content between the markers
|
||||
content_start := start_index + start_marker.len
|
||||
content := response[content_start..end_index].trim_space()
|
||||
|
||||
return content
|
||||
}
|
||||
|
||||
// Extract module name from wrapper code
|
||||
fn extract_module_name(code string) string {
|
||||
lines := code.split('\n')
|
||||
|
||||
for line in lines {
|
||||
// Look for pub mod or mod declarations
|
||||
if line.contains('pub mod ') || line.contains('mod ') {
|
||||
// Extract module name
|
||||
mut parts := []string{}
|
||||
if line.contains('pub mod ') {
|
||||
parts = line.split('pub mod ')
|
||||
} else {
|
||||
parts = line.split('mod ')
|
||||
}
|
||||
|
||||
if parts.len > 1 {
|
||||
// Extract the module name and remove any trailing characters
|
||||
mut name := parts[1].trim_space()
|
||||
// Remove any trailing { or ; or whitespace
|
||||
name = name.trim_right('{').trim_right(';').trim_space()
|
||||
if name != '' {
|
||||
return name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ''
|
||||
}
|
||||
|
||||
struct RhaiGen {
|
||||
name string
|
||||
dir string
|
||||
}
|
||||
|
||||
// Define the callback function that processes the response and compiles the code
|
||||
fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
|
||||
// Extract wrapper.rs content
|
||||
wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
|
||||
if wrapper_rs_content == '' {
|
||||
return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
|
||||
}
|
||||
|
||||
// Extract engine.rs content
|
||||
mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
|
||||
if engine_rs_content == '' {
|
||||
// Try to extract from the response without explicit language marker
|
||||
engine_rs_content = extract_code_block(response, 'engine.rs', '')
|
||||
// if engine_rs_content == '' {
|
||||
// // Use the template engine.rs
|
||||
// engine_rs_content = $tmpl('./templates/engine.rs')
|
||||
// }
|
||||
}
|
||||
|
||||
// Extract example.rhai content
|
||||
mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
|
||||
if example_rhai_content == '' {
|
||||
// Try to extract from the response without explicit language marker
|
||||
example_rhai_content = extract_code_block(response, 'example.rhai', '')
|
||||
if example_rhai_content == '' {
|
||||
// Use the example from the template
|
||||
example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
|
||||
return error('Failed to read example.rhai template: ${err}')
|
||||
}
|
||||
|
||||
// Extract the code block from the markdown file
|
||||
example_rhai_content = extract_code_block(example_script_md, 'example.rhai',
|
||||
'rhai')
|
||||
if example_rhai_content == '' {
|
||||
return error('Failed to extract example.rhai from template file')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract function names from the wrapper.rs content
|
||||
functions := extract_functions_from_code(wrapper_rs_content)
|
||||
|
||||
println('Using module name: ${gen.name}_rhai')
|
||||
println('Extracted functions: ${functions.join(', ')}')
|
||||
|
||||
name := gen.name
|
||||
// Create a WrapperModule struct with the extracted content
|
||||
wrapper := WrapperModule{
|
||||
lib_rs: $tmpl('./templates/lib.rs')
|
||||
wrapper_rs: wrapper_rs_content
|
||||
example_rs: $tmpl('./templates/example.rs')
|
||||
engine_rs: engine_rs_content
|
||||
generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
|
||||
cargo_toml: $tmpl('./templates/cargo.toml')
|
||||
example_rhai: example_rhai_content
|
||||
}
|
||||
|
||||
// Create the wrapper module
|
||||
base_target_dir := gen.dir
|
||||
project_dir := create_wrapper_module(wrapper, functions, gen.name, base_target_dir) or {
|
||||
return error('Failed to create wrapper module: ${err}')
|
||||
}
|
||||
|
||||
// Run the example
|
||||
os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }
|
||||
|
||||
// Run cargo build first
|
||||
build_result := os.execute('cargo build')
|
||||
if build_result.exit_code != 0 {
|
||||
return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
|
||||
}
|
||||
|
||||
// Run the example
|
||||
run_result := os.execute('cargo run --example example')
|
||||
|
||||
return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_result.output}\n\nRun output:\n${run_result.output}'
|
||||
}
|
||||
|
||||
// Extract function names from wrapper code
|
||||
fn extract_functions_from_code(code string) []string {
|
||||
mut functions := []string{}
|
||||
lines := code.split('\n')
|
||||
|
||||
for line in lines {
|
||||
if line.contains('pub fn ') && !line.contains('//') {
|
||||
// Extract function name
|
||||
parts := line.split('pub fn ')
|
||||
if parts.len > 1 {
|
||||
name_parts := parts[1].split('(')
|
||||
if name_parts.len > 0 {
|
||||
fn_name := name_parts[0].trim_space()
|
||||
if fn_name != '' {
|
||||
functions << fn_name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return functions
|
||||
}
|
||||
596
lib/ai/mcp/rhai/example/example.vsh
Executable file
596
lib/ai/mcp/rhai/example/example.vsh
Executable file
@@ -0,0 +1,596 @@
|
||||
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.ai.mcp.aitools.escalayer
|
||||
import os
|
||||
|
||||
fn main() {
|
||||
// Get the current directory where this script is located
|
||||
current_dir := os.dir(@FILE)
|
||||
|
||||
// Validate command line arguments
|
||||
source_code_path := validate_command_args() or {
|
||||
println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Read and combine all Rust files in the source directory
|
||||
source_code := read_source_code(source_code_path) or {
|
||||
println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Determine the crate path from the source code path
|
||||
crate_path := determine_crate_path(source_code_path) or {
|
||||
println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Extract the module name from the directory path (last component)
|
||||
name := extract_module_name_from_path(source_code_path)
|
||||
|
||||
// Create the prompt content for the AI
|
||||
prompt_content := create_rhai_wrappers(name, source_code, read_file_safely('${current_dir}/prompts/example_script.md'),
|
||||
read_file_safely('${current_dir}/prompts/wrapper.md'), read_file_safely('${current_dir}/prompts/errors.md'),
|
||||
crate_path)
|
||||
|
||||
// Create the generator instance
|
||||
gen := RhaiGen{
|
||||
name: name
|
||||
dir: source_code_path
|
||||
}
|
||||
|
||||
// Run the task to generate Rhai wrappers
|
||||
run_wrapper_generation_task(prompt_content, gen) or {
|
||||
println('Task failed: ${err}')
|
||||
return
|
||||
}
|
||||
|
||||
println('Task completed successfully')
|
||||
println('The wrapper files have been generated and compiled in the target directory.')
|
||||
println('Check /Users/timurgordon/code/git.ourworld.tf/herocode/sal/src/rhai for the compiled output.')
|
||||
}
|
||||
|
||||
// Validates command line arguments and returns the source code path
|
||||
fn validate_command_args() !string {
|
||||
if os.args.len < 2 {
|
||||
return error('Please provide the path to the source code directory as an argument\nExample: ./example.vsh /path/to/source/code/directory')
|
||||
}
|
||||
|
||||
source_code_path := os.args[1]
|
||||
|
||||
if !os.exists(source_code_path) {
|
||||
return error('Source code path does not exist: ${source_code_path}')
|
||||
}
|
||||
|
||||
if !os.is_dir(source_code_path) {
|
||||
return error('Source code path is not a directory: ${source_code_path}')
|
||||
}
|
||||
|
||||
return source_code_path
|
||||
}
|
||||
|
||||
// Reads and combines all Rust files in the given directory
|
||||
fn read_source_code(source_code_path string) !string {
|
||||
// Get all files in the directory
|
||||
files := os.ls(source_code_path) or {
|
||||
return error('Failed to list files in directory: ${err}')
|
||||
}
|
||||
|
||||
// Combine all Rust files into a single source code string
|
||||
mut source_code := ''
|
||||
for file in files {
|
||||
file_path := os.join_path(source_code_path, file)
|
||||
|
||||
// Skip directories and non-Rust files
|
||||
if os.is_dir(file_path) || !file.ends_with('.rs') {
|
||||
continue
|
||||
}
|
||||
|
||||
// Read the file content
|
||||
file_content := os.read_file(file_path) or {
|
||||
println('Failed to read file ${file_path}: ${err}')
|
||||
continue
|
||||
}
|
||||
|
||||
// Add file content to the combined source code
|
||||
source_code += '// File: ${file}\n${file_content}\n\n'
|
||||
}
|
||||
|
||||
if source_code == '' {
|
||||
return error('No Rust files found in directory: ${source_code_path}')
|
||||
}
|
||||
|
||||
return source_code
|
||||
}
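// Illustrative round-trip for read_source_code (directory and file names are
// hypothetical): every .rs file is prefixed with a '// File:' header and the
// files are separated by blank lines.
fn example_read_source_code_usage() ! {
	dir := os.join_path(os.temp_dir(), 'rhai_wrapper_demo_src')
	os.mkdir_all(dir)!
	os.write_file(os.join_path(dir, 'demo.rs'), 'fn a() {}')!
	combined := read_source_code(dir)!
	assert combined == '// File: demo.rs\nfn a() {}\n\n'
}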
|
||||
|
||||
// Determines the crate path from the source code path
|
||||
fn determine_crate_path(source_code_path string) !string {
|
||||
// Extract the path relative to the src directory
|
||||
src_index := source_code_path.index('src/') or {
|
||||
return error('Could not determine crate path: src/ not found in path')
|
||||
}
|
||||
|
||||
mut path_parts := source_code_path[src_index + 4..].split('/')
|
||||
// Remove the last part (the file name)
|
||||
if path_parts.len > 0 {
|
||||
path_parts.delete_last()
|
||||
}
|
||||
rel_path := path_parts.join('::')
|
||||
return 'sal::${rel_path}'
|
||||
}
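// Illustrative result of determine_crate_path (the path below is hypothetical):
// because the last component is always dropped, a directory such as
// /repo/src/virt/nerdctl maps to the parent module path under sal::.
fn example_determine_crate_path_usage() ! {
	assert determine_crate_path('/repo/src/virt/nerdctl')! == 'sal::virt'
}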
|
||||
|
||||
// Extracts the module name from a directory path
|
||||
fn extract_module_name_from_path(path string) string {
|
||||
dir_parts := path.split('/')
|
||||
return dir_parts[dir_parts.len - 1]
|
||||
}
|
||||
|
||||
// Helper function to read a file or return empty string if file doesn't exist
|
||||
fn read_file_safely(file_path string) string {
|
||||
return os.read_file(file_path) or { '' }
|
||||
}
|
||||
|
||||
// Runs the task to generate Rhai wrappers
|
||||
fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
|
||||
// Create a new task
|
||||
mut task := escalayer.new_task(
|
||||
name: 'rhai_wrapper_creator.escalayer'
|
||||
description: 'Create Rhai wrappers for Rust functions that follow the builder pattern and create examples corresponding to the provided example file'
|
||||
)
|
||||
|
||||
// Create model configs
|
||||
sonnet_model := escalayer.ModelConfig{
|
||||
name: 'anthropic/claude-3.7-sonnet'
|
||||
provider: 'anthropic'
|
||||
temperature: 0.7
|
||||
max_tokens: 25000
|
||||
}
|
||||
|
||||
gpt4_model := escalayer.ModelConfig{
|
||||
name: 'gpt-4'
|
||||
provider: 'openai'
|
||||
temperature: 0.7
|
||||
max_tokens: 25000
|
||||
}
|
||||
|
||||
// Create a prompt function that returns the prepared content
|
||||
prompt_function := fn [prompt_content] (input string) string {
|
||||
return prompt_content
|
||||
}
|
||||
|
||||
// Define a single unit task that handles everything
|
||||
task.new_unit_task(
|
||||
name: 'create_rhai_wrappers'
|
||||
prompt_function: prompt_function
|
||||
callback_function: gen.process_rhai_wrappers
|
||||
base_model: sonnet_model
|
||||
retry_model: gpt4_model
|
||||
retry_count: 1
|
||||
)
|
||||
|
||||
// Initiate the task
|
||||
return task.initiate('')
|
||||
}
|
||||
|
||||
// Build the AI prompt used to generate Rhai wrappers for the given Rust source code
|
||||
fn create_rhai_wrappers(name string, source_code string, example_rhai string, wrapper_md string, errors_md string, crate_path string) string {
|
||||
// Load all required template and guide files
|
||||
guides := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')
|
||||
engine := $tmpl('./prompts/engine.md')
|
||||
vector_vs_array := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')
|
||||
rhai_integration_fixes := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')
|
||||
rhai_syntax_guide := load_guide_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')
|
||||
generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
|
||||
|
||||
// Build the prompt content
|
||||
return build_prompt_content(name, source_code, example_rhai, wrapper_md, errors_md,
|
||||
guides, vector_vs_array, rhai_integration_fixes, rhai_syntax_guide, generic_wrapper_rs,
|
||||
engine)
|
||||
}
|
||||
|
||||
// Helper function to load guide files with error handling
|
||||
fn load_guide_file(path string) string {
|
||||
return os.read_file(path) or {
|
||||
eprintln('Warning: Failed to read guide file: ${path}')
|
||||
return ''
|
||||
}
|
||||
}
|
||||
|
||||
// Builds the prompt content for the AI
|
||||
fn build_prompt_content(name string, source_code string, example_rhai string, wrapper_md string,
|
||||
errors_md string, guides string, vector_vs_array string,
|
||||
rhai_integration_fixes string, rhai_syntax_guide string,
|
||||
generic_wrapper_rs string, engine string) string {
|
||||
return 'You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
|
||||
${guides}
|
||||
${vector_vs_array}
|
||||
${example_rhai}
|
||||
${wrapper_md}
|
||||
|
||||
## Common Errors to Avoid
|
||||
${errors_md}
|
||||
${rhai_integration_fixes}
|
||||
${rhai_syntax_guide}
|
||||
|
||||
## Your Task
|
||||
|
||||
Please create a wrapper.rs file that implements Rhai wrappers for the provided Rust code, and an example.rhai script that demonstrates how to use these wrappers:
|
||||
|
||||
## Rust Code to Wrap
|
||||
|
||||
```rust
|
||||
${source_code}
|
||||
```
|
||||
|
||||
IMPORTANT NOTES:
|
||||
1. For Rhai imports, use: `use rhai::{Engine, EvalAltResult, plugin::*, Dynamic, Map, Array};` - only import what you actually use
|
||||
2. The following dependencies are available in Cargo.toml:
|
||||
- rhai = "1.21.0"
|
||||
- serde = { version = "1.0", features = ["derive"] }
|
||||
- serde_json = "1.0"
|
||||
- sal = { path = "../../../" }
|
||||
|
||||
3. For the wrapper, `use sal::${name};` so that you can access the module functions and objects with ${name}::
|
||||
|
||||
4. The generic_wrapper.rs file is included in the package as-is; you can reuse code from it.
|
||||
|
||||
```rust
|
||||
${generic_wrapper_rs}
|
||||
```
|
||||
|
||||
5. IMPORTANT: Prefer strongly typed return values over Dynamic types whenever possible. Only use Dynamic when absolutely necessary.
|
||||
- For example, return `Result<String, Box<EvalAltResult>>` instead of `Dynamic` when a function returns a string
|
||||
- Use `Result<bool, Box<EvalAltResult>>` instead of `Dynamic` when a function returns a boolean
|
||||
- Use `Result<Vec<String>, Box<EvalAltResult>>` instead of `Dynamic` when a function returns a list of strings
|
||||
|
||||
6. Your code should include public functions that can be called from Rhai scripts
|
||||
|
||||
7. Make sure to implement all necessary helper functions for type conversion
|
||||
|
||||
8. DO NOT use the #[rhai_fn] attribute - functions will be registered directly in the engine
|
||||
|
||||
9. Keep string types consistent in match arms - when one arm returns a format!() String, wrap string literals in the other arms with String::from()
|
||||
|
||||
10. When returning path references, convert them to owned strings (e.g., path().to_string())
|
||||
|
||||
11. For error handling, use proper Result types with Box<EvalAltResult> for the error type:
|
||||
```rust
|
||||
// INCORRECT:
|
||||
pub fn some_function(arg: &str) -> Dynamic {
|
||||
match some_operation(arg) {
|
||||
Ok(result) => Dynamic::from(result),
|
||||
Err(err) => Dynamic::from(format!("Error: {}", err))
|
||||
}
|
||||
}
|
||||
|
||||
// CORRECT:
|
||||
pub fn some_function(arg: &str) -> Result<String, Box<EvalAltResult>> {
|
||||
some_operation(arg).map_err(|err| {
|
||||
Box::new(EvalAltResult::ErrorRuntime(
|
||||
format!("Error: {}", err).into(),
|
||||
rhai::Position::NONE
|
||||
))
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
12. IMPORTANT: Format your response with the code between triple backticks as follows:
|
||||
|
||||
```rust
|
||||
// wrapper.rs
|
||||
// Your wrapper implementation here
|
||||
```
|
||||
|
||||
```rust
|
||||
// engine.rs
|
||||
// Your engine.rs implementation here
|
||||
```
|
||||
|
||||
```rhai
|
||||
// example.rhai
|
||||
// Your example Rhai script here
|
||||
```
|
||||
|
||||
13. The example.rhai script should demonstrate the use of all the wrapper functions you create
|
||||
|
||||
14. The engine.rs file should contain a register_module function that registers all the wrapper functions and types with the Rhai engine, and a create function. For example:
|
||||
|
||||
${engine}
|
||||
|
||||
MOST IMPORTANT:
Import the package being wrapped as `use sal::<n>`.
Your engine create function must be named `create_rhai_engine`.
|
||||
'
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct WrapperModule {
|
||||
pub:
|
||||
lib_rs string
|
||||
example_rs string
|
||||
engine_rs string
|
||||
cargo_toml string
|
||||
example_rhai string
|
||||
generic_wrapper_rs string
|
||||
wrapper_rs string
|
||||
}
|
||||
|
||||
// functions is the list of function names that the AI should extract and pass in
|
||||
fn create_wrapper_module(wrapper WrapperModule, functions []string, name_ string, base_dir string) !string {
|
||||
// Define project directory paths
|
||||
name := name_
|
||||
project_dir := '${base_dir}/rhai'
|
||||
|
||||
// Create the project using cargo new --lib
|
||||
if os.exists(project_dir) {
|
||||
os.rmdir_all(project_dir) or {
|
||||
return error('Failed to clean existing project directory: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Run cargo new --lib to create the project
|
||||
os.chdir(base_dir) or { return error('Failed to change directory to base directory: ${err}') }
|
||||
|
||||
cargo_new_result := os.execute('cargo new --lib rhai')
|
||||
if cargo_new_result.exit_code != 0 {
|
||||
return error('Failed to create new library project: ${cargo_new_result.output}')
|
||||
}
|
||||
|
||||
// Create examples directory
|
||||
examples_dir := '${project_dir}/examples'
|
||||
os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
|
||||
|
||||
// Write the lib.rs file
|
||||
if wrapper.lib_rs != '' {
|
||||
os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
|
||||
return error('Failed to write lib.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the wrapper.rs file
|
||||
if wrapper.wrapper_rs != '' {
|
||||
os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
|
||||
return error('Failed to write wrapper.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the generic wrapper.rs file
|
||||
if wrapper.generic_wrapper_rs != '' {
|
||||
os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
|
||||
return error('Failed to write generic wrapper.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the example.rs file
|
||||
if wrapper.example_rs != '' {
|
||||
os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
|
||||
return error('Failed to write example.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the engine.rs file if provided
|
||||
if wrapper.engine_rs != '' {
|
||||
os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
|
||||
return error('Failed to write engine.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the Cargo.toml file
|
||||
if wrapper.cargo_toml != '' {
|
||||
os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
|
||||
return error('Failed to write Cargo.toml: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the example.rhai file if provided
|
||||
if wrapper.example_rhai != '' {
|
||||
os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
|
||||
return error('Failed to write example.rhai: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
return project_dir
|
||||
}
|
||||
|
||||
// Helper function to extract code blocks from the response
|
||||
fn extract_code_block(response string, identifier string, language string) string {
|
||||
// Find the start marker for the code block
|
||||
mut start_marker := '```${language}\n// ${identifier}'
|
||||
if language == '' {
|
||||
start_marker = '```\n// ${identifier}'
|
||||
}
|
||||
|
||||
start_index := response.index(start_marker) or {
|
||||
// Try alternative format
|
||||
mut alt_marker := '```${language}\n${identifier}'
|
||||
if language == '' {
|
||||
alt_marker = '```\n${identifier}'
|
||||
}
|
||||
|
||||
response.index(alt_marker) or { return '' }
|
||||
}
|
||||
|
||||
// Find the end marker
|
||||
end_marker := '```'
|
||||
end_index := response.index_after(end_marker, start_index + start_marker.len) or { return '' }
|
||||
|
||||
// Extract the content between the markers
|
||||
content_start := start_index + start_marker.len
|
||||
content := response[content_start..end_index].trim_space()
|
||||
|
||||
return content
|
||||
}
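// Illustrative usage of extract_code_block (the response literal is hypothetical);
// the fence is built with repeat() so the marker does not clash with real code fences.
fn example_extract_code_block_usage() {
	fence := '`'.repeat(3)
	response := '${fence}rust\n// wrapper.rs\npub fn demo() {}\n${fence}'
	assert extract_code_block(response, 'wrapper.rs', 'rust') == 'pub fn demo() {}'
}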
|
||||
|
||||
// Extract module name from wrapper code
|
||||
fn extract_module_name(code string) string {
|
||||
lines := code.split('\n')
|
||||
|
||||
for line in lines {
|
||||
// Look for pub mod or mod declarations
|
||||
if line.contains('pub mod ') || line.contains('mod ') {
|
||||
// Extract module name
|
||||
mut parts := []string{}
|
||||
if line.contains('pub mod ') {
|
||||
parts = line.split('pub mod ')
|
||||
} else {
|
||||
parts = line.split('mod ')
|
||||
}
|
||||
|
||||
if parts.len > 1 {
|
||||
// Extract the module name and remove any trailing characters
|
||||
mut name := parts[1].trim_space()
|
||||
// Remove any trailing { or ; or whitespace
|
||||
name = name.trim_right('{').trim_right(';').trim_space()
|
||||
if name != '' {
|
||||
return name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ''
|
||||
}
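// Illustrative usage of extract_module_name (the snippet is hypothetical):
// a `pub mod` declaration yields the bare module name.
fn example_extract_module_name_usage() {
	src := 'use std::fs;\npub mod nerdctl;\n'
	assert extract_module_name(src) == 'nerdctl'
}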
|
||||
|
||||
// RhaiGen struct for generating Rhai wrappers
|
||||
struct RhaiGen {
|
||||
name string
|
||||
dir string
|
||||
}
|
||||
|
||||
// Process the AI response and compile the generated code
|
||||
fn (gen RhaiGen) process_rhai_wrappers(response string) !string {
|
||||
// Extract code blocks from the response
|
||||
code_blocks := extract_code_blocks(response) or { return err }
|
||||
|
||||
// Extract function names from the wrapper.rs content
|
||||
functions := extract_functions_from_code(code_blocks.wrapper_rs)
|
||||
|
||||
println('Using module name: ${gen.name}_rhai')
|
||||
println('Extracted functions: ${functions.join(', ')}')
|
||||
|
||||
name := gen.name
|
||||
|
||||
// Create a WrapperModule struct with the extracted content
|
||||
wrapper := WrapperModule{
|
||||
lib_rs: $tmpl('./templates/lib.rs')
|
||||
wrapper_rs: code_blocks.wrapper_rs
|
||||
example_rs: $tmpl('./templates/example.rs')
|
||||
engine_rs: code_blocks.engine_rs
|
||||
generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
|
||||
cargo_toml: $tmpl('./templates/cargo.toml')
|
||||
example_rhai: code_blocks.example_rhai
|
||||
}
|
||||
|
||||
// Create the wrapper module
|
||||
project_dir := create_wrapper_module(wrapper, functions, gen.name, gen.dir) or {
|
||||
return error('Failed to create wrapper module: ${err}')
|
||||
}
|
||||
|
||||
// Build and run the project
|
||||
build_output, run_output := build_and_run_project(project_dir) or { return err }
|
||||
|
||||
return format_success_message(project_dir, build_output, run_output)
|
||||
}
|
||||
|
||||
// CodeBlocks struct to hold extracted code blocks
|
||||
struct CodeBlocks {
|
||||
wrapper_rs string
|
||||
engine_rs string
|
||||
example_rhai string
|
||||
}
|
||||
|
||||
// Extract code blocks from the AI response
|
||||
fn extract_code_blocks(response string) !CodeBlocks {
|
||||
// Extract wrapper.rs content
|
||||
wrapper_rs_content := extract_code_block(response, 'wrapper.rs', 'rust')
|
||||
if wrapper_rs_content == '' {
|
||||
return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
|
||||
}
|
||||
|
||||
// Extract engine.rs content
|
||||
mut engine_rs_content := extract_code_block(response, 'engine.rs', 'rust')
|
||||
if engine_rs_content == '' {
|
||||
// Try to extract from the response without explicit language marker
|
||||
engine_rs_content = extract_code_block(response, 'engine.rs', '')
|
||||
}
|
||||
|
||||
// Extract example.rhai content
|
||||
mut example_rhai_content := extract_code_block(response, 'example.rhai', 'rhai')
|
||||
if example_rhai_content == '' {
|
||||
// Try to extract from the response without explicit language marker
|
||||
example_rhai_content = extract_code_block(response, 'example.rhai', '')
|
||||
if example_rhai_content == '' {
|
||||
// Use the example from the template
|
||||
example_rhai_content = load_example_from_template() or { return err }
|
||||
}
|
||||
}
|
||||
|
||||
return CodeBlocks{
|
||||
wrapper_rs: wrapper_rs_content
|
||||
engine_rs: engine_rs_content
|
||||
example_rhai: example_rhai_content
|
||||
}
|
||||
}
|
||||
|
||||
// Load example.rhai from template file
|
||||
fn load_example_from_template() !string {
|
||||
example_script_md := os.read_file('${os.dir(@FILE)}/prompts/example_script.md') or {
|
||||
return error('Failed to read example.rhai template: ${err}')
|
||||
}
|
||||
|
||||
// Extract the code block from the markdown file
|
||||
example_rhai_content := extract_code_block(example_script_md, 'example.rhai', 'rhai')
|
||||
if example_rhai_content == '' {
|
||||
return error('Failed to extract example.rhai from template file')
|
||||
}
|
||||
|
||||
return example_rhai_content
|
||||
}
|
||||
|
||||
// Build and run the project
|
||||
fn build_and_run_project(project_dir string) !(string, string) {
|
||||
// Change to the project directory
|
||||
os.chdir(project_dir) or { return error('Failed to change directory to project: ${err}') }
|
||||
|
||||
// Run cargo build first
|
||||
build_result := os.execute('cargo build')
|
||||
if build_result.exit_code != 0 {
|
||||
return error('Compilation failed. Please fix the following errors and ensure your code is compatible with the existing codebase:\n\n${build_result.output}')
|
||||
}
|
||||
|
||||
// Run the example
|
||||
run_result := os.execute('cargo run --example example')
|
||||
|
||||
return build_result.output, run_result.output
|
||||
}
|
||||
|
||||
// Format success message
|
||||
fn format_success_message(project_dir string, build_output string, run_output string) string {
|
||||
return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
|
||||
}
|
||||
|
||||
// Extract function names from wrapper code
|
||||
fn extract_functions_from_code(code string) []string {
|
||||
mut functions := []string{}
|
||||
lines := code.split('\n')
|
||||
|
||||
for line in lines {
|
||||
if line.contains('pub fn ') && !line.contains('//') {
|
||||
// Extract function name
|
||||
parts := line.split('pub fn ')
|
||||
if parts.len > 1 {
|
||||
name_parts := parts[1].split('(')
|
||||
if name_parts.len > 0 {
|
||||
fn_name := name_parts[0].trim_space()
|
||||
if fn_name != '' {
|
||||
functions << fn_name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return functions
|
||||
}
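// Illustrative usage of extract_functions_from_code (the Rust snippet is
// hypothetical): only lines declaring `pub fn` without a `//` comment are picked up.
fn example_extract_functions_usage() {
	src := 'pub fn container_new(name: &str) -> Container {\n}\npub fn container_start(c: &mut Container) {\n}'
	assert extract_functions_from_code(src) == ['container_new', 'container_start']
}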
|
||||
283
lib/ai/mcp/rhai/logic/logic.v
Normal file
283
lib/ai/mcp/rhai/logic/logic.v
Normal file
@@ -0,0 +1,283 @@
|
||||
module logic
|
||||
|
||||
import freeflowuniverse.herolib.ai.escalayer
|
||||
import freeflowuniverse.herolib.lang.rust
|
||||
import freeflowuniverse.herolib.ai.utils
|
||||
import os
|
||||
|
||||
pub fn generate_rhai_wrapper(name string, source_path string) !string {
|
||||
// Detect source package and module information
|
||||
source_pkg_info := rust.detect_source_package(source_path)!
|
||||
source_code := rust.read_source_code(source_path)!
|
||||
prompt := rhai_wrapper_generation_prompt(name, source_code, source_pkg_info)!
|
||||
return run_wrapper_generation_task(prompt, RhaiGen{
|
||||
name: name
|
||||
dir: source_path
|
||||
source_pkg_info: source_pkg_info
|
||||
})!
|
||||
}
|
||||
|
||||
// Runs the task to generate Rhai wrappers
|
||||
pub fn run_wrapper_generation_task(prompt_content string, gen RhaiGen) !string {
|
||||
// Create a new task
|
||||
mut task := escalayer.new_task(
|
||||
name: 'rhai_wrapper_creator.escalayer'
|
||||
description: 'Create Rhai wrappers for Rust functions that follow the builder pattern and create examples corresponding to the provided example file'
|
||||
)
|
||||
|
||||
// Create model configs
|
||||
sonnet_model := escalayer.ModelConfig{
|
||||
name: 'anthropic/claude-3.7-sonnet'
|
||||
provider: 'anthropic'
|
||||
temperature: 0.7
|
||||
max_tokens: 25000
|
||||
}
|
||||
|
||||
gpt4_model := escalayer.ModelConfig{
|
||||
name: 'gpt-4'
|
||||
provider: 'openai'
|
||||
temperature: 0.7
|
||||
max_tokens: 25000
|
||||
}
|
||||
|
||||
// Create a prompt function that returns the prepared content
|
||||
prompt_function := fn [prompt_content] (input string) string {
|
||||
return prompt_content
|
||||
}
|
||||
|
||||
// Define a single unit task that handles everything
|
||||
task.new_unit_task(
|
||||
name: 'create_rhai_wrappers'
|
||||
prompt_function: prompt_function
|
||||
callback_function: gen.process_rhai_wrappers
|
||||
base_model: sonnet_model
|
||||
retry_model: gpt4_model
|
||||
retry_count: 1
|
||||
)
|
||||
|
||||
// Initiate the task
|
||||
return task.initiate('')
|
||||
}
|
||||
|
||||
// Build the AI prompt used to generate Rhai wrappers for the given Rust source code
|
||||
pub fn rhai_wrapper_generation_prompt(name string, source_code string, source_pkg_info rust.SourcePackageInfo) !string {
|
||||
current_dir := os.dir(@FILE)
|
||||
example_rhai := os.read_file('${current_dir}/prompts/example_script.md')!
|
||||
wrapper_md := os.read_file('${current_dir}/prompts/wrapper.md')!
|
||||
errors_md := os.read_file('${current_dir}/prompts/errors.md')!
|
||||
|
||||
// Load all required template and guide files
|
||||
guides := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhaiwrapping_classicai.md')!
|
||||
engine := $tmpl('./prompts/engine.md')
|
||||
vector_vs_array := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_array_vs_vector.md')!
|
||||
rhai_integration_fixes := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_integration_fixes.md')!
|
||||
rhai_syntax_guide := os.read_file('/Users/timurgordon/code/git.ourworld.tf/herocode/sal/aiprompts/rhai_syntax_guide.md')!
|
||||
generic_wrapper_rs := $tmpl('./templates/generic_wrapper.rs')
|
||||
|
||||
prompt := $tmpl('./prompts/main.md')
|
||||
return prompt
|
||||
}
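// Note on $tmpl: the referenced template file is embedded at compile time and
// local variables in scope here (name, source_code, guides, engine, ...) are
// interpolated using V's template syntax (e.g. @name). A minimal, hypothetical
// sketch, assuming a greeting.md that contains "Hello @name":
//
//   fn greet(name string) string {
//   	return $tmpl('./greeting.md')
//   }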
|
||||
|
||||
@[params]
|
||||
pub struct WrapperModule {
|
||||
pub:
|
||||
lib_rs string
|
||||
example_rs string
|
||||
engine_rs string
|
||||
cargo_toml string
|
||||
example_rhai string
|
||||
generic_wrapper_rs string
|
||||
wrapper_rs string
|
||||
}
|
||||
|
||||
// Writes the generated wrapper module files into <path>/rhai and returns the project directory
|
||||
pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string) !string {
|
||||
// Define project directory paths
|
||||
project_dir := '${path}/rhai'
|
||||
|
||||
// Create the project using cargo new --lib
|
||||
if os.exists(project_dir) {
|
||||
os.rmdir_all(project_dir) or {
|
||||
return error('Failed to clean existing project directory: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Run cargo new --lib to create the project
|
||||
os.chdir(path) or { return error('Failed to change directory to base directory: ${err}') }
|
||||
|
||||
cargo_new_result := os.execute('cargo new --lib rhai')
|
||||
if cargo_new_result.exit_code != 0 {
|
||||
return error('Failed to create new library project: ${cargo_new_result.output}')
|
||||
}
|
||||
|
||||
// Create examples directory
|
||||
examples_dir := '${project_dir}/examples'
|
||||
os.mkdir_all(examples_dir) or { return error('Failed to create examples directory: ${err}') }
|
||||
|
||||
// Write the lib.rs file
|
||||
if wrapper.lib_rs != '' {
|
||||
os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
|
||||
return error('Failed to write lib.rs: ${err}')
|
||||
}
|
||||
} else {
|
||||
// Use default lib.rs template if none provided
|
||||
lib_rs_content := $tmpl('./templates/lib.rs')
|
||||
os.write_file('${project_dir}/src/lib.rs', lib_rs_content) or {
|
||||
return error('Failed to write lib.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the wrapper.rs file
|
||||
if wrapper.wrapper_rs != '' {
|
||||
os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
|
||||
return error('Failed to write wrapper.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the generic wrapper.rs file
|
||||
if wrapper.generic_wrapper_rs != '' {
|
||||
os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
|
||||
return error('Failed to write generic wrapper.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the example.rs file
|
||||
if wrapper.example_rs != '' {
|
||||
os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
|
||||
return error('Failed to write example.rs: ${err}')
|
||||
}
|
||||
} else {
|
||||
// Use default example.rs template if none provided
|
||||
example_rs_content := $tmpl('./templates/example.rs')
|
||||
os.write_file('${examples_dir}/example.rs', example_rs_content) or {
|
||||
return error('Failed to write example.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the engine.rs file if provided
|
||||
if wrapper.engine_rs != '' {
|
||||
os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
|
||||
return error('Failed to write engine.rs: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
// Write the Cargo.toml file
|
||||
os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
|
||||
return error('Failed to write Cargo.toml: ${err}')
|
||||
}
|
||||
|
||||
// Write the example.rhai file
|
||||
os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
|
||||
return error('Failed to write example.rhai: ${err}')
|
||||
}
|
||||
|
||||
return project_dir
|
||||
}
|
||||
|
||||
// Extract module name from wrapper code
|
||||
fn extract_module_name(code string) string {
|
||||
lines := code.split('\n')
|
||||
|
||||
for line in lines {
|
||||
// Look for pub mod or mod declarations
|
||||
if line.contains('pub mod ') || line.contains('mod ') {
|
||||
// Extract module name
|
||||
mut parts := []string{}
|
||||
if line.contains('pub mod ') {
|
||||
parts = line.split('pub mod ')
|
||||
} else {
|
||||
parts = line.split('mod ')
|
||||
}
|
||||
|
||||
if parts.len > 1 {
|
||||
// Extract the module name and remove any trailing characters
|
||||
mut name := parts[1].trim_space()
|
||||
// Remove any trailing { or ; or whitespace
|
||||
name = name.trim_right('{').trim_right(';').trim_space()
|
||||
if name != '' {
|
||||
return name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ''
|
||||
}
|
||||
|
||||
// RhaiGen struct for generating Rhai wrappers
|
||||
struct RhaiGen {
|
||||
name string
|
||||
dir string
|
||||
source_pkg_info rust.SourcePackageInfo
|
||||
}
|
||||
|
||||
// Process the AI response and compile the generated code
|
||||
pub fn (gen RhaiGen) process_rhai_wrappers(input string) !string {
|
||||
blocks := extract_code_blocks(input)!
|
||||
source_pkg_info := gen.source_pkg_info
|
||||
// Create the module structure
|
||||
mod := WrapperModule{
|
||||
lib_rs: blocks.lib_rs
|
||||
engine_rs: blocks.engine_rs
|
||||
example_rhai: blocks.example_rhai
|
||||
generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
|
||||
wrapper_rs: blocks.wrapper_rs
|
||||
}
|
||||
|
||||
// Write the module files
|
||||
project_dir := write_rhai_wrapper_module(mod, gen.name, gen.dir)!
|
||||
|
||||
return project_dir
|
||||
}
|
||||
|
||||
// CodeBlocks struct to hold extracted code blocks
|
||||
struct CodeBlocks {
|
||||
wrapper_rs string
|
||||
engine_rs string
|
||||
example_rhai string
|
||||
lib_rs string
|
||||
}
|
||||
|
||||
// Extract code blocks from the AI response
|
||||
fn extract_code_blocks(response string) !CodeBlocks {
|
||||
// Extract wrapper.rs content
|
||||
wrapper_rs_content := utils.extract_code_block(response, 'wrapper.rs', 'rust')
|
||||
if wrapper_rs_content == '' {
|
||||
return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
|
||||
}
|
||||
|
||||
// Extract engine.rs content
|
||||
mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
|
||||
if engine_rs_content == '' {
|
||||
// Try to extract from the response without explicit language marker
|
||||
engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
|
||||
}
|
||||
|
||||
// Extract example.rhai content
|
||||
mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
|
||||
if example_rhai_content == '' {
|
||||
// Try to extract from the response without explicit language marker
|
||||
example_rhai_content = utils.extract_code_block(response, 'example.rhai', '')
|
||||
if example_rhai_content == '' {
|
||||
return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
|
||||
}
|
||||
}
|
||||
|
||||
// Extract lib.rs content
|
||||
lib_rs_content := utils.extract_code_block(response, 'lib.rs', 'rust')
|
||||
if lib_rs_content == '' {
|
||||
return error('Failed to extract lib.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// lib.rs and ends with ```')
|
||||
}
|
||||
|
||||
return CodeBlocks{
|
||||
wrapper_rs: wrapper_rs_content
|
||||
engine_rs: engine_rs_content
|
||||
example_rhai: example_rhai_content
|
||||
lib_rs: lib_rs_content
|
||||
}
|
||||
}
|
||||
|
||||
// Format success message
|
||||
fn format_success_message(project_dir string, build_output string, run_output string) string {
|
||||
return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
|
||||
}
|
||||
258
lib/ai/mcp/rhai/logic/logic_sampling.v
Normal file
258
lib/ai/mcp/rhai/logic/logic_sampling.v
Normal file
@@ -0,0 +1,258 @@
|
||||
module logic
|
||||
|
||||
import freeflowuniverse.herolib.ai.escalayer
|
||||
import freeflowuniverse.herolib.lang.rust
|
||||
import freeflowuniverse.herolib.ai.utils
|
||||
import os
|
||||
|
||||
// pub fn generate_rhai_wrapper_sampling(name string, source_path string) !string {
|
||||
// prompt := rhai_wrapper_generation_prompt(name, source_path) or {panic(err)}
|
||||
// return run_wrapper_generation_task_sampling(prompt, RhaiGen{
|
||||
// name: name
|
||||
// dir: source_path
|
||||
// }) or {panic(err)}
|
||||
// }
|
||||
|
||||
// // Runs the task to generate Rhai wrappers
|
||||
// pub fn run_wrapper_generation_task_sampling(prompt_content string, gen RhaiGen) !string {
|
||||
// // Create a new task
|
||||
// mut task := escalayer.new_task(
|
||||
// name: 'rhai_wrapper_creator.escalayer'
|
||||
// description: 'Create Rhai wrappers for Rust functions that follow builder pattern and create examples corresponding to the provided example file'
|
||||
// )
|
||||
|
||||
// // Create model configs
|
||||
// sonnet_model := escalayer.ModelConfig{
|
||||
// name: 'anthropic/claude-3.7-sonnet'
|
||||
// provider: 'anthropic'
|
||||
// temperature: 0.7
|
||||
// max_tokens: 25000
|
||||
// }
|
||||
|
||||
// gpt4_model := escalayer.ModelConfig{
|
||||
// name: 'gpt-4'
|
||||
// provider: 'openai'
|
||||
// temperature: 0.7
|
||||
// max_tokens: 25000
|
||||
// }
|
||||
|
||||
// // Create a prompt function that returns the prepared content
|
||||
// prompt_function := fn [prompt_content] (input string) string {
|
||||
// return prompt_content
|
||||
// }
|
||||
|
||||
// // Define a single unit task that handles everything
|
||||
// task.new_unit_task(
|
||||
// name: 'create_rhai_wrappers'
|
||||
// prompt_function: prompt_function
|
||||
// callback_function: gen.process_rhai_wrappers
|
||||
// base_model: sonnet_model
|
||||
// retry_model: gpt4_model
|
||||
// retry_count: 1
|
||||
// )
|
||||
|
||||
// // Initiate the task
|
||||
// return task.initiate('')
|
||||
// }
|
||||
|
||||
// @[params]
|
||||
// pub struct WrapperModule {
|
||||
// pub:
|
||||
// lib_rs string
|
||||
// example_rs string
|
||||
// engine_rs string
|
||||
// cargo_toml string
|
||||
// example_rhai string
|
||||
// generic_wrapper_rs string
|
||||
// wrapper_rs string
|
||||
// }
|
||||
|
||||
// // functions is a list of function names that AI should extract and pass in
|
||||
// pub fn write_rhai_wrapper_module(wrapper WrapperModule, name string, path string)! string {
|
||||
|
||||
// // Define project directory paths
|
||||
// project_dir := '${path}/rhai'
|
||||
|
||||
// // Create the project using cargo new --lib
|
||||
// if os.exists(project_dir) {
|
||||
// os.rmdir_all(project_dir) or {
|
||||
// return error('Failed to clean existing project directory: ${err}')
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Run cargo new --lib to create the project
|
||||
// os.chdir(path) or {
|
||||
// return error('Failed to change directory to base directory: ${err}')
|
||||
// }
|
||||
|
||||
// cargo_new_result := os.execute('cargo new --lib rhai')
|
||||
// if cargo_new_result.exit_code != 0 {
|
||||
// return error('Failed to create new library project: ${cargo_new_result.output}')
|
||||
// }
|
||||
|
||||
// // Create examples directory
|
||||
// examples_dir := '${project_dir}/examples'
|
||||
// os.mkdir_all(examples_dir) or {
|
||||
// return error('Failed to create examples directory: ${err}')
|
||||
// }
|
||||
|
||||
// // Write the lib.rs file
|
||||
// if wrapper.lib_rs != '' {
|
||||
// os.write_file('${project_dir}/src/lib.rs', wrapper.lib_rs) or {
|
||||
// return error('Failed to write lib.rs: ${err}')
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Write the wrapper.rs file
|
||||
// if wrapper.wrapper_rs != '' {
|
||||
// os.write_file('${project_dir}/src/wrapper.rs', wrapper.wrapper_rs) or {
|
||||
// return error('Failed to write wrapper.rs: ${err}')
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Write the generic wrapper.rs file
|
||||
// if wrapper.generic_wrapper_rs != '' {
|
||||
// os.write_file('${project_dir}/src/generic_wrapper.rs', wrapper.generic_wrapper_rs) or {
|
||||
// return error('Failed to write generic wrapper.rs: ${err}')
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Write the example.rs file
|
||||
// if wrapper.example_rs != '' {
|
||||
// os.write_file('${examples_dir}/example.rs', wrapper.example_rs) or {
|
||||
// return error('Failed to write example.rs: ${err}')
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Write the engine.rs file if provided
|
||||
// if wrapper.engine_rs != '' {
|
||||
// os.write_file('${project_dir}/src/engine.rs', wrapper.engine_rs) or {
|
||||
// return error('Failed to write engine.rs: ${err}')
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Write the Cargo.toml file
|
||||
// os.write_file('${project_dir}/Cargo.toml', wrapper.cargo_toml) or {
|
||||
// return error('Failed to write Cargo.toml: ${err}')
|
||||
// }
|
||||
|
||||
// // Write the example.rhai file
|
||||
// os.write_file('${examples_dir}/example.rhai', wrapper.example_rhai) or {
|
||||
// return error('Failed to write example.rhai: ${err}')
|
||||
// }
|
||||
|
||||
// return project_dir
|
||||
// }
|
||||
|
||||
// // Extract module name from wrapper code
|
||||
// fn extract_module_name(code string) string {
|
||||
// lines := code.split('\n')
|
||||
|
||||
// for line in lines {
|
||||
// // Look for pub mod or mod declarations
|
||||
// if line.contains('pub mod ') || line.contains('mod ') {
|
||||
// // Extract module name
|
||||
// mut parts := []string{}
|
||||
// if line.contains('pub mod ') {
|
||||
// parts = line.split('pub mod ')
|
||||
// } else {
|
||||
// parts = line.split('mod ')
|
||||
// }
|
||||
|
||||
// if parts.len > 1 {
|
||||
// // Extract the module name and remove any trailing characters
|
||||
// mut name := parts[1].trim_space()
|
||||
// // Remove any trailing { or ; or whitespace
|
||||
// name = name.trim_right('{').trim_right(';').trim_space()
|
||||
// if name != '' {
|
||||
// return name
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// return ''
|
||||
// }
|
||||
|
||||
// // RhaiGen struct for generating Rhai wrappers
|
||||
// struct RhaiGen {
|
||||
// name string
|
||||
// dir string
|
||||
// }
|
||||
|
||||
// // Process the AI response and compile the generated code
|
||||
// fn (gen RhaiGen) process_rhai_wrappers(response string)! string {
|
||||
// // Extract code blocks from the response
|
||||
// code_blocks := extract_code_blocks(response) or {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// name := gen.name
|
||||
|
||||
// // Create a WrapperModule struct with the extracted content
|
||||
// wrapper := WrapperModule{
|
||||
// lib_rs: $tmpl('./templates/lib.rs')
|
||||
// wrapper_rs: code_blocks.wrapper_rs
|
||||
// example_rs: $tmpl('./templates/example.rs')
|
||||
// engine_rs: code_blocks.engine_rs
|
||||
// generic_wrapper_rs: $tmpl('./templates/generic_wrapper.rs')
|
||||
// cargo_toml: $tmpl('./templates/cargo.toml')
|
||||
// example_rhai: code_blocks.example_rhai
|
||||
// }
|
||||
|
||||
// // Create the wrapper module
|
||||
// project_dir := write_rhai_wrapper_module(wrapper, gen.name, gen.dir) or {
|
||||
// return error('Failed to create wrapper module: ${err}')
|
||||
// }
|
||||
|
||||
// // Build and run the project
|
||||
// build_output, run_output := rust.run_example(project_dir, 'example') or {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// return format_success_message(project_dir, build_output, run_output)
|
||||
// }
|
||||
|
||||
// // CodeBlocks struct to hold extracted code blocks
|
||||
// struct CodeBlocks {
|
||||
// wrapper_rs string
|
||||
// engine_rs string
|
||||
// example_rhai string
|
||||
// }
|
||||
|
||||
// // Extract code blocks from the AI response
|
||||
// fn extract_code_blocks(response string)! CodeBlocks {
|
||||
// // Extract wrapper.rs content
|
||||
// wrapper_rs_content := utils.extract_code_block(response, 'wrapper.rs', 'rust')
|
||||
// if wrapper_rs_content == '' {
|
||||
// return error('Failed to extract wrapper.rs content from response. Please ensure your code is properly formatted inside a code block that starts with ```rust\n// wrapper.rs and ends with ```')
|
||||
// }
|
||||
|
||||
// // Extract engine.rs content
|
||||
// mut engine_rs_content := utils.extract_code_block(response, 'engine.rs', 'rust')
|
||||
// if engine_rs_content == '' {
|
||||
// // Try to extract from the response without explicit language marker
|
||||
// engine_rs_content = utils.extract_code_block(response, 'engine.rs', '')
|
||||
// }
|
||||
|
||||
// // Extract example.rhai content
|
||||
// mut example_rhai_content := utils.extract_code_block(response, 'example.rhai', 'rhai')
|
||||
// if example_rhai_content == '' {
|
||||
// // Try to extract from the response without explicit language marker
|
||||
// example_rhai_content = utils.extract_code_block(response, 'example.rhai', '')
|
||||
// if example_rhai_content == '' {
|
||||
// return error('Failed to extract example.rhai content from response. Please ensure your code is properly formatted inside a code block that starts with ```rhai\n// example.rhai and ends with ```')
|
||||
// }
|
||||
// }
|
||||
|
||||
// return CodeBlocks{
|
||||
// wrapper_rs: wrapper_rs_content
|
||||
// engine_rs: engine_rs_content
|
||||
// example_rhai: example_rhai_content
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Format success message
|
||||
// fn format_success_message(project_dir string, build_output string, run_output string) string {
|
||||
// return 'Successfully generated Rhai wrappers and ran the example!\n\nProject created at: ${project_dir}\n\nBuild output:\n${build_output}\n\nRun output:\n${run_output}'
|
||||
// }
|
||||
125
lib/ai/mcp/rhai/logic/prompts/engine.md
Normal file
125
lib/ai/mcp/rhai/logic/prompts/engine.md
Normal file
@@ -0,0 +1,125 @@
# Engine

Here is an example of a well-implemented Rhai engine for the Nerdctl module:

## Example engine

```rust
// engine.rs

/// Create a Rhai engine with the Nerdctl module functions and types registered
pub fn create_rhai_engine() -> Engine {
    let mut engine = Engine::new();

    register_nerdctl_module(&mut engine).expect("failed to register nerdctl functions");
    register_nerdctl_types(&mut engine).expect("failed to register nerdctl types");

    engine
}
|
||||
|
||||
pub fn register_nerdctl_module(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
|
||||
// Register Container constructor
|
||||
engine.register_fn("nerdctl_container_new", container_new);
|
||||
engine.register_fn("nerdctl_container_from_image", container_from_image);
|
||||
|
||||
// Register Container instance methods
|
||||
engine.register_fn("reset", container_reset);
|
||||
engine.register_fn("with_port", container_with_port);
|
||||
engine.register_fn("with_volume", container_with_volume);
|
||||
engine.register_fn("with_env", container_with_env);
|
||||
engine.register_fn("with_network", container_with_network);
|
||||
engine.register_fn("with_network_alias", container_with_network_alias);
|
||||
engine.register_fn("with_cpu_limit", container_with_cpu_limit);
|
||||
engine.register_fn("with_memory_limit", container_with_memory_limit);
|
||||
engine.register_fn("with_restart_policy", container_with_restart_policy);
|
||||
engine.register_fn("with_health_check", container_with_health_check);
|
||||
engine.register_fn("with_ports", container_with_ports);
|
||||
engine.register_fn("with_volumes", container_with_volumes);
|
||||
engine.register_fn("with_envs", container_with_envs);
|
||||
engine.register_fn("with_network_aliases", container_with_network_aliases);
|
||||
engine.register_fn("with_memory_swap_limit", container_with_memory_swap_limit);
|
||||
engine.register_fn("with_cpu_shares", container_with_cpu_shares);
|
||||
engine.register_fn("with_health_check_options", container_with_health_check_options);
|
||||
engine.register_fn("with_snapshotter", container_with_snapshotter);
|
||||
engine.register_fn("with_detach", container_with_detach);
|
||||
engine.register_fn("build", container_build);
|
||||
engine.register_fn("start", container_start);
|
||||
engine.register_fn("stop", container_stop);
|
||||
engine.register_fn("remove", container_remove);
|
||||
engine.register_fn("exec", container_exec);
|
||||
engine.register_fn("logs", container_logs);
|
||||
engine.register_fn("copy", container_copy);
|
||||
|
||||
// Register legacy container functions (for backward compatibility)
|
||||
engine.register_fn("nerdctl_run", nerdctl_run);
|
||||
engine.register_fn("nerdctl_run_with_name", nerdctl_run_with_name);
|
||||
engine.register_fn("nerdctl_run_with_port", nerdctl_run_with_port);
|
||||
engine.register_fn("new_run_options", new_run_options);
|
||||
engine.register_fn("nerdctl_exec", nerdctl_exec);
|
||||
engine.register_fn("nerdctl_copy", nerdctl_copy);
|
||||
engine.register_fn("nerdctl_stop", nerdctl_stop);
|
||||
engine.register_fn("nerdctl_remove", nerdctl_remove);
|
||||
engine.register_fn("nerdctl_list", nerdctl_list);
|
||||
engine.register_fn("nerdctl_logs", nerdctl_logs);
|
||||
|
||||
// Register image functions
|
||||
engine.register_fn("nerdctl_images", nerdctl_images);
|
||||
engine.register_fn("nerdctl_image_remove", nerdctl_image_remove);
|
||||
engine.register_fn("nerdctl_image_push", nerdctl_image_push);
|
||||
engine.register_fn("nerdctl_image_tag", nerdctl_image_tag);
|
||||
engine.register_fn("nerdctl_image_pull", nerdctl_image_pull);
|
||||
engine.register_fn("nerdctl_image_commit", nerdctl_image_commit);
|
||||
engine.register_fn("nerdctl_image_build", nerdctl_image_build);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Register Nerdctl module types with the Rhai engine
|
||||
fn register_nerdctl_types(engine: &mut Engine) -> Result<(), Box<EvalAltResult>> {
|
||||
// Register Container type
|
||||
engine.register_type_with_name::<Container>("NerdctlContainer");
|
||||
|
||||
// Register getters for Container properties
|
||||
engine.register_get("name", |container: &mut Container| container.name.clone());
|
||||
engine.register_get("container_id", |container: &mut Container| {
|
||||
match &container.container_id {
|
||||
Some(id) => id.clone(),
|
||||
None => "".to_string(),
|
||||
}
|
||||
});
|
||||
engine.register_get("image", |container: &mut Container| {
|
||||
match &container.image {
|
||||
Some(img) => img.clone(),
|
||||
None => "".to_string(),
|
||||
}
|
||||
});
|
||||
engine.register_get("ports", |container: &mut Container| {
|
||||
let mut array = Array::new();
|
||||
for port in &container.ports {
|
||||
array.push(Dynamic::from(port.clone()));
|
||||
}
|
||||
array
|
||||
});
|
||||
engine.register_get("volumes", |container: &mut Container| {
|
||||
let mut array = Array::new();
|
||||
for volume in &container.volumes {
|
||||
array.push(Dynamic::from(volume.clone()));
|
||||
}
|
||||
array
|
||||
});
|
||||
engine.register_get("detach", |container: &mut Container| container.detach);
|
||||
|
||||
// Register Image type and methods
|
||||
engine.register_type_with_name::<Image>("NerdctlImage");
|
||||
|
||||
// Register getters for Image properties
|
||||
engine.register_get("id", |img: &mut Image| img.id.clone());
|
||||
engine.register_get("repository", |img: &mut Image| img.repository.clone());
|
||||
engine.register_get("tag", |img: &mut Image| img.tag.clone());
|
||||
engine.register_get("size", |img: &mut Image| img.size.clone());
|
||||
engine.register_get("created", |img: &mut Image| img.created.clone());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
186
lib/ai/mcp/rhai/logic/prompts/errors.md
Normal file
186
lib/ai/mcp/rhai/logic/prompts/errors.md
Normal file
@@ -0,0 +1,186 @@
# Common Errors in Rhai Wrappers and How to Fix Them

When creating Rhai wrappers for Rust functions, you might encounter several common errors. Here's how to address them:

## 1. `rhai_fn` Attribute Errors

```
error: cannot find attribute `rhai_fn` in this scope
```

**Solution**: Do not use the `#[rhai_fn]` attribute. Instead, register functions directly in the engine:

```rust
// INCORRECT:
#[rhai_fn(name = "pull_repository")]
pub fn pull_repository(repo: &mut GitRepo) -> Dynamic { ... }

// CORRECT:
pub fn pull_repository(repo: &mut GitRepo) -> Dynamic { ... }
// Then register in engine.rs:
engine.register_fn("pull_repository", pull_repository);
```

## 2. Function Visibility Errors

```
error[E0603]: function `create_rhai_engine` is private
```

**Solution**: Make sure to declare functions as `pub` when they need to be accessed from other modules:

```rust
// INCORRECT:
fn create_rhai_engine() -> Engine { ... }

// CORRECT:
pub fn create_rhai_engine() -> Engine { ... }
```

## 3. Type Errors with String vs &str

```
error[E0308]: `match` arms have incompatible types
```

**Solution**: Ensure consistent return types in match arms. When one arm returns a string literal (`&str`) and another returns a `String`, convert them to be consistent:

```rust
// INCORRECT:
match r.pull() {
    Ok(_) => "Successfully pulled changes",
    Err(err) => {
        let error_msg = format!("Error pulling changes: {}", err);
        error_msg // This is a String, not matching the &str above
    }
}

// CORRECT - Option 1: Convert &str to String
match r.pull() {
    Ok(_) => String::from("Successfully pulled changes"),
    Err(err) => format!("Error pulling changes: {}", err)
}

// CORRECT - Option 2: Use String::from for all string literals
match r.pull() {
    Ok(_) => String::from("Successfully pulled changes"),
    Err(err) => {
        let error_msg = format!("Error pulling changes: {}", err);
        error_msg
    }
}
```

## 4. Lifetime Errors

```
error: lifetime may not live long enough
```

**Solution**: When returning references from closures, you need to ensure the lifetime is valid. For path operations, convert to owned strings:

```rust
// INCORRECT:
repo_clone.wrap(|r| r.path())

// CORRECT:
repo_clone.wrap(|r| r.path().to_string())
```

## 5. Sized Trait Errors

```
error[E0277]: the size for values of type `Self` cannot be known at compilation time
```

**Solution**: Add a `Sized` bound to the `Self` type in trait definitions:

```rust
// INCORRECT:
trait RhaiWrapper {
    fn wrap<F, R>(&self, f: F) -> Dynamic
    where
        F: FnOnce(Self) -> R,
        R: ToRhai;
}

// CORRECT:
trait RhaiWrapper {
    fn wrap<F, R>(&self, f: F) -> Dynamic
    where
        F: FnOnce(Self) -> R,
        R: ToRhai,
        Self: Sized;
}
```

## 6. Unused Imports

```
warning: unused imports: `Engine`, `EvalAltResult`, `FLOAT`, `INT`, and `plugin::*`
```

**Solution**: Remove unused imports to clean up your code:

```rust
// INCORRECT:
use rhai::{Engine, EvalAltResult, plugin::*, FLOAT, INT, Dynamic, Map, Array};

// CORRECT - only keep what you use:
use rhai::{Dynamic, Array};
```

## 7. Overuse of Dynamic Types

```
error[E0277]: the trait bound `Vec<Dynamic>: generic_wrapper::ToRhai` is not satisfied
```

**Solution**: Use proper static typing instead of Dynamic types whenever possible. This improves type safety and makes the code more maintainable:

```rust
// INCORRECT: Returning Dynamic for everything
pub fn list_repositories(tree: &mut GitTree) -> Dynamic {
    let tree_clone = tree.clone();
    tree_clone.wrap(|t| {
        match t.list() {
            Ok(repos) => repos,
            Err(err) => vec![format!("Error listing repositories: {}", err)]
        }
    })
}

// CORRECT: Using proper Result types
pub fn list_repositories(tree: &mut GitTree) -> Result<Vec<String>, Box<EvalAltResult>> {
    let tree_clone = tree.clone();
    tree_clone.list().map_err(|err| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("Error listing repositories: {}", err).into(),
            rhai::Position::NONE
        ))
    })
}
```

## 8. Improper Error Handling

```
error[E0277]: the trait bound `for<'a> fn(&'a mut Engine) -> Result<(), Box<EvalAltResult>> {wrapper::register_git_module}: RhaiNativeFunc<_, _, _, _, _>` is not satisfied
```

**Solution**: When registering functions that return Result types, make sure they are properly handled:

```rust
// INCORRECT: Trying to register a function that returns Result<(), Box<EvalAltResult>>
engine.register_fn("register_git_module", wrapper::register_git_module);

// CORRECT: Wrap the function to handle the Result
engine.register_fn("register_git_module", |engine: &mut Engine| {
    match wrapper::register_git_module(engine) {
        Ok(_) => Dynamic::from(true),
        Err(err) => Dynamic::from(format!("Error: {}", err))
    }
});
```

Remember to adapt these solutions to your specific code context. The key is to maintain type consistency, proper visibility, correct lifetime management, and appropriate static typing.
40 lib/ai/mcp/rhai/logic/prompts/example_script.md (new file)
@@ -0,0 +1,40 @@
## Example Rhai Script

Now, given the source code you wrapped using Rhai executable functions, write an example Rhai script that uses those functions.

### Example Rhai script

```rhai
// example.rhai
// Create a new GitTree instance
let git_tree = new_git_tree("/Users/timurgordon/code");
print("\nCreated GitTree for: /Users/timurgordon/code");

// List repositories in the tree
let repos = list_repositories(git_tree);
print("Found " + repos.len() + " repositories");

if repos.len() > 0 {
    print("First repository: " + repos[0]);

    // Get the repository
    let repo_array = get_repositories(git_tree, repos[0]);

    if repo_array.len() > 0 {
        let repo = repo_array[0];
        print("\nRepository path: " + get_repo_path(repo));

        // Check if the repository has changes
        let has_changes = has_changes(repo);
        print("Has changes: " + has_changes);

        // Try to pull the repository
        print("\nTrying to pull repository...");
        let pull_result = pull_repository(repo);
        print("Pull result: " + pull_result);
    }
}

print("\nResult: Git operations completed successfully");
42 // Return value
```
99 lib/ai/mcp/rhai/logic/prompts/main.md (new file)
@@ -0,0 +1,99 @@
You are a Rust developer tasked with creating Rhai wrappers for Rust functions. Please review the following best practices for Rhai wrappers and then create the necessary files.
@{guides}
@{vector_vs_array}
@{example_rhai}
@{wrapper_md}

## Common Errors to Avoid
@{errors_md}
@{rhai_integration_fixes}
@{rhai_syntax_guide}

## Your Task

Please create a wrapper.rs file that implements Rhai wrappers for the provided Rust code, and an example.rhai script that demonstrates how to use these wrappers:

## Rust Code to Wrap

```rust
@{source_code}
```

IMPORTANT NOTES:
1. For Rhai imports, use: `use rhai::{Engine, EvalAltResult, plugin::*, Dynamic, Map, Array};` - only import what you actually use
2. The following dependencies are available in Cargo.toml:
- rhai = "1.21.0"
- serde = { version = "1.0", features = ["derive"] }
- serde_json = "1.0"
- @{source_pkg_info.name} = { path = "@{source_pkg_info.path}" }

3. For the wrapper: `use @{source_pkg_info.name}::@{source_pkg_info.module};` this way you can access the module functions and objects with @{source_pkg_info.module}::

4. The generic_wrapper.rs file will be hardcoded into the package, you can use code from there.

```rust
@{generic_wrapper_rs}
```

5. IMPORTANT: Prefer strongly typed return values over Dynamic types whenever possible. Only use Dynamic when absolutely necessary.
- For example, return `Result<String, Box<EvalAltResult>>` instead of `Dynamic` when a function returns a string
- Use `Result<bool, Box<EvalAltResult>>` instead of `Dynamic` when a function returns a boolean
- Use `Result<Vec<String>, Box<EvalAltResult>>` instead of `Dynamic` when a function returns a list of strings

6. Your code should include public functions that can be called from Rhai scripts

7. Make sure to implement all necessary helper functions for type conversion

8. DO NOT use the #[rhai_fn] attribute - functions will be registered directly in the engine

9. Make sure to handle string type consistency - use String::from() for string literals when returning in match arms with format!() strings

10. When returning path references, convert them to owned strings (e.g., path().to_string())

11. For error handling, use proper Result types with Box<EvalAltResult> for the error type:
```rust
// INCORRECT:
pub fn some_function(arg: &str) -> Dynamic {
    match some_operation(arg) {
        Ok(result) => Dynamic::from(result),
        Err(err) => Dynamic::from(format!("Error: {}", err))
    }
}

// CORRECT:
pub fn some_function(arg: &str) -> Result<String, Box<EvalAltResult>> {
    some_operation(arg).map_err(|err| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("Error: {}", err).into(),
            rhai::Position::NONE
        ))
    })
}
```

12. IMPORTANT: Format your response with the code between triple backticks as follows:

```rust
// wrapper.rs
// Your wrapper implementation here
```

```rust
// engine.rs
// Your engine.rs implementation here
```

```rhai
// example.rhai
// Your example Rhai script here
```

13. The example.rhai script should demonstrate the use of all the wrapper functions you create

14. The engine.rs file should contain a register_module function that registers all the wrapper functions and types with the Rhai engine, and a create function. For example:

@{engine}

MOST IMPORTANT:
import package being wrapped as `use @{source_pkg_info.name}::@{source_pkg_info.module}`
your engine create function is called `create_rhai_engine`
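
As a compact illustration of how notes 3, 5 and 11 fit together in wrapper.rs, a minimal sketch follows; `mypkg`, `mymodule` and `repo_path` are hypothetical stand-ins for the templated package, module and wrapped function names:

```rust
// wrapper.rs (sketch only; mypkg, mymodule and repo_path are hypothetical stand-ins)
use rhai::EvalAltResult;
use mypkg::mymodule;

// Strongly typed return value (note 5) with Result-based error handling (note 11).
pub fn repo_path(name: &str) -> Result<String, Box<EvalAltResult>> {
    mymodule::repo_path(name).map_err(|err| {
        Box::new(EvalAltResult::ErrorRuntime(
            format!("Error: {}", err).into(),
            rhai::Position::NONE,
        ))
    })
}
```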
473 lib/ai/mcp/rhai/logic/prompts/wrapper.md (new file)
@@ -0,0 +1,473 @@
|
||||
|
||||
# Wrapper
|
||||
|
||||
Here is an example of a well-implemented Rhai wrapper for the Nerdctl module:
|
||||
|
||||
## Example wrapper
|
||||
|
||||
```rust
|
||||
// wrapper.rs
|
||||
//! Rhai wrappers for Nerdctl module functions
|
||||
//!
|
||||
//! This module provides Rhai wrappers for the functions in the Nerdctl module.
|
||||
|
||||
use rhai::{Engine, EvalAltResult, Array, Dynamic, Map};
|
||||
use crate::virt::nerdctl::{self, NerdctlError, Image, Container};
|
||||
use crate::process::CommandResult;
|
||||
|
||||
// Helper functions for error conversion with improved context
|
||||
fn nerdctl_error_to_rhai_error<T>(result: Result<T, NerdctlError>) -> Result<T, Box<EvalAltResult>> {
|
||||
result.map_err(|e| {
|
||||
// Create a more detailed error message based on the error type
|
||||
let error_message = match &e {
|
||||
NerdctlError::CommandExecutionFailed(io_err) => {
|
||||
format!("Failed to execute nerdctl command: {}. This may indicate nerdctl is not installed or not in PATH.", io_err)
|
||||
},
|
||||
NerdctlError::CommandFailed(msg) => {
|
||||
format!("Nerdctl command failed: {}. Check container status and logs for more details.", msg)
|
||||
},
|
||||
NerdctlError::JsonParseError(msg) => {
|
||||
format!("Failed to parse nerdctl JSON output: {}. This may indicate an incompatible nerdctl version.", msg)
|
||||
},
|
||||
NerdctlError::ConversionError(msg) => {
|
||||
format!("Data conversion error: {}. This may indicate unexpected output format from nerdctl.", msg)
|
||||
},
|
||||
NerdctlError::Other(msg) => {
|
||||
format!("Nerdctl error: {}. This is an unexpected error.", msg)
|
||||
},
|
||||
};
|
||||
|
||||
Box::new(EvalAltResult::ErrorRuntime(
|
||||
error_message.into(),
|
||||
rhai::Position::NONE
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
//
|
||||
// Container Builder Pattern Implementation
|
||||
//
|
||||
|
||||
/// Create a new Container
|
||||
pub fn container_new(name: &str) -> Result<Container, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(Container::new(name))
|
||||
}
|
||||
|
||||
/// Create a Container from an image
|
||||
pub fn container_from_image(name: &str, image: &str) -> Result<Container, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(Container::from_image(name, image))
|
||||
}
|
||||
|
||||
/// Reset the container configuration to defaults while keeping the name and image
|
||||
pub fn container_reset(container: Container) -> Container {
|
||||
container.reset()
|
||||
}
|
||||
|
||||
/// Add a port mapping to a Container
|
||||
pub fn container_with_port(container: Container, port: &str) -> Container {
|
||||
container.with_port(port)
|
||||
}
|
||||
|
||||
/// Add a volume mount to a Container
|
||||
pub fn container_with_volume(container: Container, volume: &str) -> Container {
|
||||
container.with_volume(volume)
|
||||
}
|
||||
|
||||
/// Add an environment variable to a Container
|
||||
pub fn container_with_env(container: Container, key: &str, value: &str) -> Container {
|
||||
container.with_env(key, value)
|
||||
}
|
||||
|
||||
/// Set the network for a Container
|
||||
pub fn container_with_network(container: Container, network: &str) -> Container {
|
||||
container.with_network(network)
|
||||
}
|
||||
|
||||
/// Add a network alias to a Container
|
||||
pub fn container_with_network_alias(container: Container, alias: &str) -> Container {
|
||||
container.with_network_alias(alias)
|
||||
}
|
||||
|
||||
/// Set CPU limit for a Container
|
||||
pub fn container_with_cpu_limit(container: Container, cpus: &str) -> Container {
|
||||
container.with_cpu_limit(cpus)
|
||||
}
|
||||
|
||||
/// Set memory limit for a Container
|
||||
pub fn container_with_memory_limit(container: Container, memory: &str) -> Container {
|
||||
container.with_memory_limit(memory)
|
||||
}
|
||||
|
||||
/// Set restart policy for a Container
|
||||
pub fn container_with_restart_policy(container: Container, policy: &str) -> Container {
|
||||
container.with_restart_policy(policy)
|
||||
}
|
||||
|
||||
/// Set health check for a Container
|
||||
pub fn container_with_health_check(container: Container, cmd: &str) -> Container {
|
||||
container.with_health_check(cmd)
|
||||
}
|
||||
|
||||
/// Add multiple port mappings to a Container
|
||||
pub fn container_with_ports(mut container: Container, ports: Array) -> Container {
|
||||
for port in ports.iter() {
|
||||
if port.is_string() {
|
||||
let port_str = port.clone().cast::<String>();
|
||||
container = container.with_port(&port_str);
|
||||
}
|
||||
}
|
||||
container
|
||||
}
|
||||
|
||||
/// Add multiple volume mounts to a Container
|
||||
pub fn container_with_volumes(mut container: Container, volumes: Array) -> Container {
|
||||
for volume in volumes.iter() {
|
||||
if volume.is_string() {
|
||||
let volume_str = volume.clone().cast::<String>();
|
||||
container = container.with_volume(&volume_str);
|
||||
}
|
||||
}
|
||||
container
|
||||
}
|
||||
|
||||
/// Add multiple environment variables to a Container
|
||||
pub fn container_with_envs(mut container: Container, env_map: Map) -> Container {
|
||||
for (key, value) in env_map.iter() {
|
||||
if value.is_string() {
|
||||
let value_str = value.clone().cast::<String>();
|
||||
container = container.with_env(&key, &value_str);
|
||||
}
|
||||
}
|
||||
container
|
||||
}
|
||||
|
||||
/// Add multiple network aliases to a Container
|
||||
pub fn container_with_network_aliases(mut container: Container, aliases: Array) -> Container {
|
||||
for alias in aliases.iter() {
|
||||
if alias.is_string() {
|
||||
let alias_str = alias.clone().cast::<String>();
|
||||
container = container.with_network_alias(&alias_str);
|
||||
}
|
||||
}
|
||||
container
|
||||
}
|
||||
|
||||
/// Set memory swap limit for a Container
|
||||
pub fn container_with_memory_swap_limit(container: Container, memory_swap: &str) -> Container {
|
||||
container.with_memory_swap_limit(memory_swap)
|
||||
}
|
||||
|
||||
/// Set CPU shares for a Container
|
||||
pub fn container_with_cpu_shares(container: Container, shares: &str) -> Container {
|
||||
container.with_cpu_shares(shares)
|
||||
}
|
||||
|
||||
/// Set health check with options for a Container
|
||||
pub fn container_with_health_check_options(
|
||||
container: Container,
|
||||
cmd: &str,
|
||||
interval: Option<&str>,
|
||||
timeout: Option<&str>,
|
||||
retries: Option<i64>,
|
||||
start_period: Option<&str>
|
||||
) -> Container {
|
||||
// Convert i64 to u32 for retries
|
||||
let retries_u32 = retries.map(|r| r as u32);
|
||||
container.with_health_check_options(cmd, interval, timeout, retries_u32, start_period)
|
||||
}
|
||||
|
||||
/// Set snapshotter for a Container
|
||||
pub fn container_with_snapshotter(container: Container, snapshotter: &str) -> Container {
|
||||
container.with_snapshotter(snapshotter)
|
||||
}
|
||||
|
||||
/// Set detach mode for a Container
|
||||
pub fn container_with_detach(container: Container, detach: bool) -> Container {
|
||||
container.with_detach(detach)
|
||||
}
|
||||
|
||||
/// Build and run the Container
|
||||
///
|
||||
/// This function builds and runs the container using the configured options.
|
||||
/// It provides detailed error information if the build fails.
|
||||
pub fn container_build(container: Container) -> Result<Container, Box<EvalAltResult>> {
|
||||
// Get container details for better error reporting
|
||||
let container_name = container.name.clone();
|
||||
let image = container.image.clone().unwrap_or_else(|| "none".to_string());
|
||||
let ports = container.ports.clone();
|
||||
let volumes = container.volumes.clone();
|
||||
let env_vars = container.env_vars.clone();
|
||||
|
||||
// Try to build the container
|
||||
let build_result = container.build();
|
||||
|
||||
// Handle the result with improved error context
|
||||
match build_result {
|
||||
Ok(built_container) => {
|
||||
// Container built successfully
|
||||
Ok(built_container)
|
||||
},
|
||||
Err(err) => {
|
||||
// Add more context to the error
|
||||
let enhanced_error = match err {
|
||||
NerdctlError::CommandFailed(msg) => {
|
||||
// Provide more detailed error information
|
||||
let mut enhanced_msg = format!("Failed to build container '{}' from image '{}': {}",
|
||||
container_name, image, msg);
|
||||
|
||||
// Add information about configured options that might be relevant
|
||||
if !ports.is_empty() {
|
||||
enhanced_msg.push_str(&format!("\nConfigured ports: {:?}", ports));
|
||||
}
|
||||
|
||||
if !volumes.is_empty() {
|
||||
enhanced_msg.push_str(&format!("\nConfigured volumes: {:?}", volumes));
|
||||
}
|
||||
|
||||
if !env_vars.is_empty() {
|
||||
enhanced_msg.push_str(&format!("\nConfigured environment variables: {:?}", env_vars));
|
||||
}
|
||||
|
||||
// Add suggestions for common issues
|
||||
if msg.contains("not found") || msg.contains("no such image") {
|
||||
enhanced_msg.push_str("\nSuggestion: The specified image may not exist or may not be pulled yet. Try pulling the image first with nerdctl_image_pull().");
|
||||
} else if msg.contains("port is already allocated") {
|
||||
enhanced_msg.push_str("\nSuggestion: One of the specified ports is already in use. Try using a different port or stopping the container using that port.");
|
||||
} else if msg.contains("permission denied") {
|
||||
enhanced_msg.push_str("\nSuggestion: Permission issues detected. Check if you have the necessary permissions to create containers or access the specified volumes.");
|
||||
}
|
||||
|
||||
NerdctlError::CommandFailed(enhanced_msg)
|
||||
},
|
||||
_ => err
|
||||
};
|
||||
|
||||
nerdctl_error_to_rhai_error(Err(enhanced_error))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Start the Container and verify it's running
|
||||
///
|
||||
/// This function starts the container and verifies that it's actually running.
|
||||
/// It returns detailed error information if the container fails to start or
|
||||
/// if it starts but stops immediately.
|
||||
pub fn container_start(container: &mut Container) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
// Get container details for better error reporting
|
||||
let container_name = container.name.clone();
|
||||
let container_id = container.container_id.clone().unwrap_or_else(|| "unknown".to_string());
|
||||
|
||||
// Try to start the container
|
||||
let start_result = container.start();
|
||||
|
||||
// Handle the result with improved error context
|
||||
match start_result {
|
||||
Ok(result) => {
|
||||
// Container started successfully
|
||||
Ok(result)
|
||||
},
|
||||
Err(err) => {
|
||||
// Add more context to the error
|
||||
let enhanced_error = match err {
|
||||
NerdctlError::CommandFailed(msg) => {
|
||||
// Check if this is a "container already running" error, which is not really an error
|
||||
if msg.contains("already running") {
|
||||
return Ok(CommandResult {
|
||||
stdout: format!("Container {} is already running", container_name),
|
||||
stderr: "".to_string(),
|
||||
success: true,
|
||||
code: 0,
|
||||
});
|
||||
}
|
||||
|
||||
// Try to get more information about why the container might have failed to start
|
||||
let mut enhanced_msg = format!("Failed to start container '{}' (ID: {}): {}",
|
||||
container_name, container_id, msg);
|
||||
|
||||
// Try to check if the image exists
|
||||
if let Some(image) = &container.image {
|
||||
enhanced_msg.push_str(&format!("\nContainer was using image: {}", image));
|
||||
}
|
||||
|
||||
NerdctlError::CommandFailed(enhanced_msg)
|
||||
},
|
||||
_ => err
|
||||
};
|
||||
|
||||
nerdctl_error_to_rhai_error(Err(enhanced_error))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop the Container
|
||||
pub fn container_stop(container: &mut Container) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(container.stop())
|
||||
}
|
||||
|
||||
/// Remove the Container
|
||||
pub fn container_remove(container: &mut Container) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(container.remove())
|
||||
}
|
||||
|
||||
/// Execute a command in the Container
|
||||
pub fn container_exec(container: &mut Container, command: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(container.exec(command))
|
||||
}
|
||||
|
||||
/// Get container logs
|
||||
pub fn container_logs(container: &mut Container) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
// Get container details for better error reporting
|
||||
let container_name = container.name.clone();
|
||||
let container_id = container.container_id.clone().unwrap_or_else(|| "unknown".to_string());
|
||||
|
||||
// Use the nerdctl::logs function
|
||||
let logs_result = nerdctl::logs(&container_id);
|
||||
|
||||
match logs_result {
|
||||
Ok(result) => {
|
||||
Ok(result)
|
||||
},
|
||||
Err(err) => {
|
||||
// Add more context to the error
|
||||
let enhanced_error = NerdctlError::CommandFailed(
|
||||
format!("Failed to get logs for container '{}' (ID: {}): {}",
|
||||
container_name, container_id, err)
|
||||
);
|
||||
|
||||
nerdctl_error_to_rhai_error(Err(enhanced_error))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Copy files between the Container and local filesystem
|
||||
pub fn container_copy(container: &mut Container, source: &str, dest: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(container.copy(source, dest))
|
||||
}
|
||||
|
||||
/// Create a new Map with default run options
|
||||
pub fn new_run_options() -> Map {
|
||||
let mut map = Map::new();
|
||||
map.insert("name".into(), Dynamic::UNIT);
|
||||
map.insert("detach".into(), Dynamic::from(true));
|
||||
map.insert("ports".into(), Dynamic::from(Array::new()));
|
||||
map.insert("snapshotter".into(), Dynamic::from("native"));
|
||||
map
|
||||
}
|
||||
|
||||
//
|
||||
// Container Function Wrappers
|
||||
//
|
||||
|
||||
/// Wrapper for nerdctl::run
|
||||
///
|
||||
/// Run a container from an image.
|
||||
pub fn nerdctl_run(image: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::run(image, None, true, None, None))
|
||||
}
|
||||
|
||||
/// Run a container with a name
|
||||
pub fn nerdctl_run_with_name(image: &str, name: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::run(image, Some(name), true, None, None))
|
||||
}
|
||||
|
||||
/// Run a container with a port mapping
|
||||
pub fn nerdctl_run_with_port(image: &str, name: &str, port: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
let ports = vec![port];
|
||||
nerdctl_error_to_rhai_error(nerdctl::run(image, Some(name), true, Some(&ports), None))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::exec
|
||||
///
|
||||
/// Execute a command in a container.
|
||||
pub fn nerdctl_exec(container: &str, command: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::exec(container, command))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::copy
|
||||
///
|
||||
/// Copy files between container and local filesystem.
|
||||
pub fn nerdctl_copy(source: &str, dest: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::copy(source, dest))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::stop
|
||||
///
|
||||
/// Stop a container.
|
||||
pub fn nerdctl_stop(container: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::stop(container))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::remove
|
||||
///
|
||||
/// Remove a container.
|
||||
pub fn nerdctl_remove(container: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::remove(container))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::list
|
||||
///
|
||||
/// List containers.
|
||||
pub fn nerdctl_list(all: bool) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::list(all))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::logs
|
||||
///
|
||||
/// Get container logs.
|
||||
pub fn nerdctl_logs(container: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::logs(container))
|
||||
}
|
||||
|
||||
//
|
||||
// Image Function Wrappers
|
||||
//
|
||||
|
||||
/// Wrapper for nerdctl::images
|
||||
///
|
||||
/// List images in local storage.
|
||||
pub fn nerdctl_images() -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::images())
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::image_remove
|
||||
///
|
||||
/// Remove one or more images.
|
||||
pub fn nerdctl_image_remove(image: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::image_remove(image))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::image_push
|
||||
///
|
||||
/// Push an image to a registry.
|
||||
pub fn nerdctl_image_push(image: &str, destination: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::image_push(image, destination))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::image_tag
|
||||
///
|
||||
/// Add an additional name to a local image.
|
||||
pub fn nerdctl_image_tag(image: &str, new_name: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::image_tag(image, new_name))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::image_pull
|
||||
///
|
||||
/// Pull an image from a registry.
|
||||
pub fn nerdctl_image_pull(image: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::image_pull(image))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::image_commit
|
||||
///
|
||||
/// Commit a container to an image.
|
||||
pub fn nerdctl_image_commit(container: &str, image_name: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::image_commit(container, image_name))
|
||||
}
|
||||
|
||||
/// Wrapper for nerdctl::image_build
|
||||
///
|
||||
/// Build an image using a Dockerfile.
|
||||
pub fn nerdctl_image_build(tag: &str, context_path: &str) -> Result<CommandResult, Box<EvalAltResult>> {
|
||||
nerdctl_error_to_rhai_error(nerdctl::image_build(tag, context_path))
|
||||
}
|
||||
```
|
||||
10 lib/ai/mcp/rhai/logic/templates/cargo.toml (new file)
@@ -0,0 +1,10 @@
[package]
name = "@{name}_rhai"
version = "0.1.0"
edition = "2021"

[dependencies]
rhai = "1.21.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@{source_pkg_info.name} = { path = "@{source_pkg_info.path}" }
12 lib/ai/mcp/rhai/logic/templates/engine.rs (new file)
@@ -0,0 +1,12 @@
use rhai::{Engine, EvalAltResult, Map, Dynamic};
use crate::wrapper;

pub fn create_rhai_engine() -> Engine {
    let mut engine = Engine::new();

    @for function in functions
    engine.register_fn("@{function}", wrapper::@{function});
    @end

    engine
}
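
For reference, after template expansion the generated engine.rs might look roughly like the sketch below, assuming the wrapper exposes functions named `container_new` and `container_start` (names borrowed from the wrapper example earlier; the actual list depends on the module being wrapped):

```rust
// engine.rs as it might look after expansion (illustrative function names)
use rhai::Engine;
use crate::wrapper;

pub fn create_rhai_engine() -> Engine {
    let mut engine = Engine::new();

    engine.register_fn("container_new", wrapper::container_new);
    engine.register_fn("container_start", wrapper::container_start);

    engine
}
```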
40 lib/ai/mcp/rhai/logic/templates/example.rs (new file)
@@ -0,0 +1,40 @@
use std::{fs, path::Path};
use @{name}_rhai::create_rhai_engine;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("=== Rhai Wrapper Example ===");

    // Create a Rhai engine with functionality
    let mut engine = create_rhai_engine();
    println!("Successfully created Rhai engine");

    // Get the path to the example.rhai script
    let script_path = get_script_path()?;
    println!("Loading script from: {}", script_path.display());

    // Load the script content
    let script = fs::read_to_string(&script_path)
        .map_err(|e| format!("Failed to read script file: {}", e))?;

    // Run the script
    println!("\n=== Running Rhai script ===");
    let result = engine.eval::<i64>(&script)
        .map_err(|e| format!("Script execution error: {}", e))?;

    println!("\nScript returned: {}", result);
    println!("\nExample completed successfully!");
    Ok(())
}

fn get_script_path() -> Result<std::path::PathBuf, String> {
    // When running with cargo run --example, the script will be in the examples directory
    let script_path = Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("examples")
        .join("example.rhai");

    if script_path.exists() {
        Ok(script_path)
    } else {
        Err(format!("Could not find example.rhai script at {}", script_path.display()))
    }
}
510 lib/ai/mcp/rhai/logic/templates/functions.rs (new file)
@@ -0,0 +1,510 @@
|
||||
// File: /root/code/git.ourworld.tf/herocode/sal/src/virt/nerdctl/container_builder.rs
|
||||
|
||||
use std::collections::HashMap;
|
||||
use crate::virt::nerdctl::{execute_nerdctl_command, NerdctlError};
|
||||
use super::container_types::{Container, HealthCheck};
|
||||
use super::health_check_script::prepare_health_check_command;
|
||||
|
||||
impl Container {
|
||||
/// Reset the container configuration to defaults while keeping the name and image
|
||||
/// If the container exists, it will be stopped and removed.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn reset(self) -> Self {
|
||||
let name = self.name;
|
||||
let image = self.image.clone();
|
||||
|
||||
// If container exists, stop and remove it
|
||||
if let Some(container_id) = &self.container_id {
|
||||
println!("Container exists. Stopping and removing container '{}'...", name);
|
||||
|
||||
// Try to stop the container
|
||||
let _ = execute_nerdctl_command(&["stop", container_id]);
|
||||
|
||||
// Try to remove the container
|
||||
let _ = execute_nerdctl_command(&["rm", container_id]);
|
||||
}
|
||||
|
||||
// Create a new container with just the name and image, but no container_id
|
||||
Self {
|
||||
name,
|
||||
container_id: None, // Reset container_id to None since we removed the container
|
||||
image,
|
||||
config: std::collections::HashMap::new(),
|
||||
ports: Vec::new(),
|
||||
volumes: Vec::new(),
|
||||
env_vars: std::collections::HashMap::new(),
|
||||
network: None,
|
||||
network_aliases: Vec::new(),
|
||||
cpu_limit: None,
|
||||
memory_limit: None,
|
||||
memory_swap_limit: None,
|
||||
cpu_shares: None,
|
||||
restart_policy: None,
|
||||
health_check: None,
|
||||
detach: false,
|
||||
snapshotter: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a port mapping
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `port` - Port mapping (e.g., "8080:80")
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_port(mut self, port: &str) -> Self {
|
||||
self.ports.push(port.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Add multiple port mappings
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `ports` - Array of port mappings (e.g., ["8080:80", "8443:443"])
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_ports(mut self, ports: &[&str]) -> Self {
|
||||
for port in ports {
|
||||
self.ports.push(port.to_string());
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a volume mount
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `volume` - Volume mount (e.g., "/host/path:/container/path")
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_volume(mut self, volume: &str) -> Self {
|
||||
self.volumes.push(volume.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Add multiple volume mounts
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `volumes` - Array of volume mounts (e.g., ["/host/path1:/container/path1", "/host/path2:/container/path2"])
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_volumes(mut self, volumes: &[&str]) -> Self {
|
||||
for volume in volumes {
|
||||
self.volumes.push(volume.to_string());
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Add an environment variable
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `key` - Environment variable name
|
||||
/// * `value` - Environment variable value
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_env(mut self, key: &str, value: &str) -> Self {
|
||||
self.env_vars.insert(key.to_string(), value.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Add multiple environment variables
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `env_map` - Map of environment variable names to values
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_envs(mut self, env_map: &HashMap<&str, &str>) -> Self {
|
||||
for (key, value) in env_map {
|
||||
self.env_vars.insert(key.to_string(), value.to_string());
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the network for the container
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `network` - Network name
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_network(mut self, network: &str) -> Self {
|
||||
self.network = Some(network.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a network alias for the container
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `alias` - Network alias
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_network_alias(mut self, alias: &str) -> Self {
|
||||
self.network_aliases.push(alias.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Add multiple network aliases for the container
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `aliases` - Array of network aliases
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_network_aliases(mut self, aliases: &[&str]) -> Self {
|
||||
for alias in aliases {
|
||||
self.network_aliases.push(alias.to_string());
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Set CPU limit for the container
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `cpus` - CPU limit (e.g., "0.5" for half a CPU, "2" for 2 CPUs)
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_cpu_limit(mut self, cpus: &str) -> Self {
|
||||
self.cpu_limit = Some(cpus.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set memory limit for the container
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `memory` - Memory limit (e.g., "512m" for 512MB, "1g" for 1GB)
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_memory_limit(mut self, memory: &str) -> Self {
|
||||
self.memory_limit = Some(memory.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set memory swap limit for the container
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `memory_swap` - Memory swap limit (e.g., "1g" for 1GB)
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_memory_swap_limit(mut self, memory_swap: &str) -> Self {
|
||||
self.memory_swap_limit = Some(memory_swap.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set CPU shares for the container (relative weight)
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `shares` - CPU shares (e.g., "1024" for default, "512" for half)
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_cpu_shares(mut self, shares: &str) -> Self {
|
||||
self.cpu_shares = Some(shares.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set restart policy for the container
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `policy` - Restart policy (e.g., "no", "always", "on-failure", "unless-stopped")
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_restart_policy(mut self, policy: &str) -> Self {
|
||||
self.restart_policy = Some(policy.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set a simple health check for the container
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `cmd` - Command to run for health check (e.g., "curl -f http://localhost/ || exit 1")
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_health_check(mut self, cmd: &str) -> Self {
|
||||
// Use the health check script module to prepare the command
|
||||
let prepared_cmd = prepare_health_check_command(cmd, &self.name);
|
||||
|
||||
self.health_check = Some(HealthCheck {
|
||||
cmd: prepared_cmd,
|
||||
interval: None,
|
||||
timeout: None,
|
||||
retries: None,
|
||||
start_period: None,
|
||||
});
|
||||
self
|
||||
}
|
||||
|
||||
/// Set a health check with custom options for the container
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `cmd` - Command to run for health check
|
||||
/// * `interval` - Optional time between running the check (e.g., "30s", "1m")
|
||||
/// * `timeout` - Optional maximum time to wait for a check to complete (e.g., "30s", "1m")
|
||||
/// * `retries` - Optional number of consecutive failures needed to consider unhealthy
|
||||
/// * `start_period` - Optional start period for the container to initialize before counting retries (e.g., "30s", "1m")
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_health_check_options(
|
||||
mut self,
|
||||
cmd: &str,
|
||||
interval: Option<&str>,
|
||||
timeout: Option<&str>,
|
||||
retries: Option<u32>,
|
||||
start_period: Option<&str>,
|
||||
) -> Self {
|
||||
// Use the health check script module to prepare the command
|
||||
let prepared_cmd = prepare_health_check_command(cmd, &self.name);
|
||||
|
||||
let mut health_check = HealthCheck {
|
||||
cmd: prepared_cmd,
|
||||
interval: None,
|
||||
timeout: None,
|
||||
retries: None,
|
||||
start_period: None,
|
||||
};
|
||||
|
||||
if let Some(interval_value) = interval {
|
||||
health_check.interval = Some(interval_value.to_string());
|
||||
}
|
||||
|
||||
if let Some(timeout_value) = timeout {
|
||||
health_check.timeout = Some(timeout_value.to_string());
|
||||
}
|
||||
|
||||
if let Some(retries_value) = retries {
|
||||
health_check.retries = Some(retries_value);
|
||||
}
|
||||
|
||||
if let Some(start_period_value) = start_period {
|
||||
health_check.start_period = Some(start_period_value.to_string());
|
||||
}
|
||||
|
||||
self.health_check = Some(health_check);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the snapshotter
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `snapshotter` - Snapshotter to use
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_snapshotter(mut self, snapshotter: &str) -> Self {
|
||||
self.snapshotter = Some(snapshotter.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set whether to run in detached mode
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `detach` - Whether to run in detached mode
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Self` - The container instance for method chaining
|
||||
pub fn with_detach(mut self, detach: bool) -> Self {
|
||||
self.detach = detach;
|
||||
self
|
||||
}
|
||||
|
||||
/// Build the container
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `Result<Self, NerdctlError>` - Container instance or error
|
||||
pub fn build(self) -> Result<Self, NerdctlError> {
|
||||
// If container already exists, return it
|
||||
if self.container_id.is_some() {
|
||||
return Ok(self);
|
||||
}
|
||||
|
||||
// If no image is specified, return an error
|
||||
let image = match &self.image {
|
||||
Some(img) => img,
|
||||
None => return Err(NerdctlError::Other("No image specified for container creation".to_string())),
|
||||
};
|
||||
|
||||
// Build the command arguments as strings
|
||||
let mut args_strings = Vec::new();
|
||||
args_strings.push("run".to_string());
|
||||
|
||||
if self.detach {
|
||||
args_strings.push("-d".to_string());
|
||||
}
|
||||
|
||||
args_strings.push("--name".to_string());
|
||||
args_strings.push(self.name.clone());
|
||||
|
||||
// Add port mappings
|
||||
for port in &self.ports {
|
||||
args_strings.push("-p".to_string());
|
||||
args_strings.push(port.clone());
|
||||
}
|
||||
|
||||
// Add volume mounts
|
||||
for volume in &self.volumes {
|
||||
args_strings.push("-v".to_string());
|
||||
args_strings.push(volume.clone());
|
||||
}
|
||||
|
||||
// Add environment variables
|
||||
for (key, value) in &self.env_vars {
|
||||
args_strings.push("-e".to_string());
|
||||
args_strings.push(format!("{}={}", key, value));
|
||||
}
|
||||
|
||||
// Add network configuration
|
||||
if let Some(network) = &self.network {
|
||||
args_strings.push("--network".to_string());
|
||||
args_strings.push(network.clone());
|
||||
}
|
||||
|
||||
// Add network aliases
|
||||
for alias in &self.network_aliases {
|
||||
args_strings.push("--network-alias".to_string());
|
||||
args_strings.push(alias.clone());
|
||||
}
|
||||
|
||||
// Add resource limits
|
||||
if let Some(cpu_limit) = &self.cpu_limit {
|
||||
args_strings.push("--cpus".to_string());
|
||||
args_strings.push(cpu_limit.clone());
|
||||
}
|
||||
|
||||
if let Some(memory_limit) = &self.memory_limit {
|
||||
args_strings.push("--memory".to_string());
|
||||
args_strings.push(memory_limit.clone());
|
||||
}
|
||||
|
||||
if let Some(memory_swap_limit) = &self.memory_swap_limit {
|
||||
args_strings.push("--memory-swap".to_string());
|
||||
args_strings.push(memory_swap_limit.clone());
|
||||
}
|
||||
|
||||
if let Some(cpu_shares) = &self.cpu_shares {
|
||||
args_strings.push("--cpu-shares".to_string());
|
||||
args_strings.push(cpu_shares.clone());
|
||||
}
|
||||
|
||||
// Add restart policy
|
||||
if let Some(restart_policy) = &self.restart_policy {
|
||||
args_strings.push("--restart".to_string());
|
||||
args_strings.push(restart_policy.clone());
|
||||
}
|
||||
|
||||
// Add health check
|
||||
if let Some(health_check) = &self.health_check {
|
||||
args_strings.push("--health-cmd".to_string());
|
||||
args_strings.push(health_check.cmd.clone());
|
||||
|
||||
if let Some(interval) = &health_check.interval {
|
||||
args_strings.push("--health-interval".to_string());
|
||||
args_strings.push(interval.clone());
|
||||
}
|
||||
|
||||
if let Some(timeout) = &health_check.timeout {
|
||||
args_strings.push("--health-timeout".to_string());
|
||||
args_strings.push(timeout.clone());
|
||||
}
|
||||
|
||||
if let Some(retries) = &health_check.retries {
|
||||
args_strings.push("--health-retries".to_string());
|
||||
args_strings.push(retries.to_string());
|
||||
}
|
||||
|
||||
if let Some(start_period) = &health_check.start_period {
|
||||
args_strings.push("--health-start-period".to_string());
|
||||
args_strings.push(start_period.clone());
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(snapshotter_value) = &self.snapshotter {
|
||||
args_strings.push("--snapshotter".to_string());
|
||||
args_strings.push(snapshotter_value.clone());
|
||||
}
|
||||
|
||||
// Add flags to avoid BPF issues
|
||||
args_strings.push("--cgroup-manager=cgroupfs".to_string());
|
||||
|
||||
args_strings.push(image.clone());
|
||||
|
||||
// Convert to string slices for the command
|
||||
let args: Vec<&str> = args_strings.iter().map(|s| s.as_str()).collect();
|
||||
|
||||
// Execute the command
|
||||
let result = execute_nerdctl_command(&args)?;
|
||||
|
||||
// Get the container ID from the output
|
||||
let container_id = result.stdout.trim().to_string();
|
||||
|
||||
Ok(Self {
|
||||
name: self.name,
|
||||
container_id: Some(container_id),
|
||||
image: self.image,
|
||||
config: self.config,
|
||||
ports: self.ports,
|
||||
volumes: self.volumes,
|
||||
env_vars: self.env_vars,
|
||||
network: self.network,
|
||||
network_aliases: self.network_aliases,
|
||||
cpu_limit: self.cpu_limit,
|
||||
memory_limit: self.memory_limit,
|
||||
memory_swap_limit: self.memory_swap_limit,
|
||||
cpu_shares: self.cpu_shares,
|
||||
restart_policy: self.restart_policy,
|
||||
health_check: self.health_check,
|
||||
detach: self.detach,
|
||||
snapshotter: self.snapshotter,
|
||||
})
|
||||
}
|
||||
}
|
||||
132 lib/ai/mcp/rhai/logic/templates/generic_wrapper.rs (new file)
@@ -0,0 +1,132 @@
use std::collections::HashMap;
use rhai::{Dynamic, Map, Array};

/// Local wrapper trait for sal::rhai::ToRhai to avoid orphan rule violations
pub trait ToRhai {
    /// Convert to a Rhai Dynamic value
    fn to_rhai(&self) -> Dynamic;
}

// Implementation of ToRhai for Dynamic
impl ToRhai for Dynamic {
    fn to_rhai(&self) -> Dynamic {
        self.clone()
    }
}

/// Generic trait for wrapping Rust functions to be used with Rhai
pub trait RhaiWrapper {
    /// Wrap a function that takes ownership of self
    fn wrap_consuming<F, R>(self, f: F) -> Dynamic
    where
        Self: Sized + Clone,
        F: FnOnce(Self) -> R,
        R: ToRhai;

    /// Wrap a function that takes a mutable reference to self
    fn wrap_mut<F, R>(&mut self, f: F) -> Dynamic
    where
        Self: Sized + Clone,
        F: FnOnce(&mut Self) -> R,
        R: ToRhai;

    /// Wrap a function that takes an immutable reference to self
    fn wrap<F, R>(&self, f: F) -> Dynamic
    where
        Self: Sized + Clone,
        F: FnOnce(&Self) -> R,
        R: ToRhai;
}

/// Implementation of RhaiWrapper for any type
impl<T> RhaiWrapper for T {
    fn wrap_consuming<F, R>(self, f: F) -> Dynamic
    where
        Self: Sized + Clone,
        F: FnOnce(Self) -> R,
        R: ToRhai,
    {
        let result = f(self);
        result.to_rhai()
    }

    fn wrap_mut<F, R>(&mut self, f: F) -> Dynamic
    where
        Self: Sized + Clone,
        F: FnOnce(&mut Self) -> R,
        R: ToRhai,
    {
        let result = f(self);
        result.to_rhai()
    }

    fn wrap<F, R>(&self, f: F) -> Dynamic
    where
        Self: Sized + Clone,
        F: FnOnce(&Self) -> R,
        R: ToRhai,
    {
        let result = f(self);
        result.to_rhai()
    }
}

/// Convert a Rhai Map to a Rust HashMap
pub fn map_to_hashmap(map: &Map) -> HashMap<String, String> {
    let mut result = HashMap::new();
    for (key, value) in map.iter() {
        let k = key.clone().to_string();
        let v = value.clone().to_string();
        if !k.is_empty() && !v.is_empty() {
            result.insert(k, v);
        }
    }
    result
}

/// Convert a HashMap<String, String> to a Rhai Map
pub fn hashmap_to_map(map: &HashMap<String, String>) -> Map {
    let mut result = Map::new();
    for (key, value) in map.iter() {
        result.insert(key.clone().into(), Dynamic::from(value.clone()));
    }
    result
}

/// Convert a Rhai Array to a Vec of strings
pub fn array_to_vec_string(array: &Array) -> Vec<String> {
    array.iter()
        .filter_map(|item| {
            let s = item.clone().to_string();
            if !s.is_empty() { Some(s) } else { None }
        })
        .collect()
}

/// Helper function to convert Dynamic to Option<String>
pub fn dynamic_to_string_option(value: &Dynamic) -> Option<String> {
    if value.is_string() {
        Some(value.clone().to_string())
    } else {
        None
    }
}

/// Helper function to convert Dynamic to Option<u32>
pub fn dynamic_to_u32_option(value: &Dynamic) -> Option<u32> {
    if value.is_int() {
        Some(value.as_int().unwrap() as u32)
    } else {
        None
    }
}

/// Helper function to convert Dynamic to Option<&str> with lifetime management
pub fn dynamic_to_str_option<'a>(value: &Dynamic, storage: &'a mut String) -> Option<&'a str> {
    if value.is_string() {
        *storage = value.clone().to_string();
        Some(storage.as_str())
    } else {
        None
    }
}
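
To show how these helpers are meant to be used, here is a brief sketch (illustrative only, not part of the generated template) that round-trips values between Rhai and plain Rust types:

```rust
// Sketch: round-tripping values through the conversion helpers (illustrative only).
use std::collections::HashMap;
use rhai::{Array, Map};
use crate::generic_wrapper::{array_to_vec_string, hashmap_to_map, map_to_hashmap};

pub fn demo_conversions(options: Map, tags: Array) -> Map {
    // Rhai Map -> HashMap<String, String>, e.g. options passed from a script.
    let mut opts: HashMap<String, String> = map_to_hashmap(&options);

    // Rhai Array -> Vec<String>, e.g. a list of tags passed from a script.
    let tag_list: Vec<String> = array_to_vec_string(&tags);
    opts.insert("tag_count".to_string(), tag_list.len().to_string());

    // HashMap -> Rhai Map so the script gets a native map back.
    hashmap_to_map(&opts)
}
```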
11 lib/ai/mcp/rhai/logic/templates/lib.rs (new file)
@@ -0,0 +1,11 @@
// Re-export the utility modules
pub mod generic_wrapper;
pub mod wrapper;
pub mod engine;

// Re-export the utility traits and functions
pub use generic_wrapper::{RhaiWrapper, map_to_hashmap, array_to_vec_string,
    dynamic_to_string_option, hashmap_to_map};
pub use engine::create_rhai_engine;

// The create_rhai_engine function is now in the engine module
22 lib/ai/mcp/rhai/mcp/command.v (new file)
@@ -0,0 +1,22 @@
module mcp

import cli

pub const command = cli.Command{
	sort_flags:  true
	name:        'rhai'
	// execute: cmd_mcpgen
	description: 'rhai command'
	commands:    [
		cli.Command{
			name:        'start'
			execute:     cmd_start
			description: 'start the Rhai server'
		},
	]
}

fn cmd_start(cmd cli.Command) ! {
	mut server := new_mcp_server()!
	server.start()!
}
33 lib/ai/mcp/rhai/mcp/mcp.v (new file)
@@ -0,0 +1,33 @@
module mcp

import freeflowuniverse.herolib.ai.mcp
import freeflowuniverse.herolib.schemas.jsonrpc
import log

pub fn new_mcp_server() !&mcp.Server {
	log.info('Creating new Rhai MCP server')

	// Initialize the server with the tool and prompt handlers
	mut server := mcp.new_server(mcp.MemoryBackend{
		tools: {
			'generate_rhai_wrapper': generate_rhai_wrapper_spec
		}
		tool_handlers: {
			'generate_rhai_wrapper': generate_rhai_wrapper_handler
		}
		prompts: {
			'rhai_wrapper': rhai_wrapper_prompt_spec
		}
		prompt_handlers: {
			'rhai_wrapper': rhai_wrapper_prompt_handler
		}
	}, mcp.ServerParams{
		config: mcp.ServerConfiguration{
			server_info: mcp.ServerInfo{
				name:    'rhai'
				version: '1.0.0'
			}
		}
	})!
	return server
}
Some files were not shown because too many files have changed in this diff.