2025-11-08 11:12:16 +04:00
parent 5a6f3d323b
commit f40565c571
9 changed files with 357 additions and 14 deletions

lib/ai/client/README.md Normal file

@@ -0,0 +1,36 @@
# AIClient Factory
This directory contains the implementation of the `AIClient` factory, which provides a unified interface for interacting with various Large Language Model (LLM) providers such as Groq and OpenRouter. It leverages the existing OpenAI client infrastructure to abstract away the differences between providers.
## File Structure
- [`aiclient.v`](./aiclient.v): The main factory and core functions for the `AIClient`.
- [`aiclient_models.v`](./aiclient_models.v): Defines the LLM model enum and its mapping to specific model names and API base URLs.
- [`aiclient_llm.v`](./aiclient_llm.v): Handles the initialization of the various LLM provider clients.
- [`aiclient_embed.v`](./aiclient_embed.v): Provides functions for generating embeddings using the configured LLM models (currently a placeholder).
- [`aiclient_write.v`](./aiclient_write.v): Implements the file-writing logic, including backup, AI-driven modification, content validation, and retry across models.
- [`aiclient_validate.v`](./aiclient_validate.v): Contains validation functions for different file types (Vlang, Markdown, YAML, JSON).
## Usage
To use the `AIClient`, first initialize it (the module is named `client`, under `lib/ai/client`):
```v
import incubaid.herolib.ai.client

mut ai := client.new()!
```
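Once initialized, the client can drive an AI-assisted edit end to end via `write_from_prompt` (see `aiclient_write.v`). A minimal sketch, assuming a hypothetical `notes.md` and that `pathlib.get_file` accepts `create: false`:
```v
import incubaid.herolib.core.pathlib

mut p := pathlib.get_file(path: 'notes.md', create: false)!
// tries .best first, then falls back to .flash
ai.write_from_prompt(p, 'Rewrite the introduction to be more concise.', [.best, .flash])!
```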
Ensure that the necessary environment variables (`GROQKEY` and `OPENROUTER_API_KEY`) are set for the LLM providers.
## Environment Variables
- `GROQKEY`: API key for Groq.
- `OPENROUTER_API_KEY`: API key for OpenRouter.
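For example, in a shell (placeholder values):
```bash
export GROQKEY='your-groq-api-key'
export OPENROUTER_API_KEY='your-openrouter-api-key'
```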
## Dependencies
The validation helpers in [`aiclient_validate.v`](./aiclient_validate.v) rely on two external V modules:
```bash
v install prantlf.yaml
v install markdown
```

lib/ai/client/aiclient.v Normal file

@@ -0,0 +1,17 @@
module client

// AIClient bundles all configured LLM provider clients behind one factory.
@[heap]
pub struct AIClient {
pub mut:
	llms AIClientLLMs
	// Add other fields as needed
}

// new initializes every LLM provider client and returns a ready-to-use AIClient
pub fn new() !AIClient {
	llms := llms_init()!
	return AIClient{
		llms: llms
	}
}

lib/ai/client/aiclient_embed.v Normal file

@@ -0,0 +1,5 @@
module client

// Placeholder: embedding support is not wired up yet. The intended shape,
// pending confirmation of the openai client's embeddings API:
// pub fn (mut ac AIClient) embed(txt string) ![]f32 {
// 	return ac.llms.llm_embed.embeddings(txt)!
// }

lib/ai/client/aiclient_llm.v Normal file

@@ -0,0 +1,104 @@
module client
import incubaid.herolib.clients.openai
import os
pub struct AIClientLLMs {
pub mut:
	llm_maverick &openai.OpenAI
	llm_qwen     &openai.OpenAI
	llm_120b     &openai.OpenAI
	llm_best     &openai.OpenAI
	llm_flash    &openai.OpenAI
	llm_pro      &openai.OpenAI
	llm_morph    &openai.OpenAI
	llm_embed    &openai.OpenAI
}
// llms_init initializes and registers all LLM provider clients
pub fn llms_init() !AIClientLLMs {
	groq_key := os.getenv('GROQKEY')
	if groq_key.len == 0 {
		return error('GROQKEY environment variable not set')
	}
	openrouter_key := os.getenv('OPENROUTER_API_KEY')
	if openrouter_key.len == 0 {
		return error('OPENROUTER_API_KEY environment variable not set')
	}

	mut maverick_client := openai.OpenAI{
		name:          'maverick'
		api_key:       groq_key
		url:           'https://api.groq.com/openai/v1'
		model_default: 'meta-llama/llama-4-maverick-17b-128e-instruct'
	}
	openai.set(maverick_client)!

	mut qwen_client := openai.OpenAI{
		name:          'qwen'
		api_key:       groq_key
		url:           'https://api.groq.com/openai/v1'
		model_default: 'qwen/qwen3-32b'
	}
	openai.set(qwen_client)!

	mut llm_120b_client := openai.OpenAI{
		name:          'llm_120b'
		api_key:       groq_key
		url:           'https://api.groq.com/openai/v1'
		model_default: 'openai/gpt-oss-120b'
	}
	openai.set(llm_120b_client)!

	mut best_client := openai.OpenAI{
		name:          'best'
		api_key:       openrouter_key
		url:           'https://openrouter.ai/api/v1'
		model_default: 'anthropic/claude-haiku-4.5'
	}
	openai.set(best_client)!

	mut flash_client := openai.OpenAI{
		name:          'flash'
		api_key:       openrouter_key
		url:           'https://openrouter.ai/api/v1'
		model_default: 'google/gemini-2.5-flash'
	}
	openai.set(flash_client)!

	mut pro_client := openai.OpenAI{
		name:          'pro'
		api_key:       openrouter_key
		url:           'https://openrouter.ai/api/v1'
		model_default: 'google/gemini-2.5-pro'
	}
	openai.set(pro_client)!

	mut morph_client := openai.OpenAI{
		name:          'morph'
		api_key:       openrouter_key
		url:           'https://openrouter.ai/api/v1'
		model_default: 'morph/morph-v3-fast'
	}
	openai.set(morph_client)!

	mut embed_client := openai.OpenAI{
		name:          'embed'
		api_key:       openrouter_key
		url:           'https://openrouter.ai/api/v1'
		model_default: 'qwen/qwen3-embedding-0.6b'
	}
	openai.set(embed_client)!

	return AIClientLLMs{
		llm_maverick: openai.get(name: 'maverick')!
		llm_qwen:     openai.get(name: 'qwen')!
		llm_120b:     openai.get(name: 'llm_120b')!
		llm_best:     openai.get(name: 'best')!
		llm_flash:    openai.get(name: 'flash')!
		llm_pro:      openai.get(name: 'pro')!
		llm_morph:    openai.get(name: 'morph')!
		llm_embed:    openai.get(name: 'embed')!
	}
}
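The eight near-identical blocks above could also be derived from the `LLMEnum` mapping in `aiclient_models.v` below. A minimal sketch, assuming the same `openai.set`/`openai.get` API (key-presence checks omitted for brevity; `llms_init_from_enum` is a hypothetical name):
```v
fn llms_init_from_enum() !AIClientLLMs {
	groq_key := os.getenv('GROQKEY')
	openrouter_key := os.getenv('OPENROUTER_API_KEY')
	// register one client per enum variant; the variant name doubles
	// as the registered client name, matching llms_init above
	for model in [LLMEnum.maverick, .qwen, .llm_120b, .best, .flash, .pro, .morph, .embed] {
		model_name, base_url := llm_to_model_url(model)!
		key := if base_url.contains('groq') { groq_key } else { openrouter_key }
		openai.set(openai.OpenAI{
			name:          model.str()
			api_key:       key
			url:           base_url
			model_default: model_name
		})!
	}
	return AIClientLLMs{
		llm_maverick: openai.get(name: 'maverick')!
		llm_qwen:     openai.get(name: 'qwen')!
		llm_120b:     openai.get(name: 'llm_120b')!
		llm_best:     openai.get(name: 'best')!
		llm_flash:    openai.get(name: 'flash')!
		llm_pro:      openai.get(name: 'pro')!
		llm_morph:    openai.get(name: 'morph')!
		llm_embed:    openai.get(name: 'embed')!
	}
}
```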

lib/ai/client/aiclient_models.v Normal file

@@ -0,0 +1,26 @@
module client

pub enum LLMEnum {
	maverick
	qwen
	embed
	llm_120b
	best
	flash
	pro
	morph
}

// llm_to_model_url returns the (model_name, base_url) pair for a given model
fn llm_to_model_url(model LLMEnum) !(string, string) {
	return match model {
		.maverick { 'meta-llama/llama-4-maverick-17b-128e-instruct', 'https://api.groq.com/openai/v1' }
		.qwen { 'qwen/qwen3-32b', 'https://api.groq.com/openai/v1' }
		.embed { 'qwen/qwen3-embedding-0.6b', 'https://openrouter.ai/api/v1' }
		.llm_120b { 'openai/gpt-oss-120b', 'https://api.groq.com/openai/v1' }
		.best { 'anthropic/claude-haiku-4.5', 'https://openrouter.ai/api/v1' }
		.flash { 'google/gemini-2.5-flash', 'https://openrouter.ai/api/v1' }
		.pro { 'google/gemini-2.5-pro', 'https://openrouter.ai/api/v1' }
		.morph { 'morph/morph-v3-fast', 'https://openrouter.ai/api/v1' }
	}
}
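For example, resolving `.flash`:
```v
model_name, base_url := llm_to_model_url(.flash)!
assert model_name == 'google/gemini-2.5-flash'
assert base_url == 'https://openrouter.ai/api/v1'
```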

lib/ai/client/aiclient_validate.v Normal file

@@ -0,0 +1,45 @@
module client

import incubaid.herolib.core.pathlib
import markdown
import os
import prantlf.yaml { parse_text }
import x.json2

// validate_vlang_content checks a V source file with `v fmt -verify`,
// which exits non-zero (printing the issues to stderr) when the file
// is not correctly formatted.
pub fn validate_vlang_content(path pathlib.Path) !string {
	res := os.system('v fmt -verify ${path.path}')
	if res != 0 {
		return 'V language syntax validation failed. Please check the file for errors.'
	}
	// TODO: also compile-check with `v <filepath>`: on success remove the
	// produced binary, on failure return the compiler errors.
	return '' // empty means no error
}
// validate_markdown_content checks Markdown by attempting to render it
// to HTML; a render error indicates invalid Markdown.
pub fn validate_markdown_content(path_ pathlib.Path) !string {
	mut mypath := path_
	content := mypath.read() or { return 'Failed to read markdown file: ${err}' }
	mut renderer := markdown.HtmlRenderer{}
	_ := markdown.render(content, mut renderer) or { return 'Invalid Markdown content: ${err}' }
	return '' // empty means no error
}
// validate_yaml_content checks YAML by attempting to parse the content
pub fn validate_yaml_content(path_ pathlib.Path) !string {
	mut mypath := path_
	content := mypath.read() or { return 'Failed to read YAML file: ${err}' }
	_ := parse_text(content) or { return 'Invalid YAML content: ${err}' }
	return '' // empty means no error
}
// validate_json_content checks JSON by attempting to decode the content
pub fn validate_json_content(path_ pathlib.Path) !string {
	mut mypath := path_
	content := mypath.read() or { return 'Failed to read JSON file: ${err}' }
	json2.decode[json2.Any](content) or { return 'Invalid JSON content: ${err}' }
	return '' // empty means no error
}
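A usage sketch for the validators, assuming a hypothetical `config.json` and that `pathlib.get_file` accepts `create: false`:
```v
mut p := pathlib.get_file(path: 'config.json', create: false)!
msg := validate_json_content(p)!
if msg != '' {
	eprintln(msg) // a non-empty string describes the validation failure
}
```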

lib/ai/client/aiclient_write.v Normal file

@@ -0,0 +1,76 @@
module client

import incubaid.herolib.core.pathlib
import incubaid.herolib.ui.console
import incubaid.herolib.clients.openai
import os

// write_from_prompt rewrites the file at path_ based on prompt, trying each
// model in models (in order) until one produces content that passes validation.
// TODO: move the arguments into a params struct
pub fn (mut ac AIClient) write_from_prompt(path_ pathlib.Path, prompt string, models []LLMEnum) ! {
	mut mypath := path_
	original_content := mypath.read()!

	// 1. Back up the original file so it can be restored if every model fails
	mut backup_path := pathlib.get_file(path: '${mypath.path}.backup', create: true)!
	backup_path.write(original_content)!

	mut selected_models := models.clone()
	if selected_models.len == 0 {
		selected_models = [.best] // default to the best model if none provided
	}

	for model_enum in selected_models {
		model_name, _ := llm_to_model_url(model_enum)!

		// 2. Confirm a client is registered for this model; the enum variant
		// names match the client names used in llms_init
		_ := openai.get(name: model_enum.str())!

		// 3. Ask the model to turn the prompt into modification instructions.
		// Placeholder: the chat-completion call still needs to be wired up,
		// e.g. completion := llm_client.chat_completion(prompt)! and
		// instructions := completion.choices[0].message.content.
		// For now the prompt itself stands in for the instructions.
		instructions := prompt

		// 4. Use the morph model to merge original content + instructions.
		// Placeholder: until the merge is implemented, the instructions are
		// used verbatim as the new content.
		new_content := instructions

		// 5. Write the candidate content, then validate it by file extension
		mypath.write(new_content)!
		mut validation_error := ''
		match mypath.ext()! {
			'.v' { validation_error = validate_vlang_content(mypath)! }
			'.md' { validation_error = validate_markdown_content(mypath)! }
			'.yaml', '.yml' { validation_error = validate_yaml_content(mypath)! }
			'.json' { validation_error = validate_json_content(mypath)! }
			else {} // no specific validation for other file types
		}
		if validation_error == '' {
			// Validation passed: keep the new content, drop the backup
			backup_path.delete()!
			return
		}
		console.print_stderr('Validation failed for model ${model_name}. Error: ${validation_error}. Trying next model...')
	}

	// 6. All models failed: restore the backup and report the error
	original_backup := backup_path.read()!
	mypath.write(original_backup)!
	backup_path.delete()!
	return error('All models failed to generate valid content. Original file restored.')
}
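A usage sketch of the full flow (hypothetical `spec.yaml`):
```v
mut ac := new()!
mut target := pathlib.get_file(path: 'spec.yaml', create: false)!
// tries .pro first, then .best; spec.yaml is restored from its
// .backup if every model's output fails validation
ac.write_from_prompt(target, 'Add a `version: 1` field at the top.', [.pro, .best])!
```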