Files
herolib/examples/ai/openai/openai_two_model_pipeline.vsh
Mahmoud-Emad f6734a3568 chore: Remove openrouter client
- Remove call to openrouter.play from the main play function
- Used the OpenAI client instead
- Updated the examples
- Updated the README
2025-10-29 11:42:44 +03:00

135 lines
4.0 KiB
V
Executable File

#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds
// Sample code file to be improved by the two-model pipeline below.
// NOTE(review): this Python snippet is deliberately non-idiomatic (manual
// index loops, shadowing the `max` builtin) — it is the test INPUT for the
// enhancement model, so it must not be "fixed" here. Its indentation also
// appears stripped by whatever exported this file — TODO confirm against
// the original repository.
const sample_code = '
def calculate_sum(numbers):
total = 0
for i in range(len(numbers)):
total = total + numbers[i]
return total
def find_max(lst):
max = lst[0]
for i in range(1, len(lst)):
if lst[i] > max:
max = lst[i]
return max
'
// Configure two OpenAI client instances to use OpenRouter with different models.
// Both registrations point at the same OpenRouter-compatible endpoint; only the
// instance name and default model differ. The named clients are retrieved later
// via openai.get(name: ...).
// Model A: Enhancement model (Qwen Coder) — proposes edits/diffs in step 1.
playcmds.run(
heroscript: '
!!openai.configure
name: "enhancer"
url: "https://openrouter.ai/api/v1"
model_default: "qwen/qwen-2.5-coder-32b-instruct"
'
)!
// Model B: Modification model (Llama 3.3 70B) — applies the edits in step 2.
playcmds.run(
heroscript: '
!!openai.configure
name: "modifier"
url: "https://openrouter.ai/api/v1"
model_default: "meta-llama/llama-3.3-70b-instruct"
'
)!
// Retrieve the two named client instances registered by the heroscript above.
mut enhancer := openai.get(name: 'enhancer') or { panic('Failed to get enhancer client: ${err}') }
mut modifier := openai.get(name: 'modifier') or { panic('Failed to get modifier client: ${err}') }

// BUG FIX: the original printed ''.repeat(70) — repeating the empty string
// any number of times is still '', so every intended divider was a blank
// line. Use '=' to actually draw a 70-character separator.
println('='.repeat(70))
println('🔧 Two-Model Code Enhancement Pipeline - Proof of Concept')
println('🔧 Using OpenAI client configured for OpenRouter')
println('='.repeat(70))
println('')

// Step 1: Get enhancement suggestions from Model A (Qwen Coder).
println('📝 STEP 1: Code Enhancement Analysis')
println('='.repeat(70))
println('Model: qwen/qwen-2.5-coder-32b-instruct')
println('Task: Analyze code and suggest improvements\n')
// Prompt for Model A: request edits/diffs only (not a full rewrite) so that
// Model B gets a well-scoped file-editing task in step 2.
enhancement_prompt := 'You are a code enhancement agent.
Your job is to analyze the following Python code and propose improvements or fixes.
Output your response as **pure edits or diffs only**, not a full rewritten file.
Focus on:
- Performance improvements
- Pythonic idioms
- Bug fixes
- Code clarity
Here is the code to analyze:
${sample_code}
Provide specific edit instructions or diffs.'

println('🤖 Sending to enhancement model...')
// Low temperature (0.3) keeps the analysis focused and mostly deterministic.
enhancement_result := enhancer.chat_completion(
	message:               enhancement_prompt
	temperature:           0.3
	max_completion_tokens: 2000
) or {
	eprintln(' Enhancement failed: ${err}')
	return
}
println('\n Enhancement suggestions received:')
// BUG FIX: was ''.repeat(70), which yields an empty string — no divider
// was ever printed. '=' draws the intended 70-character separator.
println('='.repeat(70))
println(enhancement_result.result)
println('='.repeat(70))
println('Tokens used: ${enhancement_result.usage.total_tokens}\n')
// Step 2: Apply edits using Model B (Llama 3.3 70B).
println('\n📝 STEP 2: Apply Code Modifications')
// BUG FIX: was ''.repeat(70) — repeating '' produces '', so the divider
// lines in this step were blank. '=' draws the intended separator.
println('='.repeat(70))
println('Model: meta-llama/llama-3.3-70b-instruct')
println('Task: Apply the suggested edits to produce updated code\n')

// Prompt for Model B: feed it both the original file and Model A's edits,
// and ask for clean final code only (no commentary) so the output is usable
// directly.
modification_prompt := 'You are a file editing agent.
Apply the given edits or diffs to the provided file.
Output the updated Python code only, without comments or explanations.
ORIGINAL CODE:
${sample_code}
EDITS TO APPLY:
${enhancement_result.result}
Output only the final, updated Python code.'

println('🤖 Sending to modification model...')
// Very low temperature (0.1): editing should be mechanical, not creative.
modification_result := modifier.chat_completion(
	message:               modification_prompt
	temperature:           0.1
	max_completion_tokens: 2000
) or {
	eprintln(' Modification failed: ${err}')
	return
}
println('\n Modified code received:')
println('='.repeat(70))
println(modification_result.result)
println('='.repeat(70))
println('Tokens used: ${modification_result.usage.total_tokens}\n')
// Summary: report sizes, models used, and aggregate token consumption.
println('\n📊 PIPELINE SUMMARY')
// BUG FIX: was ''.repeat(70) — an empty string, not a divider.
println('='.repeat(70))
println('Original code length: ${sample_code.len} chars')
println('Enhancement model: qwen/qwen-2.5-coder-32b-instruct')
println('Enhancement tokens: ${enhancement_result.usage.total_tokens}')
println('Modification model: meta-llama/llama-3.3-70b-instruct')
println('Modification tokens: ${modification_result.usage.total_tokens}')
// Hoist the sum into a named variable: the original split the expression
// across two lines inside a ${...} interpolation, which is fragile and
// hard to read.
total_tokens := enhancement_result.usage.total_tokens + modification_result.usage.total_tokens
println('Total tokens: ${total_tokens}')
println('='.repeat(70))
println('\n Two-model pipeline completed successfully!')