refactor: Update OpenRouter client and examples

- Add error handling for client initialization
- Improve example scripts for clarity and robustness
- Refine client configuration and usage patterns
- Update documentation with current examples and features
- Enhance model handling and response processing
This commit is contained in:
Mahmoud-Emad
2025-10-28 22:40:37 +03:00
parent d1c0c8f03e
commit 4222dac72e
12 changed files with 733 additions and 1 deletions

View File

@@ -0,0 +1,78 @@
# OpenRouter Examples - Proof of Concept
## Overview
This folder contains **three example scripts** demonstrating the usage of the OpenRouter V client (`herolib.clients.openrouter`).
* **Goal:** Show how to send messages to OpenRouter models, run a **two-model pipeline** for code enhancement, and illustrate multi-model usage.
---
## Example Scripts
### 1. `openrouter_hello.vsh`
* **Purpose:** Simple hello message to OpenRouter.
* **Demonstrates:** Sending a single message using `client.chat_completion`.
* **Usage:**
```bash
examples/clients/openrouter/openrouter_hello.vsh
```
* **Expected output:** A friendly "hello" response from the AI and token usage.
---
### 2. `openrouter_example.vsh`
* **Purpose:** Demonstrates basic conversation features.
* **Demonstrates:**
* Sending a single message
* Using system + user messages for conversation context
* Printing token usage
* **Usage:**
```bash
examples/clients/openrouter/openrouter_example.vsh
```
* **Expected output:** Responses from the AI for both simple and system-prompt conversations.
---
### 3. `openrouter_two_model_pipeline.vsh`
* **Purpose:** Two-model code enhancement pipeline (proof of concept).
* **Demonstrates:**
* Model A (`Qwen3 Coder`) suggests code improvements.
* Model B (`morph-v3-fast`) applies the suggested edits.
* Tracks tokens and shows before/after code.
* **Usage:**
```bash
examples/clients/openrouter/openrouter_two_model_pipeline.vsh
```
* **Expected output:**
* Original code
* Suggested edits
* Final updated code
* Token usage summary
---
## Notes
1. Ensure your **OpenRouter API key** is set:
```bash
export OPENROUTER_API_KEY="sk-or-v1-..."
```
2. All scripts use the **same OpenRouter client** instance for simplicity, except the two-model pipeline which uses **two separate client instances** (one per model).
3. Scripts can be run individually using the `v -enable-globals run` command.
4. The two-model pipeline is a **proof of concept**; the flow can later be extended to multiple files or OpenRPC specs.

View File

@@ -0,0 +1,49 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.openrouter
import incubaid.herolib.core.playcmds

// Basic OpenRouter conversation examples: a single message and a
// system+user conversation. Requires OPENROUTER_API_KEY in the environment.

// Get the (default) client instance
mut client := openrouter.get()!
println('🤖 OpenRouter Client Example')
// use a visible separator line; repeating '' would print an empty string
println('─'.repeat(50))
println('')

// Example 1: single user message via the .message shortcut
println('Example 1: Simple Hello')
println('─'.repeat(50))
mut r := client.chat_completion(
	model:                 'qwen/qwen-2.5-coder-32b-instruct'
	message:               'Say hello in a creative way!'
	temperature:           0.7
	max_completion_tokens: 150
)!
println('AI: ${r.result}')
println('Tokens: ${r.usage.total_tokens}\n')

// Example 2: conversation with an explicit system prompt via .messages
println('Example 2: Conversation with System Prompt')
println('─'.repeat(50))
r = client.chat_completion(
	model:    'qwen/qwen-2.5-coder-32b-instruct'
	messages: [
		openrouter.Message{
			role:    .system
			content: 'You are a helpful coding assistant who speaks concisely.'
		},
		openrouter.Message{
			role:    .user
			content: 'What is V programming language?'
		},
	]
	temperature:           0.3
	max_completion_tokens: 200
)!
println('AI: ${r.result}')
println('Tokens: ${r.usage.total_tokens}\n')
println('─'.repeat(50))
println(' Examples completed successfully!')

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
// Minimal OpenRouter example: send one "hello" message and print the
// response plus token usage. Requires OPENROUTER_API_KEY in the environment.
import incubaid.herolib.clients.openrouter
import incubaid.herolib.core.playcmds

// Get the client instance; exit gracefully when configuration fails
// (e.g. the API key is missing)
mut client := openrouter.get() or {
	eprintln('Failed to get client: ${err}')
	return
}
println('Sending message to OpenRouter...\n')
// Simple hello message sent via the .message shortcut (user role)
response := client.chat_completion(
	model:                 'qwen/qwen-2.5-coder-32b-instruct'
	message:               'Say hello in a friendly way!'
	temperature:           0.7
	max_completion_tokens: 100
) or {
	eprintln('Failed to get completion: ${err}')
	return
}
println('Response from AI:')
println('─'.repeat(50))
println(response.result)
println('─'.repeat(50))
// token accounting as reported by the API
println('\nTokens used: ${response.usage.total_tokens}')
println(' - Prompt: ${response.usage.prompt_tokens}')
println(' - Completion: ${response.usage.completion_tokens}')

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
import incubaid.herolib.clients.openrouter
import incubaid.herolib.core.playcmds

// Two-model code enhancement pipeline (proof of concept):
// the 'enhancer' client proposes edits, the 'modifier' client applies them.
// Each stage runs on whatever model_default is configured on its client,
// since no explicit model is passed to chat_completion.

// Sample code file to be improved
const sample_code = '
def calculate_sum(numbers):
    total = 0
    for i in range(len(numbers)):
        total = total + numbers[i]
    return total
def find_max(lst):
    max = lst[0]
    for i in range(1, len(lst)):
        if lst[i] > max:
            max = lst[i]
    return max
'

// Two separate named client instances, one per pipeline stage
mut modifier := openrouter.get(name: 'modifier', create: true) or {
	panic('Failed to get modifier client: ${err}')
}
mut enhancer := openrouter.get(name: 'enhancer', create: true) or {
	panic('Failed to get enhancer client: ${err}')
}

println('═'.repeat(70))
println('🔧 Two-Model Code Enhancement Pipeline - Proof of Concept')
println('═'.repeat(70))
println('')

// Step 1: ask the enhancement model for edit suggestions
println('📝 STEP 1: Code Enhancement Analysis')
println('─'.repeat(70))
// report the model actually configured on the client instead of a
// hard-coded label that may not match reality
println('Model: ${enhancer.model_default}')
println('Task: Analyze code and suggest improvements\n')
enhancement_prompt := 'You are a code enhancement agent.
Your job is to analyze the following Python code and propose improvements or fixes.
Output your response as **pure edits or diffs only**, not a full rewritten file.
Focus on:
- Performance improvements
- Pythonic idioms
- Bug fixes
- Code clarity
Here is the code to analyze:
${sample_code}
Provide specific edit instructions or diffs.'
println('🤖 Sending to enhancement model...')
enhancement_result := enhancer.chat_completion(
	message:               enhancement_prompt
	temperature:           0.3
	max_completion_tokens: 2000
) or {
	eprintln(' Enhancement failed: ${err}')
	return
}
println('\n Enhancement suggestions received:')
println('─'.repeat(70))
println(enhancement_result.result)
println('─'.repeat(70))
println('Tokens used: ${enhancement_result.usage.total_tokens}\n')

// Step 2: apply the suggested edits with the modification model
println('\n📝 STEP 2: Apply Code Modifications')
println('─'.repeat(70))
println('Model: ${modifier.model_default}')
println('Task: Apply the suggested edits to produce updated code\n')
modification_prompt := 'You are a file editing agent.
Apply the given edits or diffs to the provided file.
Output the updated Python code only, without comments or explanations.
ORIGINAL CODE:
${sample_code}
EDITS TO APPLY:
${enhancement_result.result}
Output only the final, updated Python code.'
println('🤖 Sending to modification model...')
modification_result := modifier.chat_completion(
	message:               modification_prompt
	temperature:           0.1
	max_completion_tokens: 2000
) or {
	eprintln(' Modification failed: ${err}')
	return
}
println('\n Modified code received:')
println('─'.repeat(70))
println(modification_result.result)
println('─'.repeat(70))
println('Tokens used: ${modification_result.usage.total_tokens}\n')

// Summary: report the models actually used (previously hard-coded names
// that did not match the clients' configuration)
println('\n📊 PIPELINE SUMMARY')
println('═'.repeat(70))
println('Original code length: ${sample_code.len} chars')
println('Enhancement model: ${enhancer.model_default}')
println('Enhancement tokens: ${enhancement_result.usage.total_tokens}')
println('Modification model: ${modifier.model_default}')
println('Modification tokens: ${modification_result.usage.total_tokens}')
println('Total tokens: ${enhancement_result.usage.total_tokens +
	modification_result.usage.total_tokens}')
println('═'.repeat(70))
println('\n Two-model pipeline completed successfully!')

View File

@@ -0,0 +1,7 @@
!!hero_code.generate_client
name:'openrouter'
classname:'OpenRouter'
singleton:0
default:1
hasconfig:1
reset:0

View File

@@ -0,0 +1,13 @@
module openrouter
// verifies that get() with create:true yields a client carrying the
// struct's default url and model_default values
fn test_factory() {
	mut client := get(name: 'default', create: true)!
	assert client.name == 'default'
	assert client.url == 'https://openrouter.ai/api/v1'
	assert client.model_default == 'qwen/qwen-2.5-coder-32b-instruct'
}

// verifies that new() registers a client under the requested name
fn test_client_creation() {
	mut client := new(name: 'test_client')!
	assert client.name == 'test_client'
}

View File

@@ -0,0 +1,98 @@
module openrouter
import json
// CompletionArgs holds the parameters of one chat completion request.
@[params]
pub struct CompletionArgs {
pub mut:
	model                 string // model id; falls back to the client's model_default when empty
	messages              []Message // optional because we can use message, which means we just pass a string
	message               string // convenience shortcut: a single user message, appended after messages
	temperature           f64 = 0.2 // sampling temperature
	max_completion_tokens int = 32000 // upper bound on generated tokens
}
// Message is one entry of a chat conversation: the author's role and its text.
pub struct Message {
pub mut:
	role    RoleType // who authored this message
	content string // the message text
}
// RoleType enumerates the chat roles sent to the OpenRouter API
// (serialized to strings by roletype_str).
pub enum RoleType {
	system
	user
	assistant
	function
}
// roletype_str converts a RoleType value to the lowercase string form
// used in the JSON payload sent to the API.
fn roletype_str(x RoleType) string {
	return match x {
		.system { 'system' }
		.user { 'user' }
		.assistant { 'assistant' }
		.function { 'function' }
	}
}
// ChatCompletion is the processed result of a completion request:
// the concatenated text of all returned choices plus token accounting.
pub struct ChatCompletion {
pub mut:
	id      string // completion id as returned by the API
	created u32 // creation timestamp as returned by the API
	result  string // concatenated content of all choices
	usage   Usage // token usage reported by the API
}
// creates a new chat completion given a list of messages
// each message consists of message content and the role of the author
// Either .messages or the .message shortcut (sent with the user role) must be
// provided; when .model is empty the client's model_default is used.
pub fn (mut f OpenRouter) chat_completion(args_ CompletionArgs) !ChatCompletion {
	mut args := args_
	if args.model == '' {
		args.model = f.model_default
	}
	// fail early instead of sending an empty conversation to the API
	if args.messages.len == 0 && args.message == '' {
		return error('chat_completion: provide .message or a non-empty .messages list')
	}
	mut m := ChatMessagesRaw{
		model:                 args.model
		temperature:           args.temperature
		max_completion_tokens: args.max_completion_tokens
	}
	for msg in args.messages {
		m.messages << MessageRaw{
			role:    roletype_str(msg.role)
			content: msg.content
		}
	}
	if args.message != '' {
		// the plain-string shortcut is always sent as a user message
		m.messages << MessageRaw{
			role:    'user'
			content: args.message
		}
	}
	data := json.encode(m)
	mut conn := f.connection()!
	r := conn.post_json_str(prefix: 'chat/completions', data: data)!
	res := json.decode(ChatCompletionRaw, r)!
	// concatenate the content of every returned choice into a single string
	mut result := ''
	for choice in res.choices {
		result += choice.message.content
	}
	return ChatCompletion{
		id:      res.id
		created: res.created
		result:  result
		usage:   res.usage
	}
}

View File

@@ -0,0 +1,139 @@
module openrouter
import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import json
__global (
openrouter_global map[string]&OpenRouter
openrouter_default string
)
/////////FACTORY
// ArgsGet selects which named client instance to retrieve or create.
@[params]
pub struct ArgsGet {
pub mut:
	name   string = 'default' // instance name
	fromdb bool // will load from filesystem
	create bool // default will not create if not exist
}
// new creates an OpenRouter config with the given name, registers it
// via set() (memory + redis) and returns the registered instance.
pub fn new(args ArgsGet) !&OpenRouter {
	mut obj := OpenRouter{
		name: args.name
	}
	set(obj)!
	return get(name: args.name)!
}
// get returns the named OpenRouter instance, loading it from redis when
// fromdb is set or it is not yet cached in memory; with create:true a
// missing instance is created via new(). Error messages now report the
// requested name instead of a hard-coded literal.
pub fn get(args ArgsGet) !&OpenRouter {
	mut context := base.context()!
	openrouter_default = args.name
	if args.fromdb || args.name !in openrouter_global {
		mut r := context.redis()!
		if r.hexists('context:openrouter', args.name)! {
			data := r.hget('context:openrouter', args.name)!
			if data.len == 0 {
				print_backtrace()
				return error('OpenRouter with name: ${args.name} does not exist, prob bug.')
			}
			mut obj := json.decode(OpenRouter, data)!
			set_in_mem(obj)!
		} else {
			if args.create {
				new(args)!
			} else {
				print_backtrace()
				return error("OpenRouter with name '${args.name}' does not exist")
			}
		}
		return get(name: args.name)! // no longer from db nor create
	}
	return openrouter_global[args.name] or {
		print_backtrace()
		return error('could not get config for openrouter with name: ${args.name}')
	}
}
// register the config for the future: initialize it, cache it in memory,
// make it the default, and persist it to redis so get(fromdb: true) can
// reload it later
pub fn set(o OpenRouter) ! {
	mut o2 := set_in_mem(o)!
	openrouter_default = o2.name
	mut context := base.context()!
	mut r := context.redis()!
	r.hset('context:openrouter', o2.name, json.encode(o2))!
}
// does the config exist? checks redis only, not the in-memory cache
pub fn exists(args ArgsGet) !bool {
	mut context := base.context()!
	mut r := context.redis()!
	return r.hexists('context:openrouter', args.name)!
}
// delete removes the named config from redis
// NOTE(review): the in-memory cache entry is not removed — confirm whether
// a subsequent get() should still return the cached instance
pub fn delete(args ArgsGet) ! {
	mut context := base.context()!
	mut r := context.redis()!
	r.hdel('context:openrouter', args.name)!
}
// ArgsList controls where list() reads instances from.
@[params]
pub struct ArgsList {
pub mut:
	fromdb bool // will load from filesystem
}
// if fromdb set: load from filesystem, and not from mem, will also reset what is in mem
// Returns all known OpenRouter instances. (The two separate `if args.fromdb`
// branches of the original were merged into one.)
pub fn list(args ArgsList) ![]&OpenRouter {
	mut res := []&OpenRouter{}
	mut context := base.context()!
	if args.fromdb {
		// reset what is in mem, then reload every entry from redis
		openrouter_global = map[string]&OpenRouter{}
		openrouter_default = ''
		mut r := context.redis()!
		mut l := r.hkeys('context:openrouter')!
		for name in l {
			res << get(name: name, fromdb: true)!
		}
		return res
	}
	// load from memory
	for _, client in openrouter_global {
		res << client
	}
	return res
}
// only sets in mem, does not set as config (nothing is persisted to redis);
// runs obj_init() on the object and makes it the current default instance
fn set_in_mem(o OpenRouter) !OpenRouter {
	mut o2 := obj_init(o)!
	openrouter_global[o2.name] = &o2
	openrouter_default = o2.name
	return o2
}
// play processes '!!openrouter.configure' actions from a PlayBook:
// each action's heroscript is decoded into a config and registered via set().
pub fn play(mut plbook PlayBook) ! {
	if !plbook.exists(filter: 'openrouter.') {
		return
	}
	mut install_actions := plbook.find(filter: 'openrouter.configure')!
	// iterating an empty list is a no-op, so no length guard is needed
	for mut install_action in install_actions {
		heroscript := install_action.heroscript()
		mut obj2 := heroscript_loads(heroscript)!
		set(obj2)!
		install_action.done = true
	}
}
// switch instance to be used for openrouter
// note: only updates the default name; it does not verify the name exists
pub fn switch(name string) {
	openrouter_default = name
}

View File

@@ -0,0 +1,67 @@
module openrouter
import incubaid.herolib.data.encoderhero
import incubaid.herolib.core.httpconnection
import os
pub const version = '0.0.0'
const singleton = false
const default = true
// OpenRouter is the client configuration for the OpenRouter API.
@[heap]
pub struct OpenRouter {
pub mut:
	name          string = 'default' // instance name used by the factory
	api_key       string // bearer token; obj_init falls back to OPENROUTER_API_KEY
	url           string = 'https://openrouter.ai/api/v1' // API base URL
	model_default string = 'qwen/qwen-2.5-coder-32b-instruct' // model used when a request sets none
}
// your checking & initialization code if needed
// obj_init fills empty config fields from environment variables and returns
// an error when no API key is available at all.
// NOTE(review): url and model_default have non-empty struct defaults, so the
// OPENROUTER_AI_MODEL / OPENROUTER_URL fallbacks below only take effect when
// those fields were explicitly set to '' — confirm this precedence is intended.
fn obj_init(mycfg_ OpenRouter) !OpenRouter {
	mut mycfg := mycfg_
	if mycfg.model_default == '' {
		k := os.getenv('OPENROUTER_AI_MODEL')
		if k != '' {
			mycfg.model_default = k
		}
	}
	if mycfg.url == '' {
		k := os.getenv('OPENROUTER_URL')
		if k != '' {
			mycfg.url = k
		}
	}
	if mycfg.api_key == '' {
		k := os.getenv('OPENROUTER_API_KEY')
		if k != '' {
			mycfg.api_key = k
		} else {
			// no key configured and none in the environment: fail hard
			return error('OPENROUTER_API_KEY environment variable not set')
		}
	}
	return mycfg
}
// connection builds an HTTP connection to the configured API base URL,
// authenticated with the client's API key as a bearer token.
pub fn (mut client OpenRouter) connection() !&httpconnection.HTTPConnection {
	mut c2 := httpconnection.new(
		name:  'openrouterconnection_${client.name}'
		url:   client.url
		cache: false // responses are never cached
		retry: 20 // NOTE(review): 20 retries seems high — confirm intended
	)!
	c2.default_header.set(.authorization, 'Bearer ${client.api_key}')
	return c2
}
/////////////NORMALLY NO NEED TO TOUCH

// heroscript_dumps serializes the config object to heroscript text
pub fn heroscript_dumps(obj OpenRouter) !string {
	return encoderhero.encode[OpenRouter](obj)!
}

// heroscript_loads parses heroscript text back into a config object
pub fn heroscript_loads(heroscript string) !OpenRouter {
	mut obj := encoderhero.decode[OpenRouter](heroscript)!
	return obj
}

View File

@@ -0,0 +1,38 @@
module openrouter
// ChatCompletionRaw mirrors the JSON body of the API's completion response.
struct ChatCompletionRaw {
mut:
	id      string
	object  string
	created u32
	choices []ChoiceRaw
	usage   Usage
}
// ChoiceRaw is one generated alternative within the API response.
struct ChoiceRaw {
mut:
	index         int
	message       MessageRaw
	finish_reason string
}
// MessageRaw is the wire form of a chat message (role as a plain string).
struct MessageRaw {
mut:
	role    string
	content string
}
// ChatMessagesRaw is the JSON request body for chat/completions.
// NOTE(review): the 0.5 temperature default differs from CompletionArgs'
// 0.2; chat_completion always overwrites it, but confirm this is intended.
struct ChatMessagesRaw {
mut:
	model                 string
	messages              []MessageRaw
	temperature           f64 = 0.5
	max_completion_tokens int = 32000
}
// Usage holds the token accounting reported by the API for one request.
pub struct Usage {
pub mut:
	prompt_tokens     int
	completion_tokens int
	total_tokens      int
}

View File

@@ -0,0 +1,97 @@
# OpenRouter V Client
A V client for the OpenRouter API, providing access to multiple AI models through a unified interface.
## Quick Start
```v
import incubaid.herolib.clients.openrouter
import incubaid.herolib.core.playcmds
// Configure client (key can be read from env vars)
playcmds.run(
heroscript: '
!!openrouter.configure name:"default"
key:"${YOUR_OPENROUTER_KEY}"
url:"https://openrouter.ai/api/v1"
model_default:"qwen/qwen-2.5-coder-32b-instruct"
'
reset: false
)!
mut client := openrouter.get()!
// Simple chat example
resp := client.chat_completion(
model: "qwen/qwen-2.5-coder-32b-instruct"
message: "Hello, world!"
temperature: 0.6
)!
println('Answer: ${resp.result}')
```
## Environment Variables
The client automatically reads the following environment variables when the corresponding field is not explicitly configured:
- `OPENROUTER_API_KEY` - OpenRouter API key (required if not configured)
- `OPENROUTER_URL` - API base URL (defaults to `https://openrouter.ai/api/v1`)
- `OPENROUTER_AI_MODEL` - Default model (defaults to `qwen/qwen-2.5-coder-32b-instruct`)
## Example with Multiple Messages
```v
import incubaid.herolib.clients.openrouter
mut client := openrouter.get()!
resp := client.chat_completion(
messages: [
openrouter.Message{
role: .system
content: 'You are a helpful coding assistant.'
},
openrouter.Message{
role: .user
content: 'Write a hello world in V'
},
]
temperature: 0.3
max_completion_tokens: 1024
)!
println(resp.result)
```
## Configuration via Heroscript
```hero
!!openrouter.configure
name: "default"
key: "sk-or-v1-..."
url: "https://openrouter.ai/api/v1"
model_default: "qwen/qwen-2.5-coder-32b-instruct"
```
## Features
- **Chat Completion**: Generate text completions using various AI models
- **Multiple Models**: Access to OpenRouter's extensive model catalog
- **Environment Variable Support**: Automatic configuration from environment
- **Factory Pattern**: Manage multiple client instances
- **Retry Logic**: Built-in retry mechanism for failed requests
## Available Models
OpenRouter provides access to many models including:
- `qwen/qwen-2.5-coder-32b-instruct` - Qwen 2.5 Coder (default)
- `anthropic/claude-3.5-sonnet`
- `openai/gpt-4-turbo`
- `google/gemini-pro`
- `meta-llama/llama-3.1-70b-instruct`
- And many more...
Check the [OpenRouter documentation](https://openrouter.ai/docs) for the full list of available models.

View File

@@ -1,6 +1,5 @@
module gittools
import time
import incubaid.herolib.ui.console
import incubaid.herolib.core.texttools
import incubaid.herolib.core