chore: Remove openrouter client
- Remove call to openrouter.play from the main play function
- Use the OpenAI client instead
- Update the examples
- Update the README
examples/ai/openai/README.md (new file, 120 lines added)

@@ -0,0 +1,120 @@
# OpenRouter Examples - Proof of Concept

## Overview

This folder contains **example scripts** demonstrating how to use the **OpenAI client** (`herolib.clients.openai`) configured to work with **OpenRouter**.

* **Goal:** Show how to send messages to OpenRouter models using the OpenAI client, run a **two-model pipeline** for code enhancement, and illustrate multi-model usage.
* **Key Insight:** The OpenAI client is OpenRouter-compatible by design - simply configure it with OpenRouter's base URL (`https://openrouter.ai/api/v1`) and API key.

---

## Configuration

All examples configure the OpenAI client to use OpenRouter by setting:

* **URL**: `https://openrouter.ai/api/v1`
* **API Key**: Read from the `OPENROUTER_API_KEY` environment variable
* **Model**: OpenRouter model IDs (e.g., `qwen/qwen-2.5-coder-32b-instruct`)

Example configuration:

```v
playcmds.run(
    heroscript: '
        !!openai.configure
            name: "default"
            url: "https://openrouter.ai/api/v1"
            model_default: "qwen/qwen-2.5-coder-32b-instruct"
    '
)!
```

---

## Example Scripts

### 1. `openai_init.vsh`

* **Purpose:** Basic initialization example showing the OpenAI client configured for OpenRouter.
* **Demonstrates:** Client configuration and simple chat completion.
* **Usage:**

```bash
examples/ai/openai/openai_init.vsh
```

---

### 2. `openai_hello.vsh`

* **Purpose:** Simple hello message to OpenRouter.
* **Demonstrates:** Sending a single message using `client.chat_completion` (a minimal sketch follows below).
* **Usage:**

```bash
examples/ai/openai/openai_hello.vsh
```

* **Expected output:** A friendly "hello" response from the AI and token usage.
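
A minimal sketch of that single-message pattern, assembled from the configuration block above and the `openai.Message` usage shown in the updated scripts. The prompt text is illustrative, and it assumes the configured `model_default` is used when no `model` is passed:

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds

// Point the OpenAI client at OpenRouter; the API key is read from OPENROUTER_API_KEY.
playcmds.run(
    heroscript: '
        !!openai.configure
            name: "default"
            url: "https://openrouter.ai/api/v1"
            model_default: "qwen/qwen-2.5-coder-32b-instruct"
    '
)!

mut client := openai.get()!

// Send one user message and print the answer plus token usage.
r := client.chat_completion(
    messages: [
        openai.Message{
            role: .user
            content: 'Say hello in one short sentence.'
        },
    ]
)!
println(r.result)
println('Tokens used: ${r.usage.total_tokens}')
```
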
---

### 3. `openai_example.vsh`

* **Purpose:** Demonstrates basic conversation features.
* **Demonstrates:**
  * Sending a single message
  * Using system + user messages for conversation context (sketched below)
  * Printing token usage
* **Usage:**

```bash
examples/ai/openai/openai_example.vsh
```

* **Expected output:** Responses from the AI for both simple and system-prompt conversations.
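
A sketch of the system + user conversation pattern, using the `openai.Message` struct exactly as it appears in the updated scripts. It assumes a client was already configured as in the Configuration section; the prompts are placeholders:

```v
import incubaid.herolib.clients.openai

mut client := openai.get()!

// The system message sets the assistant's behaviour; the user message carries the question.
r := client.chat_completion(
    model: 'qwen/qwen-2.5-coder-32b-instruct'
    messages: [
        openai.Message{
            role: .system
            content: 'You are a helpful coding assistant who speaks concisely.'
        },
        openai.Message{
            role: .user
            content: 'What is V programming language?'
        },
    ]
    temperature: 0.3
    max_completion_tokens: 1024
)!
println(r.result)
println('Tokens used: ${r.usage.total_tokens}')
```
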
---

### 4. `openai_two_model_pipeline.vsh`

* **Purpose:** Two-model code enhancement pipeline (proof of concept).
* **Demonstrates:**
  * Model A (`qwen/qwen-2.5-coder-32b-instruct`) suggests code improvements.
  * Model B (`meta-llama/llama-3.3-70b-instruct`) applies the suggested edits.
  * Tracks tokens and shows before/after code.
  * Using two separate OpenAI client instances with different models (a condensed sketch follows the output list).
* **Usage:**

```bash
examples/ai/openai/openai_two_model_pipeline.vsh
```

* **Expected output:**
  * Original code
  * Suggested edits
  * Final updated code
  * Token usage summary
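
A condensed sketch of how the pipeline wires up its two clients, following the `!!openai.configure` blocks and `openai.get(name: ...)` calls in the script. The prompts are placeholders; the real script adds error handling, progress output, and the before/after code display:

```v
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds

// Model A: enhancement model (Qwen 2.5 Coder)
playcmds.run(
    heroscript: '
        !!openai.configure
            name: "enhancer"
            url: "https://openrouter.ai/api/v1"
            model_default: "qwen/qwen-2.5-coder-32b-instruct"
    '
)!

// Model B: modification model (Llama 3.3 70B)
playcmds.run(
    heroscript: '
        !!openai.configure
            name: "modifier"
            url: "https://openrouter.ai/api/v1"
            model_default: "meta-llama/llama-3.3-70b-instruct"
    '
)!

mut enhancer := openai.get(name: 'enhancer') or { panic('Failed to get enhancer client: ${err}') }
mut modifier := openai.get(name: 'modifier') or { panic('Failed to get modifier client: ${err}') }

// Step 1: Model A analyses the code and suggests edits.
suggestions := enhancer.chat_completion(
    messages: [
        openai.Message{
            role: .user
            content: 'Suggest improvements for the following code: ...'
        },
    ]
)!

// Step 2: Model B applies the suggested edits and returns the updated code.
updated := modifier.chat_completion(
    messages: [
        openai.Message{
            role: .user
            content: 'Apply these edits to the code:\n${suggestions.result}'
        },
    ]
)!

println(updated.result)
println('Total tokens: ${suggestions.usage.total_tokens + updated.usage.total_tokens}')
```
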
---

## Environment Variables

Set your OpenRouter API key before running the examples:

```bash
export OPENROUTER_API_KEY="sk-or-v1-..."
```

The OpenAI client automatically detects when the URL contains "openrouter" and will use the `OPENROUTER_API_KEY` environment variable.

---

## Notes

1. **No separate OpenRouter client needed** - The OpenAI client is fully compatible with OpenRouter's API.
2. All scripts configure the OpenAI client with OpenRouter's base URL.
3. The two-model pipeline uses **two separate client instances** (one per model) to demonstrate multi-model workflows.
4. Scripts can be run individually using the `v -enable-globals run` command (see the command sketch below).
5. The two-model pipeline is a **proof of concept**; the flow can later be extended to multiple files or OpenRPC specs.
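
For note 4, a sketch of the two equivalent ways to launch a script; the explicit flags are taken from the scripts' shebang line, and the path is simply one of the examples above:

```bash
# Run via the shebang (the scripts are executable)
./examples/ai/openai/openai_hello.vsh

# Or invoke V explicitly with the same flags the shebang uses
v -n -w -gc none -cc tcc -d use_openssl -enable-globals run examples/ai/openai/openai_hello.vsh
```
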
@@ -1,12 +1,22 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import incubaid.herolib.clients.openrouter
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds

// Get the client instance
mut client := openrouter.get()!
// Configure OpenAI client to use OpenRouter
playcmds.run(
    heroscript: '
        !!openai.configure
            name: "default"
            url: "https://openrouter.ai/api/v1"
            model_default: "qwen/qwen-2.5-coder-32b-instruct"
    '
)!

println('🤖 OpenRouter Client Example')
// Get the client instance
mut client := openai.get()!

println('🤖 OpenRouter Client Example (using OpenAI client)')
println('═'.repeat(50))
println('')

@@ -29,11 +39,11 @@ println('─'.repeat(50))
r = client.chat_completion(
    model: 'qwen/qwen-2.5-coder-32b-instruct'
    messages: [
        openrouter.Message{
        openai.Message{
            role: .system
            content: 'You are a helpful coding assistant who speaks concisely.'
        },
        openrouter.Message{
        openai.Message{
            role: .user
            content: 'What is V programming language?'
        },
@@ -1,10 +1,20 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import incubaid.herolib.clients.openrouter
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds

// Configure OpenAI client to use OpenRouter
playcmds.run(
    heroscript: '
        !!openai.configure
            name: "default"
            url: "https://openrouter.ai/api/v1"
            model_default: "qwen/qwen-2.5-coder-32b-instruct"
    '
)!

// Get the client instance
mut client := openrouter.get() or {
mut client := openai.get() or {
    eprintln('Failed to get client: ${err}')
    return
}
@@ -3,7 +3,7 @@
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds

//to set the API key, either set it here, or set the OPENAI_API_KEY environment variable
// to set the API key, either set it here, or set the OPENAI_API_KEY environment variable

playcmds.run(
    heroscript: '

@@ -20,3 +20,5 @@ mut r := client.chat_completion(
    temperature: 0.3
    max_completion_tokens: 1024
)!

println(r.result)
@@ -1,6 +1,6 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import incubaid.herolib.clients.openrouter
import incubaid.herolib.clients.openai
import incubaid.herolib.core.playcmds

// Sample code file to be improved

@@ -19,23 +19,41 @@ def find_max(lst):
    return max
'

mut modifier := openrouter.get(name: 'modifier', create: true) or {
    panic('Failed to get modifier client: ${err}')
}
// Configure two OpenAI client instances to use OpenRouter with different models
// Model A: Enhancement model (Qwen Coder)
playcmds.run(
    heroscript: '
        !!openai.configure
            name: "enhancer"
            url: "https://openrouter.ai/api/v1"
            model_default: "qwen/qwen-2.5-coder-32b-instruct"
    '
)!

mut enhancer := openrouter.get(name: 'enhancer', create: true) or {
    panic('Failed to get enhancer client: ${err}')
}
// Model B: Modification model (Llama 3.3 70B)
playcmds.run(
    heroscript: '
        !!openai.configure
            name: "modifier"
            url: "https://openrouter.ai/api/v1"
            model_default: "meta-llama/llama-3.3-70b-instruct"
    '
)!

mut enhancer := openai.get(name: 'enhancer') or { panic('Failed to get enhancer client: ${err}') }

mut modifier := openai.get(name: 'modifier') or { panic('Failed to get modifier client: ${err}') }

println('═'.repeat(70))
println('🔧 Two-Model Code Enhancement Pipeline - Proof of Concept')
println('🔧 Using OpenAI client configured for OpenRouter')
println('═'.repeat(70))
println('')

// Step 1: Get enhancement suggestions from Model A (Qwen3 Coder 480B)
// Step 1: Get enhancement suggestions from Model A (Qwen Coder)
println('📝 STEP 1: Code Enhancement Analysis')
println('─'.repeat(70))
println('Model: Qwen3 Coder 480B A35B')
println('Model: qwen/qwen-2.5-coder-32b-instruct')
println('Task: Analyze code and suggest improvements\n')

enhancement_prompt := 'You are a code enhancement agent.

@@ -68,10 +86,10 @@ println(enhancement_result.result)
println('─'.repeat(70))
println('Tokens used: ${enhancement_result.usage.total_tokens}\n')

// Step 2: Apply edits using Model B (morph-v3-fast)
// Step 2: Apply edits using Model B (Llama 3.3 70B)
println('\n📝 STEP 2: Apply Code Modifications')
println('─'.repeat(70))
println('Model: morph-v3-fast')
println('Model: meta-llama/llama-3.3-70b-instruct')
println('Task: Apply the suggested edits to produce updated code\n')

modification_prompt := 'You are a file editing agent.

@@ -106,9 +124,9 @@ println('Tokens used: ${modification_result.usage.total_tokens}\n')
println('\n📊 PIPELINE SUMMARY')
println('═'.repeat(70))
println('Original code length: ${sample_code.len} chars')
println('Enhancement model: qwen/qwq-32b-preview (Qwen3 Coder 480B A35B)')
println('Enhancement model: qwen/qwen-2.5-coder-32b-instruct')
println('Enhancement tokens: ${enhancement_result.usage.total_tokens}')
println('Modification model: neversleep/llama-3.3-70b-instruct (morph-v3-fast)')
println('Modification model: meta-llama/llama-3.3-70b-instruct')
println('Modification tokens: ${modification_result.usage.total_tokens}')
println('Total tokens: ${enhancement_result.usage.total_tokens +
    modification_result.usage.total_tokens}')
@@ -1,78 +0,0 @@
# OpenRouter Examples - Proof of Concept

## Overview

This folder contains **example scripts** demonstrating the usage of the OpenRouter V client (`herolib.clients.openrouter`).

* **Goal:** Show how to send messages to OpenRouter models, run a **two-model pipeline** for code enhancement, and illustrate multi-model usage.

---

## Example Scripts

### 1. `say_hello.vsh`

* **Purpose:** Simple hello message to OpenRouter.
* **Demonstrates:** Sending a single message using `client.chat_completion`.
* **Usage:**

```bash
examples/clients/openrouter/openrouter_hello.vsh
```

* **Expected output:** A friendly "hello" response from the AI and token usage.

---

### 2. `openrouter_example.vsh`

* **Purpose:** Demonstrates basic conversation features.
* **Demonstrates:**
  * Sending a single message
  * Using system + user messages for conversation context
  * Printing token usage
* **Usage:**

```bash
examples/clients/openrouter/openrouter_example.vsh
```

* **Expected output:** Responses from the AI for both simple and system-prompt conversations.

---

### 3. `openrouter_two_model_pipeline.vsh`

* **Purpose:** Two-model code enhancement pipeline (proof of concept).
* **Demonstrates:**
  * Model A (`Qwen3 Coder`) suggests code improvements.
  * Model B (`morph-v3-fast`) applies the suggested edits.
  * Tracks tokens and shows before/after code.
* **Usage:**

```bash
examples/clients/openrouter/openrouter_two_model_pipeline.vsh
```

* **Expected output:**
  * Original code
  * Suggested edits
  * Final updated code
  * Token usage summary

---

## Notes

1. Ensure your **OpenRouter API key** is set:

```bash
export OPENROUTER_API_KEY="sk-or-v1-..."
```

2. All scripts use the **same OpenRouter client** instance for simplicity, except the two-model pipeline which uses **two separate client instances** (one per model).
3. Scripts can be run individually using the `v -enable-globals run` command.
4. The two-model pipeline is a **proof of concept**; the flow can later be extended to multiple files or OpenRPC specs.
@@ -1,7 +0,0 @@
!!hero_code.generate_client
    name:'openrouter'
    classname:'OpenRouter'
    singleton:0
    default:1
    hasconfig:1
    reset:0
@@ -1,13 +0,0 @@
module openrouter

fn test_factory() {
    mut client := get(name: 'default', create: true)!
    assert client.name == 'default'
    assert client.url == 'https://openrouter.ai/api/v1'
    assert client.model_default == 'qwen/qwen-2.5-coder-32b-instruct'
}

fn test_client_creation() {
    mut client := new(name: 'test_client')!
    assert client.name == 'test_client'
}
@@ -1,97 +0,0 @@
module openrouter

import json

@[params]
pub struct CompletionArgs {
pub mut:
    model string
    messages []Message // optional because we can use message, which means we just pass a string
    message string
    temperature f64 = 0.2
    max_completion_tokens int = 32000
}

pub struct Message {
pub mut:
    role RoleType
    content string
}

pub enum RoleType {
    system
    user
    assistant
    function
}

fn roletype_str(x RoleType) string {
    return match x {
        .system {
            'system'
        }
        .user {
            'user'
        }
        .assistant {
            'assistant'
        }
        .function {
            'function'
        }
    }
}

pub struct ChatCompletion {
pub mut:
    id string
    created u32
    result string
    usage Usage
}

// creates a new chat completion given a list of messages
// each message consists of message content and the role of the author
pub fn (mut f OpenRouter) chat_completion(args_ CompletionArgs) !ChatCompletion {
    mut args := args_
    if args.model == '' {
        args.model = f.model_default
    }
    mut m := ChatMessagesRaw{
        model: args.model
        temperature: args.temperature
        max_completion_tokens: args.max_completion_tokens
    }
    for msg in args.messages {
        mr := MessageRaw{
            role: roletype_str(msg.role)
            content: msg.content
        }
        m.messages << mr
    }
    if args.message != '' {
        mr := MessageRaw{
            role: 'user'
            content: args.message
        }
        m.messages << mr
    }
    data := json.encode(m)
    mut conn := f.connection()!
    r := conn.post_json_str(prefix: 'chat/completions', data: data)!

    res := json.decode(ChatCompletionRaw, r)!

    mut result := ''
    for choice in res.choices {
        result += choice.message.content
    }

    mut chat_completion_result := ChatCompletion{
        id: res.id
        created: res.created
        result: result
        usage: res.usage
    }
    return chat_completion_result
}
@@ -1,140 +0,0 @@
module openrouter

import incubaid.herolib.core.base
import incubaid.herolib.core.playbook { PlayBook }
import incubaid.herolib.ui.console
import json

__global (
    openrouter_global map[string]&OpenRouter
    openrouter_default string
)

/////////FACTORY

@[params]
pub struct ArgsGet {
pub mut:
    name string = 'default'
    fromdb bool // will load from filesystem
    create bool // default will not create if not exist
}

pub fn new(args ArgsGet) !&OpenRouter {
    mut obj := OpenRouter{
        name: args.name
    }
    set(obj)!
    return get(name: args.name)!
}

pub fn get(args ArgsGet) !&OpenRouter {
    mut context := base.context()!
    openrouter_default = args.name
    if args.fromdb || args.name !in openrouter_global {
        mut r := context.redis()!
        if r.hexists('context:openrouter', args.name)! {
            data := r.hget('context:openrouter', args.name)!
            if data.len == 0 {
                print_backtrace()
                return error('OpenRouter with name: ${args.name} does not exist, prob bug.')
            }
            mut obj := json.decode(OpenRouter, data)!
            set_in_mem(obj)!
        } else {
            if args.create {
                new(args)!
            } else {
                print_backtrace()
                return error("OpenRouter with name '${args.name}' does not exist")
            }
        }
        return get(name: args.name)! // no longer from db nor create
    }
    return openrouter_global[args.name] or {
        print_backtrace()
        return error('could not get config for openrouter with name:${args.name}')
    }
}

// register the config for the future
pub fn set(o OpenRouter) ! {
    mut o2 := set_in_mem(o)!
    openrouter_default = o2.name
    mut context := base.context()!
    mut r := context.redis()!
    r.hset('context:openrouter', o2.name, json.encode(o2))!
}

// does the config exists?
pub fn exists(args ArgsGet) !bool {
    mut context := base.context()!
    mut r := context.redis()!
    return r.hexists('context:openrouter', args.name)!
}

pub fn delete(args ArgsGet) ! {
    mut context := base.context()!
    mut r := context.redis()!
    r.hdel('context:openrouter', args.name)!
}

@[params]
pub struct ArgsList {
pub mut:
    fromdb bool // will load from filesystem
}

// if fromdb set: load from filesystem, and not from mem, will also reset what is in mem
pub fn list(args ArgsList) ![]&OpenRouter {
    mut res := []&OpenRouter{}
    mut context := base.context()!
    if args.fromdb {
        // reset what is in mem
        openrouter_global = map[string]&OpenRouter{}
        openrouter_default = ''
    }
    if args.fromdb {
        mut r := context.redis()!
        mut l := r.hkeys('context:openrouter')!

        for name in l {
            res << get(name: name, fromdb: true)!
        }
        return res
    } else {
        // load from memory
        for _, client in openrouter_global {
            res << client
        }
    }
    return res
}

// only sets in mem, does not set as config
fn set_in_mem(o OpenRouter) !OpenRouter {
    mut o2 := obj_init(o)!
    openrouter_global[o2.name] = &o2
    openrouter_default = o2.name
    return o2
}

pub fn play(mut plbook PlayBook) ! {
    if !plbook.exists(filter: 'openrouter.') {
        return
    }
    mut install_actions := plbook.find(filter: 'openrouter.configure')!
    if install_actions.len > 0 {
        for mut install_action in install_actions {
            heroscript := install_action.heroscript()
            mut obj2 := heroscript_loads(heroscript)!
            set(obj2)!
            install_action.done = true
        }
    }
}

// switch instance to be used for openrouter
pub fn switch(name string) {
    openrouter_default = name
}
@@ -1,67 +0,0 @@
module openrouter

import incubaid.herolib.data.encoderhero
import incubaid.herolib.core.httpconnection
import os

pub const version = '0.0.0'
const singleton = false
const default = true

@[heap]
pub struct OpenRouter {
pub mut:
    name string = 'default'
    api_key string
    url string = 'https://openrouter.ai/api/v1'
    model_default string = 'qwen/qwen-2.5-coder-32b-instruct'
}

// your checking & initialization code if needed
fn obj_init(mycfg_ OpenRouter) !OpenRouter {
    mut mycfg := mycfg_
    if mycfg.model_default == '' {
        k := os.getenv('OPENROUTER_AI_MODEL')
        if k != '' {
            mycfg.model_default = k
        }
    }

    if mycfg.url == '' {
        k := os.getenv('OPENROUTER_URL')
        if k != '' {
            mycfg.url = k
        }
    }
    if mycfg.api_key == '' {
        k := os.getenv('OPENROUTER_API_KEY')
        if k != '' {
            mycfg.api_key = k
        } else {
            return error('OPENROUTER_API_KEY environment variable not set')
        }
    }
    return mycfg
}

pub fn (mut client OpenRouter) connection() !&httpconnection.HTTPConnection {
    mut c2 := httpconnection.new(
        name: 'openrouterconnection_${client.name}'
        url: client.url
        cache: false
        retry: 20
    )!
    c2.default_header.set(.authorization, 'Bearer ${client.api_key}')
    return c2
}

/////////////NORMALLY NO NEED TO TOUCH

pub fn heroscript_dumps(obj OpenRouter) !string {
    return encoderhero.encode[OpenRouter](obj)!
}

pub fn heroscript_loads(heroscript string) !OpenRouter {
    mut obj := encoderhero.decode[OpenRouter](heroscript)!
    return obj
}
@@ -1,38 +0,0 @@
module openrouter

struct ChatCompletionRaw {
mut:
    id string
    object string
    created u32
    choices []ChoiceRaw
    usage Usage
}

struct ChoiceRaw {
mut:
    index int
    message MessageRaw
    finish_reason string
}

struct MessageRaw {
mut:
    role string
    content string
}

struct ChatMessagesRaw {
mut:
    model string
    messages []MessageRaw
    temperature f64 = 0.5
    max_completion_tokens int = 32000
}

pub struct Usage {
pub mut:
    prompt_tokens int
    completion_tokens int
    total_tokens int
}
@@ -1,97 +0,0 @@
# OpenRouter V Client

A V client for the OpenRouter API, providing access to multiple AI models through a unified interface.

## Quick Start

```v
import incubaid.herolib.clients.openrouter
import incubaid.herolib.core.playcmds

// Configure client (key can be read from env vars)
playcmds.run(
    heroscript: '
        !!openrouter.configure name:"default"
            key:"${YOUR_OPENROUTER_KEY}"
            url:"https://openrouter.ai/api/v1"
            model_default:"qwen/qwen-2.5-coder-32b-instruct"
    '
    reset: false
)!

mut client := openrouter.get()!

// Simple chat example
resp := client.chat_completion(
    model: "qwen/qwen-2.5-coder-32b-instruct"
    message: "Hello, world!"
    temperature: 0.6
)!

println('Answer: ${resp.result}')
```

## Environment Variables

The client automatically reads API keys from environment variables if not explicitly configured:

- `OPENROUTER_API_KEY` - OpenRouter API key
- `AIKEY` - Alternative API key variable
- `AIURL` - API base URL (defaults to `https://openrouter.ai/api/v1`)
- `AIMODEL` - Default model (defaults to `qwen/qwen-2.5-coder-32b-instruct`)

## Example with Multiple Messages

```v
import incubaid.herolib.clients.openrouter

mut client := openrouter.get()!

resp := client.chat_completion(
    messages: [
        openrouter.Message{
            role: .system
            content: 'You are a helpful coding assistant.'
        },
        openrouter.Message{
            role: .user
            content: 'Write a hello world in V'
        },
    ]
    temperature: 0.3
    max_completion_tokens: 1024
)!

println(resp.result)
```

## Configuration via Heroscript

```hero
!!openrouter.configure
    name: "default"
    key: "sk-or-v1-..."
    url: "https://openrouter.ai/api/v1"
    model_default: "qwen/qwen-2.5-coder-32b-instruct"
```

## Features

- **Chat Completion**: Generate text completions using various AI models
- **Multiple Models**: Access to OpenRouter's extensive model catalog
- **Environment Variable Support**: Automatic configuration from environment
- **Factory Pattern**: Manage multiple client instances
- **Retry Logic**: Built-in retry mechanism for failed requests

## Available Models

OpenRouter provides access to many models including:

- `qwen/qwen-2.5-coder-32b-instruct` - Qwen 2.5 Coder (default)
- `anthropic/claude-3.5-sonnet`
- `openai/gpt-4-turbo`
- `google/gemini-pro`
- `meta-llama/llama-3.1-70b-instruct`
- And many more...

Check the [OpenRouter documentation](https://openrouter.ai/docs) for the full list of available models.
@@ -10,7 +10,6 @@ import incubaid.herolib.clients.meilisearch
import incubaid.herolib.clients.mycelium
import incubaid.herolib.clients.mycelium_rpc
import incubaid.herolib.clients.openai
import incubaid.herolib.clients.openrouter
import incubaid.herolib.clients.postgresql_client
import incubaid.herolib.clients.qdrant
import incubaid.herolib.clients.rcloneclient

@@ -66,7 +65,6 @@ pub fn run_all(args_ PlayArgs) ! {
    mycelium.play(mut plbook)!
    mycelium_rpc.play(mut plbook)!
    openai.play(mut plbook)!
    openrouter.play(mut plbook)!
    postgresql_client.play(mut plbook)!
    qdrant.play(mut plbook)!
    rcloneclient.play(mut plbook)!