This commit is contained in:
2025-11-22 11:58:46 +02:00
parent a080fa8330
commit 3d8effeac7
10 changed files with 111 additions and 103 deletions

17
examples/ai/aiclient.vsh Executable file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
// Example script: create text embeddings through the herolib AI client.
import incubaid.herolib.ai.client
// Build the AI client; `!` propagates any initialization error to the script runner.
mut cl := client.new()!
// Alternative (kept for reference): chat completion against the local LLM.
// response := cl.llms.llm_local.chat_completion(
// message: 'Explain quantum computing in simple terms'
// temperature: 0.5
// max_completion_tokens: 1024
// )!
// Request an embedding for one sample sentence from the local embedding model.
// NOTE(review): llm_embed_local appears to target a local server
// (localhost:1234 elsewhere in this commit) — confirm it is running before use.
response := cl.llms.llm_embed_local.embed(input: [
	'The food was delicious and the waiter..',
])!
// Print the raw EmbeddingResponse for inspection.
println(response)

View File

@@ -1,7 +1,5 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
module main
import incubaid.herolib.clients.openai
import os
import incubaid.herolib.core.playcmds
@@ -10,8 +8,8 @@ import incubaid.herolib.core.playcmds
playcmds.run(
heroscript: '
!!openai.configure name:"groq"
url:"https://api.groq.com/openai/v1"
!!openai.configure name:"groq"
url:"https://api.groq.com/openai/v1"
model_default:"openai/gpt-oss-120b"
'
reset: true

View File

@@ -1,7 +1,5 @@
module client
import incubaid.herolib.core.pathlib
@[heap]
pub struct AIClient {
pub mut:

View File

@@ -5,14 +5,16 @@ import os
pub struct AIClientLLMs {
pub mut:
llm_maverick &openai.OpenAI
llm_qwen &openai.OpenAI
llm_120b &openai.OpenAI
llm_best &openai.OpenAI
llm_flash &openai.OpenAI
llm_pro &openai.OpenAI
llm_morph &openai.OpenAI
llm_embed &openai.OpenAI
llm_maverick &openai.OpenAI
llm_qwen &openai.OpenAI
llm_120b &openai.OpenAI
llm_best &openai.OpenAI
llm_flash &openai.OpenAI
llm_pro &openai.OpenAI
llm_morph &openai.OpenAI
llm_embed &openai.OpenAI
llm_local &openai.OpenAI
llm_embed_local &openai.OpenAI
}
// Initialize all LLM clients
@@ -71,7 +73,7 @@ pub fn llms_init() !AIClientLLMs {
name: 'pro'
api_key: openrouter_key
url: 'https://api.openrouter.ai/api/v1'
model_default: 'google/gemini-2.5-pro'
model_default: 'google/gemini-3.0-pro'
}
openai.set(pro_client)!
@@ -91,14 +93,30 @@ pub fn llms_init() !AIClientLLMs {
}
openai.set(embed_client)!
mut local_client := openai.OpenAI{
name: 'local'
url: 'http://localhost:1234/v1'
model_default: 'google/gemma-3-12b'
}
openai.set(local_client)!
mut local_embed_client := openai.OpenAI{
name: 'embedlocal'
url: 'http://localhost:1234/v1'
model_default: 'text-embedding-nomic-embed-text-v1.5:2'
}
openai.set(local_embed_client)!
return AIClientLLMs{
llm_maverick: openai.get(name: 'maverick')!
llm_qwen: openai.get(name: 'qwen')!
llm_120b: openai.get(name: 'llm_120b')!
llm_best: openai.get(name: 'best')!
llm_flash: openai.get(name: 'flash')!
llm_pro: openai.get(name: 'pro')!
llm_morph: openai.get(name: 'morph')!
llm_embed: openai.get(name: 'embed')!
llm_maverick: openai.get(name: 'maverick')!
llm_qwen: openai.get(name: 'qwen')!
llm_120b: openai.get(name: 'llm_120b')!
llm_best: openai.get(name: 'best')!
llm_flash: openai.get(name: 'flash')!
llm_pro: openai.get(name: 'pro')!
llm_morph: openai.get(name: 'morph')!
llm_embed: openai.get(name: 'embed')!
llm_local: openai.get(name: 'local')!
llm_embed_local: openai.get(name: 'embedlocal')!
}
}

View File

@@ -9,6 +9,7 @@ pub enum LLMEnum {
flash
pro
morph
local
}
fn llm_to_model_url(model LLMEnum) !(string, string) {
@@ -22,5 +23,6 @@ fn llm_to_model_url(model LLMEnum) !(string, string) {
.flash { 'google/gemini-2.5-flash', 'https://api.openrouter.ai/api/v1' }
.pro { 'google/gemini-2.5-pro', 'https://api.openrouter.ai/api/v1' }
.morph { 'morph/morph-v3-fast', 'https://api.openrouter.ai/api/v1' }
.local { 'google/gemma-3-12b', 'http://localhost:1234/v1' }
}
}

View File

@@ -84,8 +84,10 @@ pub fn (mut f OpenAI) chat_completion(args_ CompletionArgs) !ChatCompletion {
m.messages << mr
}
data := json.encode(m)
// println('data: ${data}')
println('data: ${data}')
mut conn := f.connection()!
println(conn)
r := conn.post_json_str(prefix: 'chat/completions', data: data)!
res := json.decode(ChatCompletionRaw, r)!

View File

@@ -0,0 +1,49 @@
module openai
import json
// pub enum EmbeddingModel {
// text_embedding_ada
// }
// fn embedding_model_str(e EmbeddingModel) string {
// return match e {
// .text_embedding_ada {
// 'text-embedding-ada-002'
// }
// }
// }
// Parameters for an embeddings request, mirroring the body of the
// OpenAI-compatible `POST /embeddings` endpoint.
@[params]
pub struct EmbeddingCreateRequest {
pub mut:
	input []string @[required] // texts to embed (caller must supply this field)
	model string // embedding model id; empty means "use the client's model_default"
	user  string // optional end-user identifier forwarded to the API
}
// A single embedding result within an EmbeddingResponse.
pub struct Embedding {
pub mut:
	object    string // object type tag as returned by the API — TODO confirm value (likely 'embedding')
	embedding []f32  // the embedding vector
	index     int    // position of this result — presumably matches the request's input index; verify
}
// Top-level payload decoded from an embeddings call.
pub struct EmbeddingResponse {
pub mut:
	object string      // object type tag from the API
	data   []Embedding // embedding results — NOTE(review): presumably one per input string; confirm against API docs
	model  string      // model that produced the embeddings
	usage  Usage       // token usage accounting (Usage is declared elsewhere in this module)
}
// embed requests vector embeddings for the given inputs from the
// OpenAI-compatible endpoint configured on this client.
// When no model is supplied, the client's model_default is used.
pub fn (mut f OpenAI) embed(args_ EmbeddingCreateRequest) !EmbeddingResponse {
	mut request := args_
	if request.model.len == 0 {
		request.model = f.model_default
	}
	payload := json.encode(request)
	mut conn := f.connection()!
	raw := conn.post_json_str(prefix: 'embeddings', data: payload)!
	return json.decode(EmbeddingResponse, raw)!
}

View File

@@ -1,17 +0,0 @@
# Quick Example: Creating Embeddings
```v
import incubaid.herolib.clients.openai
mut client:= openai.get()! //will be the default client, key is in `AIKEY` on environment variable or `OPENROUTER_API_KEY`
text_to_embed := 'The quick brown fox jumps over the lazy dog.'
resp := client.embeddings.create_embedding(
input: text_to_embed,
model: 'text-embedding-ada-002'
)!
```

View File

@@ -1,59 +0,0 @@
module embeddings
import json
import incubaid.herolib.clients.openai { OpenAI, Usage }
type OpenAIAlias = OpenAI
pub enum EmbeddingModel {
text_embedding_ada
}
fn embedding_model_str(e EmbeddingModel) string {
return match e {
.text_embedding_ada {
'text-embedding-ada-002'
}
}
}
@[params]
pub struct EmbeddingCreateArgs {
input []string @[required]
model EmbeddingModel @[required]
user string
}
pub struct EmbeddingCreateRequest {
input []string
model string
user string
}
pub struct Embedding {
pub mut:
object string
embedding []f32
index int
}
pub struct EmbeddingResponse {
pub mut:
object string
data []Embedding
model string
usage Usage
}
pub fn (mut f OpenAIAlias) create_embeddings(args EmbeddingCreateArgs) !EmbeddingResponse {
req := EmbeddingCreateRequest{
input: args.input
model: embedding_model_str(args.model)
user: args.user
}
data := json.encode(req)
mut conn := f.connection()!
r := conn.post_json_str(prefix: 'embeddings', data: data)!
return json.decode(EmbeddingResponse, r)!
}

View File

@@ -52,9 +52,9 @@ fn obj_init(mycfg_ OpenAI) !OpenAI {
}
}
}
if mycfg.api_key == '' {
return error('OpenAI client "${mycfg.name}" missing api_key')
}
// if mycfg.api_key == '' {
// return error('OpenAI client "${mycfg.name}" missing api_key')
// }
return mycfg
}