implement groq example using openai client

This commit is contained in:
Timur Gordon
2025-03-14 23:07:13 +01:00
parent 02e4ea180d
commit ef922d162e
7 changed files with 130 additions and 15 deletions

View File

@@ -0,0 +1 @@
export GROQ_API_KEY="your-groq-api-key-here"

View File

@@ -0,0 +1,64 @@
# Groq AI Client Example
This example demonstrates how to use Groq's AI API with the herolib OpenAI client. Groq provides API compatibility with OpenAI's client libraries, allowing you to leverage Groq's fast inference speeds with minimal changes to your existing code.
## Prerequisites
- V programming language installed
- A Groq API key (get one from [Groq's website](https://console.groq.com/keys))
## Setup
1. Copy the `.env.example` file to `.env`:

   ```bash
   cp .env.example .env
   ```

2. Edit the `.env` file and replace `your-groq-api-key-here` with your actual Groq API key.

3. Load the environment variables:

   ```bash
   source .env
   ```
## Running the Example
Execute the script with:
```bash
v run groq_client.vsh
```
Or make it executable first:
```bash
chmod +x groq_client.vsh
./groq_client.vsh
```
## How It Works
The example uses the existing OpenAI client from herolib but points it at Groq's API endpoint:
1. Reads the Groq API key from the `GROQ_API_KEY` environment variable
2. Constructs the OpenAI client with that key
3. Sets the client's `server_url` to Groq's OpenAI-compatible URL (`https://api.groq.com/openai/v1`) instead of the default OpenAI endpoint
4. Sends a chat completion request to Groq's API
5. Prints the response and token usage statistics
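Condensed from `groq_client.vsh` below, the core configuration is just a few lines:

```v
import freeflowuniverse.herolib.clients.openai
import os

// Reuse the stock OpenAI client, pointed at Groq's OpenAI-compatible endpoint
mut client := openai.OpenAI{
	name:       'groq'
	api_key:    os.getenv('GROQ_API_KEY')
	server_url: 'https://api.groq.com/openai/v1'
}
```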
## Supported Models
Groq supports various models including:

- llama2-70b-4096
- mixtral-8x7b-32768
- gemma-7b-it

For a complete and up-to-date list of supported models, refer to the [Groq API documentation](https://console.groq.com/docs/models).
## Notes
- The example passes the model name as a plain string (`qwen-2.5-coder-32b` in `groq_client.vsh`); the client forwards it to Groq unchanged, so it must be a model that Groq actually serves.
- For production use, explicitly pin one of Groq's currently supported models rather than copying the example's default.
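For example, to pin a specific model with the `client` configured above (the model name is illustrative; check Groq's model list for what is currently served):

```v
// Pass any Groq-served model name as a plain string
res := client.chat_completion('mixtral-8x7b-32768', openai.Messages{
	messages: [
		openai.Message{
			role:    .user
			content: 'Say hello from Groq.'
		},
	]
})!
println(res.choices[0].message.content)
```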

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

module main

import freeflowuniverse.herolib.clients.openai
import os

fn main() {
	// Get API key from environment variable
	key := os.getenv('GROQ_API_KEY')
	if key == '' {
		println('Error: GROQ_API_KEY environment variable not set')
		println('Please set it by running: source .env')
		exit(1)
	}

	// Configure the OpenAI client to talk to Groq's OpenAI-compatible endpoint
	mut client := openai.OpenAI{
		name:       'groq'
		api_key:    key
		server_url: 'https://api.groq.com/openai/v1'
	}

	// Define the model for the chat completion
	// Note: use a model that Groq supports; see https://console.groq.com/docs/models
	model := 'qwen-2.5-coder-32b'

	// Create a chat completion request
	res := client.chat_completion(model, openai.Messages{
		messages: [
			openai.Message{
				role:    .user
				content: 'What are the key differences between Groq and other AI inference providers?'
			},
		]
	})!

	// Print the response
	println('\nGroq AI Response:')
	println('==================')
	println(res.choices[0].message.content)

	println('\nUsage Statistics:')
	println('Prompt tokens: ${res.usage.prompt_tokens}')
	println('Completion tokens: ${res.usage.completion_tokens}')
	println('Total tokens: ${res.usage.total_tokens}')
}

View File

@@ -11,7 +11,7 @@ msg << op.Message{
 mut msgs := op.Messages{
 	messages: msg
 }
-res := ai_cli.chat_completion(op.ModelType.gpt_3_5_turbo, msgs)!
+res := ai_cli.chat_completion(op.ModelType.gpt_3_5_turbo.str(), msgs)!
 print(res)
 models := ai_cli.list_models()!

View File

@@ -10,7 +10,7 @@ fn test_chat_completion() {
 mut client := get()!
-res := client.chat_completion(.gpt_4o_2024_08_06, Messages{
+res := client.chat_completion(.gpt_4o_2024_08_06.str(), Messages{
 	messages: [
 		Message{
 			role: .user
View File

@@ -50,10 +50,9 @@ mut:
 // creates a new chat completion given a list of messages
 // each message consists of message content and the role of the author
-pub fn (mut f OpenAI) chat_completion(model_type ModelType, msgs Messages) !ChatCompletion {
-	model_type0 := modelname_str(model_type)
+pub fn (mut f OpenAI) chat_completion(model_type string, msgs Messages) !ChatCompletion {
 	mut m := ChatMessagesRaw{
-		model: model_type0
+		model: model_type
 	}
 	for msg in msgs.messages {
 		mr := MessageRaw{

View File

@@ -25,18 +25,18 @@ pub struct OpenAI {
 pub mut:
 	name       string = 'default'
 	api_key    string @[secret]
+	server_url string
 	conn       ?&httpconnection.HTTPConnection
 }

-fn cfg_play(p paramsparser.Params) ! {
-	// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above
-	mut mycfg := OpenAI{
-		name:    p.get_default('name', 'default')!
-		api_key: p.get('api_key')!
-	}
-	set(mycfg)!
-}
+// fn cfg_play(p paramsparser.Params) ! {
+// 	// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above
+// 	mut mycfg := OpenAI{
+// 		name: p.get_default('name', 'default')!
+// 		api_key: p.get('api_key')!
+// 	}
+// 	set(mycfg)!
+// }

 fn obj_init(obj_ OpenAI) !OpenAI {
 	// never call get here, only thing we can do here is work on object itself
@@ -45,10 +45,15 @@ fn obj_init(obj_ OpenAI) !OpenAI {
 }

 pub fn (mut client OpenAI) connection() !&httpconnection.HTTPConnection {
+	server_url := if client.server_url != '' {
+		client.server_url
+	} else {
+		'https://api.openai.com/v1'
+	}
 	mut c := client.conn or {
 		mut c2 := httpconnection.new(
 			name: 'openaiconnection_${client.name}'
-			url: 'https://api.openai.com/v1'
+			url: server_url
 			cache: false
 			retry: 20
 		)!
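
Since `server_url` is now part of the client config, this same code path can target any OpenAI-compatible endpoint, not just Groq. A minimal sketch (the name, env var, and localhost URL are illustrative, not part of this commit):

```v
import freeflowuniverse.herolib.clients.openai
import os

// Leaving server_url empty falls back to https://api.openai.com/v1 in connection()
mut local := openai.OpenAI{
	name:       'local'
	api_key:    os.getenv('LOCAL_API_KEY') // hypothetical env var
	server_url: 'http://localhost:8000/v1' // e.g. a self-hosted inference server
}
```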