Merge branch 'development_actions007' into development_action007_mahmoud

# Conflicts:
#	lib/clients/jina/jina_client.v
#	lib/clients/jina/jina_factory_.v
#	lib/clients/jina/jina_model.v
#	lib/clients/jina/rank_api.v

@@ -195,13 +195,13 @@ fn initname() !string
e.g. systemd, bash, zinit
fn ipaddr_pub_get() !string
Returns the IP address as known on the public side, using resolver4.opendns.com
fn is_linux() bool
fn is_linux()! bool
fn is_linux_arm()! bool
fn is_linux_intel() bool
fn is_osx() bool
fn is_osx_arm() bool
fn is_osx_intel() bool
fn is_ubuntu() bool
fn is_linux_intel()! bool
fn is_osx()! bool
fn is_osx_arm()! bool
fn is_osx_intel()! bool
fn is_ubuntu()! bool
fn load_env_file(file_path string) !
fn memdb_exists(key string) bool
fn memdb_get(key string) string
cli/.gitignore (vendored, 1 line changed)
@@ -1,3 +1,4 @@
hero
compile
compile_upload
vdo
cli/vdo.v (new file, 12 lines)
@@ -0,0 +1,12 @@
module main

import freeflowuniverse.herolib.mcp.v_do

fn main() {
	// Create and start the MCP server
	mut server := v_do.new_server()
	server.start() or {
		eprintln('Error starting server: $err')
		exit(1)
	}
}
examples/clients/qdrant_example.vsh (new file, 209 lines)
@@ -0,0 +1,209 @@
|
||||
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import freeflowuniverse.herolib.clients.qdrant
|
||||
import os
|
||||
import flag
|
||||
|
||||
mut fp := flag.new_flag_parser(os.args)
|
||||
fp.application('qdrant_example.vsh')
|
||||
fp.version('v0.1.0')
|
||||
fp.description('Example script demonstrating Qdrant client usage')
|
||||
fp.skip_executable()
|
||||
|
||||
help_requested := fp.bool('help', `h`, false, 'Show help message')
|
||||
|
||||
if help_requested {
|
||||
println(fp.usage())
|
||||
exit(0)
|
||||
}
|
||||
|
||||
additional_args := fp.finalize() or {
|
||||
eprintln(err)
|
||||
println(fp.usage())
|
||||
exit(1)
|
||||
}
|
||||
|
||||
// Initialize Qdrant client
|
||||
mut client := qdrant.get(name: 'default') or {
|
||||
// If client doesn't exist, create a new one
|
||||
mut new_client := qdrant.QDrantClient{
|
||||
name: 'default'
|
||||
url: 'http://localhost:6333'
|
||||
}
|
||||
qdrant.set(new_client) or {
|
||||
eprintln('Failed to set Qdrant client: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
new_client
|
||||
}
|
||||
|
||||
println('Connected to Qdrant at ${client.url}')
|
||||
|
||||
// Check if Qdrant is healthy
|
||||
is_healthy := client.health_check() or {
|
||||
eprintln('Failed to check Qdrant health: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
if !is_healthy {
|
||||
eprintln('Qdrant is not healthy')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
println('Qdrant is healthy')
|
||||
|
||||
// Get service info
|
||||
service_info := client.get_service_info() or {
|
||||
eprintln('Failed to get service info: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
println('Qdrant version: ${service_info.version}')
|
||||
|
||||
// Collection name for our example
|
||||
collection_name := 'example_collection'
|
||||
|
||||
// Check if collection exists and delete it if it does
|
||||
collections := client.list_collections() or {
|
||||
eprintln('Failed to list collections: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
if collection_name in collections.result {
|
||||
println('Collection ${collection_name} already exists, deleting it...')
|
||||
client.delete_collection(collection_name: collection_name) or {
|
||||
eprintln('Failed to delete collection: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
println('Collection deleted')
|
||||
}
|
||||
|
||||
// Create a new collection
|
||||
println('Creating collection ${collection_name}...')
|
||||
vectors_config := qdrant.VectorsConfig{
|
||||
size: 4 // Small size for example purposes
|
||||
distance: .cosine
|
||||
}
|
||||
|
||||
client.create_collection(
|
||||
collection_name: collection_name
|
||||
vectors: vectors_config
|
||||
) or {
|
||||
eprintln('Failed to create collection: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
println('Collection created')
|
||||
|
||||
// Upsert some points
|
||||
println('Upserting points...')
|
||||
points := [
|
||||
qdrant.PointStruct{
|
||||
id: '1'
|
||||
vector: [f32(0.1), 0.2, 0.3, 0.4]
|
||||
payload: {
|
||||
'color': 'red'
|
||||
'category': 'furniture'
|
||||
'name': 'chair'
|
||||
}
|
||||
},
|
||||
qdrant.PointStruct{
|
||||
id: '2'
|
||||
vector: [f32(0.2), 0.3, 0.4, 0.5]
|
||||
payload: {
|
||||
'color': 'blue'
|
||||
'category': 'electronics'
|
||||
'name': 'laptop'
|
||||
}
|
||||
},
|
||||
qdrant.PointStruct{
|
||||
id: '3'
|
||||
vector: [f32(0.3), 0.4, 0.5, 0.6]
|
||||
payload: {
|
||||
'color': 'green'
|
||||
'category': 'food'
|
||||
'name': 'apple'
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
client.upsert_points(
|
||||
collection_name: collection_name
|
||||
points: points
|
||||
wait: true
|
||||
) or {
|
||||
eprintln('Failed to upsert points: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
println('Points upserted')
|
||||
|
||||
// Get collection info to verify points were added
|
||||
collection_info := client.get_collection(collection_name: collection_name) or {
|
||||
eprintln('Failed to get collection info: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
println('Collection has ${collection_info.vectors_count} points')
|
||||
|
||||
// Search for points
|
||||
println('Searching for points similar to [0.1, 0.2, 0.3, 0.4]...')
|
||||
search_result := client.search(
|
||||
collection_name: collection_name
|
||||
vector: [f32(0.1), 0.2, 0.3, 0.4]
|
||||
limit: 3
|
||||
) or {
|
||||
eprintln('Failed to search points: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
println('Search results:')
|
||||
for i, point in search_result.result {
|
||||
println(' ${i+1}. ID: ${point.id}, Score: ${point.score}')
|
||||
if payload := point.payload {
|
||||
println(' Name: ${payload['name']}')
|
||||
println(' Category: ${payload['category']}')
|
||||
println(' Color: ${payload['color']}')
|
||||
}
|
||||
}
|
||||
|
||||
// Search with filter
|
||||
println('\nSearching for points with category "electronics"...')
|
||||
filter := qdrant.Filter{
|
||||
must: [
|
||||
qdrant.FieldCondition{
|
||||
key: 'category'
|
||||
match: 'electronics'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
filtered_search := client.search(
|
||||
collection_name: collection_name
|
||||
vector: [f32(0.1), 0.2, 0.3, 0.4]
|
||||
filter: filter
|
||||
limit: 3
|
||||
) or {
|
||||
eprintln('Failed to search with filter: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
println('Filtered search results:')
|
||||
for i, point in filtered_search.result {
|
||||
println(' ${i+1}. ID: ${point.id}, Score: ${point.score}')
|
||||
if payload := point.payload {
|
||||
println(' Name: ${payload['name']}')
|
||||
println(' Category: ${payload['category']}')
|
||||
println(' Color: ${payload['color']}')
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up - delete the collection
|
||||
println('\nCleaning up - deleting collection...')
|
||||
client.delete_collection(collection_name: collection_name) or {
|
||||
eprintln('Failed to delete collection: ${err}')
|
||||
exit(1)
|
||||
}
|
||||
|
||||
println('Collection deleted')
|
||||
println('Example completed successfully')
|
||||
examples/installers/db/qdrant.vsh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.installers.db.qdrant as qdrant_installer

mut db := qdrant_installer.get()!

db.install()!
db.start()!

@@ -3,5 +3,8 @@
import freeflowuniverse.herolib.installers.lang.python as python_module

mut python_installer := python_module.get()!
// python_installer.install()!
python_installer.destroy()!
python_installer.install()!

// python_installer.destroy()!

lib/clients/jina/jina_client.v (new file, 245 lines)
@@ -0,0 +1,245 @@
|
||||
module jina
|
||||
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
// import os
|
||||
import json
|
||||
|
||||
@[params]
|
||||
pub struct CreateEmbeddingParams {
|
||||
pub mut:
|
||||
input []string @[required] // Input texts
|
||||
model JinaModel @[required] // Model name
|
||||
task string @[required] // Task type
|
||||
type_ ?EmbeddingType // embedding type
|
||||
truncate ?TruncateType // truncation type
|
||||
late_chunking ?bool // Flag to determine if late chunking is applied
|
||||
}
|
||||
|
||||
// Create embeddings for input texts
|
||||
pub fn (mut j Jina) create_embeddings(params CreateEmbeddingParams) !ModelEmbeddingOutput {
|
||||
task := task_type_from_string(params.task)!
|
||||
|
||||
mut embedding_input := TextEmbeddingInput{
|
||||
input: params.input
|
||||
model: params.model.to_string()
|
||||
task: task
|
||||
}
|
||||
|
||||
if v := params.type_ {
|
||||
embedding_input.type_ = v
|
||||
}
|
||||
|
||||
if v := params.truncate {
|
||||
embedding_input.truncate = v
|
||||
}
|
||||
|
||||
embedding_input.late_chunking = if _ := params.late_chunking { true } else { false }
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .post
|
||||
prefix: 'v1/embeddings'
|
||||
dataformat: .json
|
||||
data: embedding_input.to_json()
|
||||
}
|
||||
|
||||
mut httpclient := j.httpclient()!
|
||||
response := httpclient.post_json_str(req)!
|
||||
return parse_model_embedding_output(response)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct RerankParams {
|
||||
pub mut:
|
||||
model JinaRerankModel @[required]
|
||||
query string @[required]
|
||||
documents []string @[required]
|
||||
top_n ?int // Optional: Number of top results to return
|
||||
return_documents ?bool // Optional: Flag to determine if the documents should be returned
|
||||
}
|
||||
|
||||
// Rerank documents based on a query
|
||||
pub fn (mut j Jina) rerank(params RerankParams) !RankingOutput {
|
||||
mut rank_input := RerankInput{
|
||||
model: params.model.to_string()
|
||||
query: params.query
|
||||
documents: params.documents
|
||||
}
|
||||
|
||||
if v := params.top_n {
|
||||
rank_input.top_n = v
|
||||
}
|
||||
|
||||
if v := params.return_documents {
|
||||
rank_input.return_documents = v
|
||||
}
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .post
|
||||
prefix: 'v1/rerank'
|
||||
dataformat: .json
|
||||
data: json.encode(rank_input)
|
||||
}
|
||||
|
||||
mut httpclient := j.httpclient()!
|
||||
response := httpclient.post_json_str(req)!
|
||||
return json.decode(RankingOutput, response)!
|
||||
}
|
||||
|
||||
// // Create embeddings with a TextDoc input
|
||||
// pub fn (mut j Jina) create_embeddings_with_docs(args TextEmbeddingInput) !ModelEmbeddingOutput {
|
||||
|
||||
// req := httpconnection.Request{
|
||||
// method: .post
|
||||
// prefix: 'v1/embeddings'
|
||||
// dataformat: .json
|
||||
// data: json.encode(args)
|
||||
// }
|
||||
|
||||
// response := j.http.get(req)!
|
||||
// return parse_model_embedding_output(response)!
|
||||
// }
|
||||
|
||||
// // Rerank documents based on a query
|
||||
// pub fn (mut j Jina) rerank(query string, documents []string, model string, top_n int) !RankingOutput {
|
||||
// mut rank_input := RankAPIInput{
|
||||
// model: model
|
||||
// query: query
|
||||
// documents: documents
|
||||
// top_n: top_n
|
||||
// }
|
||||
|
||||
// req := httpconnection.Request{
|
||||
// method: .post
|
||||
// prefix: 'v1/rerank'
|
||||
// dataformat: .json
|
||||
// data: rank_input.to_json()
|
||||
// }
|
||||
|
||||
// response := j.http.get(req)!
|
||||
// return parse_ranking_output(response)!
|
||||
// }
|
||||
|
||||
// // Simplified rerank function with default top_n
|
||||
// pub fn (mut j Jina) rerank_simple(query string, documents []string, model string) !RankingOutput {
|
||||
// return j.rerank(query, documents, model, 0)!
|
||||
// }
|
||||
|
||||
// // Classify input texts
|
||||
// pub fn (mut j Jina) classify(input []string, model string, labels []string) !ClassificationOutput {
|
||||
// mut classification_input := ClassificationAPIInput{
|
||||
// model: model
|
||||
// input: input
|
||||
// labels: labels
|
||||
// }
|
||||
|
||||
// req := httpconnection.Request{
|
||||
// method: .post
|
||||
// prefix: 'v1/classify'
|
||||
// dataformat: .json
|
||||
// data: classification_input.to_json()
|
||||
// }
|
||||
|
||||
// response := j.http.get(req)!
|
||||
// return parse_classification_output(response)!
|
||||
// }
|
||||
|
||||
// // Train a classifier
|
||||
// pub fn (mut j Jina) train(examples []TrainingExample, model string, access string) !TrainingOutput {
|
||||
// mut training_input := TrainingAPIInput{
|
||||
// model: model
|
||||
// input: examples
|
||||
// access: access
|
||||
// }
|
||||
|
||||
// req := httpconnection.Request{
|
||||
// method: .post
|
||||
// prefix: 'v1/train'
|
||||
// dataformat: .json
|
||||
// data: training_input.to_json()
|
||||
// }
|
||||
|
||||
// response := j.http.get(req)!
|
||||
// return parse_training_output(response)!
|
||||
// }
|
||||
|
||||
// // List classifiers
|
||||
// pub fn (mut j Jina) list_classifiers() !string {
|
||||
// req := httpconnection.Request{
|
||||
// method: .get
|
||||
// prefix: 'v1/classifiers'
|
||||
// }
|
||||
|
||||
// return j.http.get(req)!
|
||||
// }
|
||||
|
||||
// // Delete a classifier
|
||||
// pub fn (mut j Jina) delete_classifier(classifier_id string) !bool {
|
||||
// req := httpconnection.Request{
|
||||
// method: .delete
|
||||
// prefix: 'v1/classifiers/${classifier_id}'
|
||||
// }
|
||||
|
||||
// j.http.get(req)!
|
||||
// return true
|
||||
// }
|
||||
|
||||
// // Create multi-vector embeddings
|
||||
// pub fn (mut j Jina) create_multi_vector(input []string, model string) !ColbertModelEmbeddingsOutput {
|
||||
// mut data := map[string]json.Any{}
|
||||
// data['model'] = model
|
||||
// data['input'] = input
|
||||
|
||||
// req := httpconnection.Request{
|
||||
// method: .post
|
||||
// prefix: 'v1/multi-embeddings'
|
||||
// dataformat: .json
|
||||
// data: json.encode(data)
|
||||
// }
|
||||
|
||||
// response := j.http.get(req)!
|
||||
// return parse_colbert_model_embeddings_output(response)!
|
||||
// }
|
||||
|
||||
// // Start a bulk embedding job
|
||||
// pub fn (mut j Jina) start_bulk_embedding(file_path string, model string, email string) !BulkEmbeddingJobResponse {
|
||||
// // This endpoint requires multipart/form-data which is not directly supported by the current HTTPConnection
|
||||
// // We need to implement a custom solution for this
|
||||
// return error('Bulk embedding is not implemented yet')
|
||||
// }
|
||||
|
||||
// // Check the status of a bulk embedding job
|
||||
// pub fn (mut j Jina) check_bulk_embedding_status(job_id string) !BulkEmbeddingJobResponse {
|
||||
// req := httpconnection.Request{
|
||||
// method: .get
|
||||
// prefix: 'v1/bulk-embeddings/${job_id}'
|
||||
// }
|
||||
|
||||
// response := j.http.get(req)!
|
||||
// return parse_bulk_embedding_job_response(response)!
|
||||
// }
|
||||
|
||||
// // Download the result of a bulk embedding job
|
||||
// pub fn (mut j Jina) download_bulk_embedding_result(job_id string) !DownloadResultResponse {
|
||||
// req := httpconnection.Request{
|
||||
// method: .post
|
||||
// prefix: 'v1/bulk-embeddings/${job_id}/download-result'
|
||||
// }
|
||||
|
||||
// response := j.http.get(req)!
|
||||
// return parse_download_result_response(response)!
|
||||
// }
|
||||
|
||||
// // Check if the API key is valid by making a simple request
|
||||
// pub fn (mut j Jina) check_auth() !bool {
|
||||
// req := httpconnection.Request{
|
||||
// method: .get
|
||||
// prefix: '/'
|
||||
// }
|
||||
|
||||
// j.http.get(req) or {
|
||||
// return error('Failed to connect to Jina API: ${err}')
|
||||
// }
|
||||
|
||||
// // If we get a response, the API key is valid
|
||||
// return true
|
||||
// }
|
||||
@@ -2,6 +2,7 @@ module jina

import freeflowuniverse.herolib.core.base
import freeflowuniverse.herolib.core.playbook
// import freeflowuniverse.herolib.ui.console

__global (
	jina_global map[string]&Jina

@@ -2,6 +2,7 @@ module jina

import freeflowuniverse.herolib.data.encoderhero
import freeflowuniverse.herolib.core.httpconnection
// import net.http
import os

pub const version = '0.0.0'

@@ -16,3 +16,26 @@ data = {

response = requests.post(url, headers=headers, data=json.dumps(data))
print(response.json())

# OTHER EXAMPLE WITH MORE ARGUMENTS

url = "https://s.jina.ai/"
params = {
    "q": "Jina AI",
    "gl": "US",
    "hl": "en",
    "num": 10,
    "page": 1,
    "location": "gent"
}
headers = {
    "Accept": "application/json",
    "Authorization": "Bearer jina_275aefb6495643408d4c499fce548080w5rYjijHfHVBi_vtAqNY6LBk-woz",
    "X-Return-Format": "markdown",
    "X-Timeout": "10"
}

response = requests.get(url, params=params, headers=headers)

print(response.json())
@@ -1,7 +1,6 @@
module jina

import freeflowuniverse.herolib.core.httpconnection
import json
// import json

pub enum JinaRerankModel {
	reranker_v2_base_multilingual // 278M

@@ -66,41 +65,3 @@ pub fn jina_rerank_model_from_string(s string) !JinaRerankModel
		else { error('Invalid JinaRerankModel string: ${s}') }
	}
}

@[params]
pub struct RerankParams {
pub mut:
	model            JinaRerankModel @[required] // Model name
	query            string          @[required] // Query text
	documents        []string        @[required] // Document texts
	top_n            ?int  // Optional: number of top results to return
	return_documents ?bool // Optional: flag to determine if the documents should be returned
}

// Rerank documents based on a query
pub fn (mut j Jina) rerank(params RerankParams) !RankingOutput {
	mut rank_input := RerankInput{
		model:     params.model.to_string()
		query:     params.query
		documents: params.documents
	}

	if v := params.top_n {
		rank_input.top_n = v
	}

	if v := params.return_documents {
		rank_input.return_documents = v
	}

	req := httpconnection.Request{
		method:     .post
		prefix:     'v1/rerank'
		dataformat: .json
		data:       json.encode(rank_input)
	}

	mut httpclient := j.httpclient()!
	response := httpclient.post_json_str(req)!
	return json.decode(RankingOutput, response)!
}
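With the duplicate removed from rank_api.v, rerank() now lives only in jina_client.v and is driven by the RerankParams struct. A minimal usage sketch; it assumes the module is exposed as `freeflowuniverse.herolib.clients.jina` and provides the usual generated `get()` factory, which is only partially visible in this diff:

```v
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

import freeflowuniverse.herolib.clients.jina

// Assumption: jina.get() follows the same generated-factory pattern as the qdrant client.
mut j := jina.get()!

ranked := j.rerank(
	model:     .reranker_v2_base_multilingual
	query:     'which document talks about vector databases?'
	documents: ['qdrant stores embeddings', 'bash is a shell', 'zinit manages processes']
	top_n:     1
)!
println(ranked)
```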
lib/clients/qdrant/.heroscript (new file, 8 lines)
@@ -0,0 +1,8 @@

!!hero_code.generate_client
    name:'qdrant'
    classname:'QDrantClient'
    singleton:0
    default:1
    hasconfig:1
    reset:0

lib/clients/qdrant/openapi.json (new file, 14661 lines; diff suppressed because it is too large)

lib/clients/qdrant/qdrant_client.v (new file, 394 lines)
@@ -0,0 +1,394 @@
|
||||
module qdrant
|
||||
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
import json
|
||||
// import os
|
||||
|
||||
// QdrantClient is the main client for interacting with the Qdrant API
|
||||
pub struct QdrantClient {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
secret string
|
||||
url string = 'http://localhost:6333'
|
||||
}
|
||||
|
||||
// httpclient creates a new HTTP connection to the Qdrant API
|
||||
fn (mut self QdrantClient) httpclient() !&httpconnection.HTTPConnection {
|
||||
mut http_conn := httpconnection.new(
|
||||
name: 'Qdrant_vclient'
|
||||
url: self.url
|
||||
)!
|
||||
|
||||
// Add authentication header if API key is provided
|
||||
if self.secret.len > 0 {
|
||||
http_conn.default_header.add(.api_key, self.secret)
|
||||
}
|
||||
return http_conn
|
||||
}
|
||||
|
||||
// Collections API
|
||||
|
||||
@[params]
|
||||
pub struct CreateCollectionParams {
|
||||
pub mut:
|
||||
collection_name string @[required]
|
||||
vectors VectorsConfig @[required]
|
||||
shard_number ?int
|
||||
replication_factor ?int
|
||||
write_consistency_factor ?int
|
||||
on_disk_payload ?bool
|
||||
hnsw_config ?HnswConfig
|
||||
optimizers_config ?OptimizersConfig
|
||||
wal_config ?WalConfig
|
||||
quantization_config ?QuantizationConfig
|
||||
init_from ?InitFrom
|
||||
timeout ?int
|
||||
}
|
||||
|
||||
// Create a new collection
|
||||
pub fn (mut q QdrantClient) create_collection(params CreateCollectionParams) !bool {
|
||||
mut collection_params := CollectionParams{
|
||||
vectors: params.vectors
|
||||
}
|
||||
|
||||
if v := params.shard_number {
|
||||
collection_params.shard_number = v
|
||||
}
|
||||
|
||||
if v := params.replication_factor {
|
||||
collection_params.replication_factor = v
|
||||
}
|
||||
|
||||
if v := params.write_consistency_factor {
|
||||
collection_params.write_consistency_factor = v
|
||||
}
|
||||
|
||||
if v := params.on_disk_payload {
|
||||
collection_params.on_disk_payload = v
|
||||
}
|
||||
|
||||
if v := params.hnsw_config {
|
||||
collection_params.hnsw_config = v
|
||||
}
|
||||
|
||||
if v := params.optimizers_config {
|
||||
collection_params.optimizers_config = v
|
||||
}
|
||||
|
||||
if v := params.wal_config {
|
||||
collection_params.wal_config = v
|
||||
}
|
||||
|
||||
if v := params.quantization_config {
|
||||
collection_params.quantization_config = v
|
||||
}
|
||||
|
||||
if v := params.init_from {
|
||||
collection_params.init_from = v
|
||||
}
|
||||
|
||||
mut query_params := map[string]string{}
|
||||
if v := params.timeout {
|
||||
query_params['timeout'] = v.str()
|
||||
}
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .put
|
||||
prefix: 'collections/${params.collection_name}'
|
||||
dataformat: .json
|
||||
data: json.encode(collection_params)
|
||||
params: query_params
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
result := json.decode(OperationResponse, response.body)!
|
||||
return result.result
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct ListCollectionsParams {
|
||||
pub mut:
|
||||
timeout ?int
|
||||
}
|
||||
|
||||
// List all collections
|
||||
pub fn (mut q QdrantClient) list_collections(params ListCollectionsParams) !CollectionsResponse {
|
||||
mut query_params := map[string]string{}
|
||||
if v := params.timeout {
|
||||
query_params['timeout'] = v.str()
|
||||
}
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .get
|
||||
prefix: 'collections'
|
||||
params: query_params
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
return json.decode(CollectionsResponse, response.body)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct DeleteCollectionParams {
|
||||
pub mut:
|
||||
collection_name string @[required]
|
||||
timeout ?int
|
||||
}
|
||||
|
||||
// Delete a collection
|
||||
pub fn (mut q QdrantClient) delete_collection(params DeleteCollectionParams) !bool {
|
||||
mut query_params := map[string]string{}
|
||||
if v := params.timeout {
|
||||
query_params['timeout'] = v.str()
|
||||
}
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .delete
|
||||
prefix: 'collections/${params.collection_name}'
|
||||
params: query_params
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
result := json.decode(OperationResponse, response.body)!
|
||||
return result.result
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct GetCollectionParams {
|
||||
pub mut:
|
||||
collection_name string @[required]
|
||||
timeout ?int
|
||||
}
|
||||
|
||||
// Get collection info
|
||||
pub fn (mut q QdrantClient) get_collection(params GetCollectionParams) !CollectionInfo {
|
||||
mut query_params := map[string]string{}
|
||||
if v := params.timeout {
|
||||
query_params['timeout'] = v.str()
|
||||
}
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .get
|
||||
prefix: 'collections/${params.collection_name}'
|
||||
params: query_params
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
result := json.decode(CollectionInfoResponse, response.body)!
|
||||
return result.result
|
||||
}
|
||||
|
||||
// Points API
|
||||
|
||||
@[params]
|
||||
pub struct UpsertPointsParams {
|
||||
pub mut:
|
||||
collection_name string @[required]
|
||||
points []PointStruct @[required]
|
||||
wait ?bool
|
||||
ordering ?WriteOrdering
|
||||
}
|
||||
|
||||
// Upsert points
|
||||
pub fn (mut q QdrantClient) upsert_points(params UpsertPointsParams) !PointsOperationResponse {
|
||||
mut query_params := map[string]string{}
|
||||
if v := params.wait {
|
||||
query_params['wait'] = v.str()
|
||||
}
|
||||
|
||||
mut request_body := map[string]json.Any{}
|
||||
request_body['points'] = params.points
|
||||
|
||||
if v := params.ordering {
|
||||
request_body['ordering'] = v
|
||||
}
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .put
|
||||
prefix: 'collections/${params.collection_name}/points'
|
||||
dataformat: .json
|
||||
data: json.encode(request_body)
|
||||
params: query_params
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
return json.decode(PointsOperationResponse, response.body)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct DeletePointsParams {
|
||||
pub mut:
|
||||
collection_name string @[required]
|
||||
points_selector PointsSelector @[required]
|
||||
wait ?bool
|
||||
ordering ?WriteOrdering
|
||||
}
|
||||
|
||||
// Delete points
|
||||
pub fn (mut q QdrantClient) delete_points(params DeletePointsParams) !PointsOperationResponse {
|
||||
mut query_params := map[string]string{}
|
||||
if v := params.wait {
|
||||
query_params['wait'] = v.str()
|
||||
}
|
||||
|
||||
mut request_body := map[string]json.Any{}
|
||||
|
||||
if params.points_selector.points != none {
|
||||
request_body['points'] = params.points_selector.points
|
||||
} else if params.points_selector.filter != none {
|
||||
request_body['filter'] = params.points_selector.filter
|
||||
}
|
||||
|
||||
if v := params.ordering {
|
||||
request_body['ordering'] = v
|
||||
}
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .post
|
||||
prefix: 'collections/${params.collection_name}/points/delete'
|
||||
dataformat: .json
|
||||
data: json.encode(request_body)
|
||||
params: query_params
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
return json.decode(PointsOperationResponse, response.body)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct GetPointParams {
|
||||
pub mut:
|
||||
collection_name string @[required]
|
||||
id string @[required]
|
||||
with_payload ?WithPayloadSelector
|
||||
with_vector ?WithVector
|
||||
}
|
||||
|
||||
// Get a point by ID
|
||||
pub fn (mut q QdrantClient) get_point(params GetPointParams) !GetPointResponse {
|
||||
mut query_params := map[string]string{}
|
||||
|
||||
if v := params.with_payload {
|
||||
query_params['with_payload'] = json.encode(v)
|
||||
}
|
||||
|
||||
if v := params.with_vector {
|
||||
query_params['with_vector'] = json.encode(v)
|
||||
}
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .get
|
||||
prefix: 'collections/${params.collection_name}/points/${params.id}'
|
||||
params: query_params
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
return json.decode(GetPointResponse, response.body)!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct SearchParams {
|
||||
pub mut:
|
||||
collection_name string @[required]
|
||||
vector []f32 @[required]
|
||||
limit int = 10
|
||||
filter ?Filter
|
||||
params ?SearchParamsConfig
|
||||
with_payload ?WithPayloadSelector
|
||||
with_vector ?WithVector
|
||||
score_threshold ?f32
|
||||
}
|
||||
|
||||
// Search for points
|
||||
pub fn (mut q QdrantClient) search(params SearchParams) !SearchResponse {
|
||||
// Create a struct to serialize to JSON
|
||||
struct SearchRequest {
|
||||
pub mut:
|
||||
vector []f32
|
||||
limit int
|
||||
filter ?Filter
|
||||
params ?SearchParamsConfig
|
||||
with_payload ?WithPayloadSelector
|
||||
with_vector ?WithVector
|
||||
score_threshold ?f32
|
||||
}
|
||||
|
||||
mut request := SearchRequest{
|
||||
vector: params.vector
|
||||
limit: params.limit
|
||||
}
|
||||
|
||||
if v := params.filter {
|
||||
request.filter = v
|
||||
}
|
||||
|
||||
if v := params.params {
|
||||
request.params = v
|
||||
}
|
||||
|
||||
if v := params.with_payload {
|
||||
request.with_payload = v
|
||||
}
|
||||
|
||||
if v := params.with_vector {
|
||||
request.with_vector = v
|
||||
}
|
||||
|
||||
if v := params.score_threshold {
|
||||
request.score_threshold = v
|
||||
}
|
||||
|
||||
req := httpconnection.Request{
|
||||
method: .post
|
||||
prefix: 'collections/${params.collection_name}/points/search'
|
||||
dataformat: .json
|
||||
data: json.encode(request)
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
return json.decode(SearchResponse, response.data)!
|
||||
}
|
||||
|
||||
// Service API
|
||||
|
||||
// Get Qdrant service info
|
||||
pub fn (mut q QdrantClient) get_service_info() !ServiceInfoResponse {
|
||||
req := httpconnection.Request{
|
||||
method: .get
|
||||
prefix: ''
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
return json.decode(ServiceInfoResponse, response.data)!
|
||||
}
|
||||
|
||||
// Check Qdrant health
|
||||
pub fn (mut q QdrantClient) health_check() !bool {
|
||||
req := httpconnection.Request{
|
||||
method: .get
|
||||
prefix: 'healthz'
|
||||
}
|
||||
|
||||
mut httpclient := q.httpclient()!
|
||||
response := httpclient.send(req)!
|
||||
|
||||
return response.code == 200
|
||||
}
|
||||
lib/clients/qdrant/qdrant_client_test.v (new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
module qdrant
|
||||
|
||||
fn test_qdrant_client() {
|
||||
mut client := QDrantClient{
|
||||
name: 'test_client'
|
||||
url: 'http://localhost:6333'
|
||||
}
|
||||
|
||||
// Test creating a collection
|
||||
vectors_config := VectorsConfig{
|
||||
size: 128
|
||||
distance: .cosine
|
||||
}
|
||||
|
||||
// Create collection
|
||||
create_result := client.create_collection(
|
||||
collection_name: 'test_collection'
|
||||
vectors: vectors_config
|
||||
) or {
|
||||
assert false, 'Failed to create collection: ${err}'
|
||||
return
|
||||
}
|
||||
assert create_result == true
|
||||
|
||||
// List collections
|
||||
collections := client.list_collections() or {
|
||||
assert false, 'Failed to list collections: ${err}'
|
||||
return
|
||||
}
|
||||
assert 'test_collection' in collections.result
|
||||
|
||||
// Get collection info
|
||||
collection_info := client.get_collection(
|
||||
collection_name: 'test_collection'
|
||||
) or {
|
||||
assert false, 'Failed to get collection info: ${err}'
|
||||
return
|
||||
}
|
||||
assert collection_info.vectors_count == 0
|
||||
|
||||
// Upsert points
|
||||
points := [
|
||||
PointStruct{
|
||||
id: '1'
|
||||
vector: [f32(0.1), 0.2, 0.3, 0.4]
|
||||
payload: {
|
||||
'color': 'red'
|
||||
'category': 'furniture'
|
||||
}
|
||||
},
|
||||
PointStruct{
|
||||
id: '2'
|
||||
vector: [f32(0.2), 0.3, 0.4, 0.5]
|
||||
payload: {
|
||||
'color': 'blue'
|
||||
'category': 'electronics'
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
upsert_result := client.upsert_points(
|
||||
collection_name: 'test_collection'
|
||||
points: points
|
||||
wait: true
|
||||
) or {
|
||||
assert false, 'Failed to upsert points: ${err}'
|
||||
return
|
||||
}
|
||||
assert upsert_result.status == 'ok'
|
||||
|
||||
// Search for points
|
||||
search_result := client.search(
|
||||
collection_name: 'test_collection'
|
||||
vector: [f32(0.1), 0.2, 0.3, 0.4]
|
||||
limit: 1
|
||||
) or {
|
||||
assert false, 'Failed to search points: ${err}'
|
||||
return
|
||||
}
|
||||
assert search_result.result.len > 0
|
||||
|
||||
// Get a point
|
||||
point := client.get_point(
|
||||
collection_name: 'test_collection'
|
||||
id: '1'
|
||||
) or {
|
||||
assert false, 'Failed to get point: ${err}'
|
||||
return
|
||||
}
|
||||
if result := point.result {
|
||||
assert result.id == '1'
|
||||
} else {
|
||||
assert false, 'Point not found'
|
||||
}
|
||||
|
||||
// Delete a point
|
||||
delete_result := client.delete_points(
|
||||
collection_name: 'test_collection'
|
||||
points_selector: PointsSelector{
|
||||
points: ['1']
|
||||
}
|
||||
wait: true
|
||||
) or {
|
||||
assert false, 'Failed to delete point: ${err}'
|
||||
return
|
||||
}
|
||||
assert delete_result.status == 'ok'
|
||||
|
||||
// Delete collection
|
||||
delete_collection_result := client.delete_collection(
|
||||
collection_name: 'test_collection'
|
||||
) or {
|
||||
assert false, 'Failed to delete collection: ${err}'
|
||||
return
|
||||
}
|
||||
assert delete_collection_result == true
|
||||
}
|
||||
lib/clients/qdrant/qdrant_factory_.v (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
module qdrant
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
// import freeflowuniverse.herolib.ui.console
|
||||
|
||||
__global (
|
||||
qdrant_global map[string]&QDrantClient
|
||||
qdrant_default string
|
||||
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&QDrantClient {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
mut obj := QDrantClient{}
|
||||
if args.name !in qdrant_global {
|
||||
if !exists(args)! {
|
||||
set(obj)!
|
||||
} else {
|
||||
heroscript := context.hero_config_get('qdrant', args.name)!
|
||||
mut obj_ := heroscript_loads(heroscript)!
|
||||
set_in_mem(obj_)!
|
||||
}
|
||||
}
|
||||
return qdrant_global[args.name] or {
|
||||
println(qdrant_global)
|
||||
// bug if we get here because should be in globals
|
||||
panic('could not get config for qdrant with name, is bug:${args.name}')
|
||||
}
|
||||
}
|
||||
|
||||
// register the config for the future
|
||||
pub fn set(o QDrantClient) ! {
|
||||
set_in_mem(o)!
|
||||
mut context := base.context()!
|
||||
heroscript := heroscript_dumps(o)!
|
||||
context.hero_config_set('qdrant', o.name, heroscript)!
|
||||
}
|
||||
|
||||
// does the config exists?
|
||||
pub fn exists(args_ ArgsGet) !bool {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
return context.hero_config_exists('qdrant', args.name)
|
||||
}
|
||||
|
||||
pub fn delete(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_delete('qdrant', args.name)!
|
||||
if args.name in qdrant_global {
|
||||
// del qdrant_global[args.name]
|
||||
}
|
||||
}
|
||||
|
||||
// only sets in mem, does not set as config
|
||||
fn set_in_mem(o QDrantClient) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
qdrant_global[o.name] = &o2
|
||||
qdrant_default = o.name
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut install_actions := plbook.find(filter: 'qdrant.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
heroscript := install_action.heroscript()
|
||||
mut obj2 := heroscript_loads(heroscript)!
|
||||
set(obj2)!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// switch instance to be used for qdrant
|
||||
pub fn switch(name string) {
|
||||
qdrant_default = name
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
lib/clients/qdrant/qdrant_model.v (new file, 397 lines)
@@ -0,0 +1,397 @@
|
||||
module qdrant
|
||||
|
||||
// import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
// import json
|
||||
// import os
|
||||
|
||||
pub const version = '0.0.0'
|
||||
const singleton = false
|
||||
const default = true
|
||||
|
||||
// THIS THE THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
|
||||
|
||||
@[heap]
|
||||
pub struct QDrantClient {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
secret string
|
||||
url string = 'http://localhost:6333/'
|
||||
}
|
||||
|
||||
// your checking & initialization code if needed
|
||||
fn obj_init(mycfg_ QDrantClient) !QDrantClient {
|
||||
mut mycfg := mycfg_
|
||||
return mycfg
|
||||
}
|
||||
|
||||
/////////////NORMALLY NO NEED TO TOUCH
|
||||
|
||||
pub fn heroscript_dumps(obj QDrantClient) !string {
|
||||
return encoderhero.encode[QDrantClient](obj)!
|
||||
}
|
||||
|
||||
pub fn heroscript_loads(heroscript string) !QDrantClient {
|
||||
mut obj := encoderhero.decode[QDrantClient](heroscript)!
|
||||
return obj
|
||||
}
|
||||
|
||||
// Base response structure
|
||||
pub struct BaseResponse {
|
||||
pub mut:
|
||||
time f32
|
||||
status string
|
||||
}
|
||||
|
||||
// Operation response
|
||||
pub struct OperationResponse {
|
||||
pub mut:
|
||||
time f32
|
||||
status string
|
||||
result bool
|
||||
}
|
||||
|
||||
// Collections response
|
||||
pub struct CollectionsResponse {
|
||||
pub mut:
|
||||
time f32
|
||||
status string
|
||||
result []string
|
||||
}
|
||||
|
||||
// Collection info response
|
||||
pub struct CollectionInfoResponse {
|
||||
pub mut:
|
||||
time f32
|
||||
status string
|
||||
result CollectionInfo
|
||||
}
|
||||
|
||||
// Collection info
|
||||
pub struct CollectionInfo {
|
||||
pub mut:
|
||||
status string
|
||||
optimizer_status OptimizersStatus
|
||||
vectors_count u64
|
||||
indexed_vectors_count ?u64
|
||||
points_count u64
|
||||
segments_count u64
|
||||
config CollectionConfig
|
||||
payload_schema map[string]PayloadIndexInfo
|
||||
}
|
||||
|
||||
// Optimizers status
|
||||
pub struct OptimizersStatus {
|
||||
pub mut:
|
||||
status string
|
||||
}
|
||||
|
||||
// Collection config
|
||||
pub struct CollectionConfig {
|
||||
pub mut:
|
||||
params CollectionParams
|
||||
hnsw_config ?HnswConfig
|
||||
optimizer_config ?OptimizersConfig
|
||||
wal_config ?WalConfig
|
||||
quantization_config ?QuantizationConfig
|
||||
}
|
||||
|
||||
// Collection params
|
||||
pub struct CollectionParams {
|
||||
pub mut:
|
||||
vectors VectorsConfig
|
||||
shard_number ?int
|
||||
replication_factor ?int
|
||||
write_consistency_factor ?int
|
||||
on_disk_payload ?bool
|
||||
hnsw_config ?HnswConfig
|
||||
optimizers_config ?OptimizersConfig
|
||||
wal_config ?WalConfig
|
||||
quantization_config ?QuantizationConfig
|
||||
init_from ?InitFrom
|
||||
}
|
||||
|
||||
// Vectors config
|
||||
pub struct VectorsConfig {
|
||||
pub mut:
|
||||
size int
|
||||
distance Distance
|
||||
hnsw_config ?HnswConfig
|
||||
quantization_config ?QuantizationConfig
|
||||
on_disk ?bool
|
||||
}
|
||||
|
||||
// Distance type
|
||||
pub enum Distance {
|
||||
cosine
|
||||
euclid
|
||||
dot
|
||||
}
|
||||
|
||||
// Convert Distance enum to string
|
||||
pub fn (d Distance) str() string {
|
||||
return match d {
|
||||
.cosine { 'cosine' }
|
||||
.euclid { 'euclid' }
|
||||
.dot { 'dot' }
|
||||
}
|
||||
}
|
||||
|
||||
// HNSW config
|
||||
pub struct HnswConfig {
|
||||
pub mut:
|
||||
m int
|
||||
ef_construct int
|
||||
full_scan_threshold ?int
|
||||
max_indexing_threads ?int
|
||||
on_disk ?bool
|
||||
payload_m ?int
|
||||
}
|
||||
|
||||
// Optimizers config
|
||||
pub struct OptimizersConfig {
|
||||
pub mut:
|
||||
deleted_threshold f32
|
||||
vacuum_min_vector_number int
|
||||
default_segment_number int
|
||||
max_segment_size ?int
|
||||
memmap_threshold ?int
|
||||
indexing_threshold ?int
|
||||
flush_interval_sec ?int
|
||||
max_optimization_threads ?int
|
||||
}
|
||||
|
||||
// WAL config
|
||||
pub struct WalConfig {
|
||||
pub mut:
|
||||
wal_capacity_mb ?int
|
||||
wal_segments_ahead ?int
|
||||
}
|
||||
|
||||
// Quantization config
|
||||
pub struct QuantizationConfig {
|
||||
pub mut:
|
||||
scalar ?ScalarQuantization
|
||||
product ?ProductQuantization
|
||||
binary ?BinaryQuantization
|
||||
}
|
||||
|
||||
// Scalar quantization
|
||||
pub struct ScalarQuantization {
|
||||
pub mut:
|
||||
type_ string
|
||||
quantile ?f32
|
||||
always_ram ?bool
|
||||
}
|
||||
|
||||
// Product quantization
|
||||
pub struct ProductQuantization {
|
||||
pub mut:
|
||||
compression string
|
||||
always_ram ?bool
|
||||
}
|
||||
|
||||
// Binary quantization
|
||||
pub struct BinaryQuantization {
|
||||
pub mut:
|
||||
binary bool
|
||||
always_ram ?bool
|
||||
}
|
||||
|
||||
// Init from
|
||||
pub struct InitFrom {
|
||||
pub mut:
|
||||
collection string
|
||||
shard ?int
|
||||
}
|
||||
|
||||
// Payload index info
|
||||
pub struct PayloadIndexInfo {
|
||||
pub mut:
|
||||
data_type string
|
||||
params ?map[string]string
|
||||
points int
|
||||
}
|
||||
|
||||
// Points operation response
|
||||
pub struct PointsOperationResponse {
|
||||
pub mut:
|
||||
time f32
|
||||
status string
|
||||
result OperationInfo
|
||||
}
|
||||
|
||||
// Operation info
|
||||
pub struct OperationInfo {
|
||||
pub mut:
|
||||
operation_id int
|
||||
status string
|
||||
}
|
||||
|
||||
// Point struct
|
||||
pub struct PointStruct {
|
||||
pub mut:
|
||||
id string
|
||||
vector []f32
|
||||
payload ?map[string]string
|
||||
}
|
||||
|
||||
// Points selector
|
||||
pub struct PointsSelector {
|
||||
pub mut:
|
||||
points ?[]string
|
||||
filter ?Filter
|
||||
}
|
||||
|
||||
// Filter
|
||||
pub struct Filter {
|
||||
pub mut:
|
||||
must ?[]Condition
|
||||
must_not ?[]Condition
|
||||
should ?[]Condition
|
||||
}
|
||||
|
||||
// Filter is serialized directly to JSON
|
||||
|
||||
// Condition interface
|
||||
pub interface Condition {}
|
||||
|
||||
// Field condition
|
||||
pub struct FieldCondition {
|
||||
pub mut:
|
||||
key string
|
||||
match ?string @[json: match]
|
||||
match_integer ?int @[json: match]
|
||||
match_float ?f32 @[json: match]
|
||||
match_bool ?bool @[json: match]
|
||||
range ?Range
|
||||
geo_bounding_box ?GeoBoundingBox
|
||||
geo_radius ?GeoRadius
|
||||
values_count ?ValuesCount
|
||||
}
|
||||
|
||||
// FieldCondition is serialized directly to JSON
|
||||
|
||||
// Range
|
||||
pub struct Range {
|
||||
pub mut:
|
||||
lt ?f32
|
||||
gt ?f32
|
||||
gte ?f32
|
||||
lte ?f32
|
||||
}
|
||||
|
||||
// Range is serialized directly to JSON
|
||||
|
||||
// GeoBoundingBox
|
||||
pub struct GeoBoundingBox {
|
||||
pub mut:
|
||||
top_left GeoPoint
|
||||
bottom_right GeoPoint
|
||||
}
|
||||
|
||||
// GeoBoundingBox is serialized directly to JSON
|
||||
|
||||
// GeoPoint
|
||||
pub struct GeoPoint {
|
||||
pub mut:
|
||||
lon f32
|
||||
lat f32
|
||||
}
|
||||
|
||||
// GeoPoint is serialized directly to JSON
|
||||
|
||||
// GeoRadius
|
||||
pub struct GeoRadius {
|
||||
pub mut:
|
||||
center GeoPoint
|
||||
radius f32
|
||||
}
|
||||
|
||||
// GeoRadius is serialized directly to JSON
|
||||
|
||||
// ValuesCount
|
||||
pub struct ValuesCount {
|
||||
pub mut:
|
||||
lt ?int
|
||||
gt ?int
|
||||
gte ?int
|
||||
lte ?int
|
||||
}
|
||||
|
||||
// ValuesCount is serialized directly to JSON
|
||||
|
||||
// WithPayloadSelector
|
||||
pub struct WithPayloadSelector {
|
||||
pub mut:
|
||||
include ?[]string
|
||||
exclude ?[]string
|
||||
}
|
||||
|
||||
// WithPayloadSelector is serialized directly to JSON
|
||||
|
||||
// WithVector
|
||||
pub struct WithVector {
|
||||
pub mut:
|
||||
include ?[]string
|
||||
}
|
||||
|
||||
// WithVector is serialized directly to JSON
|
||||
|
||||
// Get point response
|
||||
pub struct GetPointResponse {
|
||||
pub mut:
|
||||
time f32
|
||||
status string
|
||||
result ?PointStruct
|
||||
}
|
||||
|
||||
// Search params configuration
|
||||
pub struct SearchParamsConfig {
|
||||
pub mut:
|
||||
hnsw_ef ?int
|
||||
exact ?bool
|
||||
}
|
||||
|
||||
// SearchParamsConfig is serialized directly to JSON
|
||||
|
||||
// Search response
|
||||
pub struct SearchResponse {
|
||||
pub mut:
|
||||
time f32
|
||||
status string
|
||||
result []ScoredPoint
|
||||
}
|
||||
|
||||
// Scored point
|
||||
pub struct ScoredPoint {
|
||||
pub mut:
|
||||
id string
|
||||
version int
|
||||
score f32
|
||||
payload ?map[string]string
|
||||
vector ?[]f32
|
||||
}
|
||||
|
||||
// Write ordering
|
||||
pub struct WriteOrdering {
|
||||
pub mut:
|
||||
type_ string
|
||||
}
|
||||
|
||||
// WriteOrdering is serialized directly to JSON
|
||||
|
||||
// Service info response
|
||||
pub struct ServiceInfoResponse {
|
||||
pub mut:
|
||||
time f32
|
||||
status string
|
||||
result ServiceInfo
|
||||
}
|
||||
|
||||
// Service info
|
||||
pub struct ServiceInfo {
|
||||
pub mut:
|
||||
version string
|
||||
commit ?string
|
||||
}
|
||||
lib/clients/qdrant/readme.md (new file, 169 lines)
@@ -0,0 +1,169 @@

# Qdrant Client for HeroLib

This is a V client for [Qdrant](https://qdrant.tech/), a high-performance vector database and similarity search engine.

## Features

- Collection management (create, list, delete, get info)
- Points management (upsert, delete, search, get)
- Service information and health checks
- Support for filters, payload management, and vector operations

## Usage

### Initialize Client

```v
// Create a new Qdrant client
import freeflowuniverse.herolib.clients.qdrant

mut client := qdrant.get()!

// Or create with custom configuration
mut custom_client := qdrant.QDrantClient{
    name: 'custom',
    url: 'http://localhost:6333',
    secret: 'your_api_key' // Optional
}
qdrant.set(custom_client)!
```

### Collection Management

```v
// Create a collection
vectors_config := qdrant.VectorsConfig{
    size: 128,
    distance: .cosine
}

client.create_collection(
    collection_name: 'my_collection',
    vectors: vectors_config
)!

// List all collections
collections := client.list_collections()!

// Get collection info
collection_info := client.get_collection(
    collection_name: 'my_collection'
)!

// Delete a collection
client.delete_collection(
    collection_name: 'my_collection'
)!
```

### Points Management

```v
// Upsert points
points := [
    qdrant.PointStruct{
        id: '1',
        vector: [f32(0.1), 0.2, 0.3, 0.4],
        payload: {
            'color': 'red',
            'category': 'furniture'
        }
    },
    qdrant.PointStruct{
        id: '2',
        vector: [f32(0.2), 0.3, 0.4, 0.5],
        payload: {
            'color': 'blue',
            'category': 'electronics'
        }
    }
]

client.upsert_points(
    collection_name: 'my_collection',
    points: points,
    wait: true
)!

// Search for points
search_result := client.search(
    collection_name: 'my_collection',
    vector: [f32(0.1), 0.2, 0.3, 0.4],
    limit: 10
)!

// Get a point by ID
point := client.get_point(
    collection_name: 'my_collection',
    id: '1'
)!

// Delete points
client.delete_points(
    collection_name: 'my_collection',
    points_selector: qdrant.PointsSelector{
        points: ['1', '2']
    },
    wait: true
)!
```

### Service Information

```v
// Get service info
service_info := client.get_service_info()!

// Check health
is_healthy := client.health_check()!
```

## Advanced Usage

### Filtering

```v
// Create a filter
filter := qdrant.Filter{
    must: [
        qdrant.FieldCondition{
            key: 'color',
            match: 'red'
        },
        qdrant.FieldCondition{
            key: 'price',
            range: qdrant.Range{
                gte: 10.0,
                lt: 100.0
            }
        }
    ]
}

// Search with filter
search_result := client.search(
    collection_name: 'my_collection',
    vector: [f32(0.1), 0.2, 0.3, 0.4],
    filter: filter,
    limit: 10
)!
```

## Example HeroScript

```hero
!!qdrant.configure
    name: 'default'
    secret: 'your_api_key'
    url: 'http://localhost:6333'
```
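
To apply such a HeroScript block from V code, the generated factory exposes a `play()` entry point (see `qdrant_factory_.v` earlier in this diff); a minimal sketch:

```v
import freeflowuniverse.herolib.clients.qdrant

heroscript := "
!!qdrant.configure
    name: 'default'
    secret: 'your_api_key'
    url: 'http://localhost:6333'
"

// play() turns the heroscript into a playbook and registers the configuration.
qdrant.play(heroscript: heroscript)!

mut client := qdrant.get(name: 'default')!
```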

## Installation

Qdrant server can be installed using the provided installer script:

```bash
~/code/github/freeflowuniverse/herolib/examples/installers/db/qdrant.vsh
```

This will install and start a Qdrant server locally.
@@ -115,13 +115,13 @@ fn install() ! {
	console.print_header('install ${args.name}')
	// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED
	// mut url := ''
	// if core.is_linux_arm() {
	// if core.is_linux_arm()! {
	// url = 'https://github.com/${args.name}-dev/${args.name}/releases/download/v??{version}/${args.name}_??{version}_linux_arm64.tar.gz'
	// } else if core.is_linux_intel() {
	// } else if core.is_linux_intel()! {
	// url = 'https://github.com/${args.name}-dev/${args.name}/releases/download/v??{version}/${args.name}_??{version}_linux_amd64.tar.gz'
	// } else if core.is_osx_arm() {
	// } else if core.is_osx_arm()! {
	// url = 'https://github.com/${args.name}-dev/${args.name}/releases/download/v??{version}/${args.name}_??{version}_darwin_arm64.tar.gz'
	// } else if osal.is_osx_intel() {
	// } else if osal.is_osx_intel()! {
	// url = 'https://github.com/${args.name}-dev/${args.name}/releases/download/v??{version}/${args.name}_??{version}_darwin_amd64.tar.gz'
	// } else {
	// return error('unsupported platform')

@@ -7,7 +7,7 @@ import freeflowuniverse.herolib.core.playbook
import freeflowuniverse.herolib.ui.console

@if args.cat == .installer
import freeflowuniverse.herolib.sysadmin.startupmanager
import freeflowuniverse.herolib.osal.startupmanager
import freeflowuniverse.herolib.osal.zinit
@if args.startupmanager
import time
lib/installers/db/qdrant/.heroscript (new file, 13 lines)
@@ -0,0 +1,13 @@

!!hero_code.generate_installer
    name:'qdrant'
    classname:'QDrant'
    singleton:0
    templates:1
    default:1
    title:''
    supported_platforms:''
    reset:0
    startupmanager:1
    hasconfig:1
    build:1
lib/installers/db/qdrant/qdrant_actions.v (new file, 149 lines)
@@ -0,0 +1,149 @@
|
||||
module qdrant
|
||||
|
||||
import freeflowuniverse.herolib.osal
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.core
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
// import freeflowuniverse.herolib.core.pathlib
|
||||
// import freeflowuniverse.herolib.osal.systemd
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import freeflowuniverse.herolib.installers.ulist
|
||||
// import freeflowuniverse.herolib.installers.lang.golang
|
||||
// import freeflowuniverse.herolib.installers.lang.rust
|
||||
// import freeflowuniverse.herolib.installers.lang.python
|
||||
import freeflowuniverse.herolib.core.httpconnection
|
||||
import os
|
||||
|
||||
fn startupcmd() ![]zinit.ZProcessNewArgs {
|
||||
mut res := []zinit.ZProcessNewArgs{}
|
||||
res << zinit.ZProcessNewArgs{
|
||||
name: 'qdrant'
|
||||
cmd: 'qdrant --config-path ${os.home_dir()}/hero/var/qdrant/config.yaml'
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
fn running() !bool {
	mut installer := get()!
	// plain endpoint URL of the local qdrant service
	url := 'http://localhost:6333'
	mut conn := httpconnection.new(name: 'qdrant', url: url)!
	r := conn.get(prefix: 'healthz', debug: false) or { return false }
	println(r)
	// the healthz endpoint answered, so the service is up
	return true
}
|
||||
|
||||
fn start_pre() ! {
|
||||
}
|
||||
|
||||
fn start_post() ! {
|
||||
}
|
||||
|
||||
fn stop_pre() ! {
|
||||
}
|
||||
|
||||
fn stop_post() ! {
|
||||
}
|
||||
|
||||
//////////////////// following actions are not specific to instance of the object
|
||||
|
||||
// checks if a certain version or above is installed
|
||||
fn installed() !bool {
|
||||
res := os.execute('${osal.profile_path_source_and()!} qdrant -V')
|
||||
if res.exit_code != 0 {
|
||||
println("Error to call qdrant: ${res}")
|
||||
return false
|
||||
}
|
||||
r := res.output.split_into_lines().filter(it.contains("qdrant"))
|
||||
if r.len != 1 {
|
||||
return error("couldn't parse qdrant version.\n${res.output}")
|
||||
}
|
||||
if texttools.version(version) == texttools.version(r[0].all_after("qdrant")) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// get the Upload List of the files
|
||||
fn ulist_get() !ulist.UList {
|
||||
return ulist.UList{}
|
||||
}
|
||||
|
||||
// uploads to S3 server if configured
|
||||
fn upload() ! {
|
||||
// installers.upload(
|
||||
// cmdname: 'qdrant'
|
||||
// source: '${gitpath}/target/x86_64-unknown-linux-musl/release/qdrant'
|
||||
// )!
|
||||
}
|
||||
|
||||
fn install() ! {
|
||||
console.print_header('install qdrant')
|
||||
mut url := ''
|
||||
if core.is_linux_arm()! {
|
||||
url = 'https://github.com/qdrant/qdrant/releases/download/v${version}/qdrant-aarch64-unknown-linux-musl.tar.gz'
|
||||
} else if core.is_linux_intel()! {
|
||||
url = 'https://github.com/qdrant/qdrant/releases/download/v${version}/qdrant-x86_64-unknown-linux-musl.tar.gz'
|
||||
} else if core.is_osx_arm()! {
|
||||
url = 'https://github.com/qdrant/qdrant/releases/download/v${version}/qdrant-aarch64-apple-darwin.tar.gz'
|
||||
} else if core.is_osx_intel()! {
|
||||
url = 'https://github.com/qdrant/qdrant/releases/download/v${version}/qdrant-x86_64-apple-darwin.tar.gz'
|
||||
	} else {
		return error('unsupported platform')
	}
|
||||
mut dest := osal.download(
|
||||
url: url
|
||||
minsize_kb: 18000
|
||||
expand_dir: '/tmp/qdrant'
|
||||
)!
|
||||
|
||||
mut binpath := dest.file_get('qdrant')!
|
||||
osal.cmd_add(
|
||||
cmdname: 'qdrant'
|
||||
source: binpath.path
|
||||
)!
|
||||
}
|
||||
|
||||
fn build() ! {
|
||||
// url := 'https://github.com/threefoldtech/qdrant'
|
||||
|
||||
// make sure we install base on the node
|
||||
// if osal.platform() != .ubuntu {
|
||||
// return error('only support ubuntu for now')
|
||||
// }
|
||||
// golang.install()!
|
||||
|
||||
// console.print_header('build qdrant')
|
||||
|
||||
// gitpath := gittools.get_repo(coderoot: '/tmp/builder', url: url, reset: true, pull: true)!
|
||||
|
||||
// cmd := '
|
||||
// cd ${gitpath}
|
||||
// source ~/.cargo/env
|
||||
// exit 1 #todo
|
||||
// '
|
||||
// osal.execute_stdout(cmd)!
|
||||
//
|
||||
// //now copy to the default bin path
|
||||
// mut binpath := dest.file_get('...')!
|
||||
// adds it to path
|
||||
// osal.cmd_add(
|
||||
// cmdname: 'griddriver2'
|
||||
// source: binpath.path
|
||||
// )!
|
||||
}
|
||||
|
||||
fn destroy() ! {
|
||||
|
||||
osal.process_kill_recursive(name:'qdrant')!
|
||||
osal.cmd_delete('qdrant')!
|
||||
|
||||
osal.package_remove('
|
||||
qdrant
|
||||
')!
|
||||
|
||||
osal.rm("
|
||||
qdrant
|
||||
${os.home_dir()}/hero/var/qdrant
|
||||
")!
|
||||
}
|
||||
279
lib/installers/db/qdrant/qdrant_factory_.v
Normal file
@@ -0,0 +1,279 @@
|
||||
module qdrant
|
||||
|
||||
import freeflowuniverse.herolib.core.base
|
||||
import freeflowuniverse.herolib.core.playbook
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.osal.startupmanager
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
import time
|
||||
|
||||
__global (
|
||||
qdrant_global map[string]&QDrant
|
||||
qdrant_default string
|
||||
)
|
||||
|
||||
/////////FACTORY
|
||||
|
||||
@[params]
|
||||
pub struct ArgsGet {
|
||||
pub mut:
|
||||
name string
|
||||
}
|
||||
|
||||
fn args_get(args_ ArgsGet) ArgsGet {
|
||||
mut args := args_
|
||||
if args.name == '' {
|
||||
args.name = 'default'
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
pub fn get(args_ ArgsGet) !&QDrant {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
mut obj := QDrant{}
|
||||
if args.name !in qdrant_global {
|
||||
if !exists(args)! {
|
||||
set(obj)!
|
||||
} else {
|
||||
heroscript := context.hero_config_get('qdrant', args.name)!
|
||||
mut obj_ := heroscript_loads(heroscript)!
|
||||
set_in_mem(obj_)!
|
||||
}
|
||||
}
|
||||
return qdrant_global[args.name] or {
|
||||
println(qdrant_global)
|
||||
// if we get here it is a bug: the object should already be in the globals map
|
||||
panic('could not get config for qdrant with name ${args.name}, this is a bug')
|
||||
}
|
||||
}
|
||||
|
||||
// register the config for the future
|
||||
pub fn set(o QDrant) ! {
|
||||
set_in_mem(o)!
|
||||
mut context := base.context()!
|
||||
heroscript := heroscript_dumps(o)!
|
||||
context.hero_config_set('qdrant', o.name, heroscript)!
|
||||
}
|
||||
|
||||
// does the config exists?
|
||||
pub fn exists(args_ ArgsGet) !bool {
|
||||
mut context := base.context()!
|
||||
mut args := args_get(args_)
|
||||
return context.hero_config_exists('qdrant', args.name)
|
||||
}
|
||||
|
||||
pub fn delete(args_ ArgsGet) ! {
|
||||
mut args := args_get(args_)
|
||||
mut context := base.context()!
|
||||
context.hero_config_delete('qdrant', args.name)!
|
||||
if args.name in qdrant_global {
|
||||
// del qdrant_global[args.name]
|
||||
}
|
||||
}
|
||||
|
||||
// only sets in mem, does not set as config
|
||||
fn set_in_mem(o QDrant) ! {
|
||||
mut o2 := obj_init(o)!
|
||||
qdrant_global[o.name] = &o2
|
||||
qdrant_default = o.name
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct PlayArgs {
|
||||
pub mut:
|
||||
heroscript string // if filled in then plbook will be made out of it
|
||||
plbook ?playbook.PlayBook
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn play(args_ PlayArgs) ! {
|
||||
mut args := args_
|
||||
|
||||
mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
|
||||
|
||||
mut install_actions := plbook.find(filter: 'qdrant.configure')!
|
||||
if install_actions.len > 0 {
|
||||
for install_action in install_actions {
|
||||
heroscript := install_action.heroscript()
|
||||
mut obj2 := heroscript_loads(heroscript)!
|
||||
set(obj2)!
|
||||
}
|
||||
}
|
||||
|
||||
mut other_actions := plbook.find(filter: 'qdrant.')!
|
||||
for other_action in other_actions {
|
||||
if other_action.name in ['destroy', 'install', 'build'] {
|
||||
mut p := other_action.params
|
||||
reset := p.get_default_false('reset')
|
||||
if other_action.name == 'destroy' || reset {
|
||||
console.print_debug('install action qdrant.destroy')
|
||||
destroy()!
|
||||
}
|
||||
if other_action.name == 'install' {
|
||||
console.print_debug('install action qdrant.install')
|
||||
install()!
|
||||
}
|
||||
}
|
||||
if other_action.name in ['start', 'stop', 'restart'] {
|
||||
mut p := other_action.params
|
||||
name := p.get('name')!
|
||||
mut qdrant_obj := get(name: name)!
|
||||
console.print_debug('action object:\n${qdrant_obj}')
|
||||
if other_action.name == 'start' {
|
||||
console.print_debug('install action qdrant.${other_action.name}')
|
||||
qdrant_obj.start()!
|
||||
}
|
||||
|
||||
if other_action.name == 'stop' {
|
||||
console.print_debug('install action qdrant.${other_action.name}')
|
||||
qdrant_obj.stop()!
|
||||
}
|
||||
if other_action.name == 'restart' {
|
||||
console.print_debug('install action qdrant.${other_action.name}')
|
||||
qdrant_obj.restart()!
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
//////////////////////////# LIFE CYCLE MANAGEMENT FOR INSTALLERS ///////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
fn startupmanager_get(cat zinit.StartupManagerType) !startupmanager.StartupManager {
|
||||
// unknown
|
||||
// screen
|
||||
// zinit
|
||||
// tmux
|
||||
// systemd
|
||||
match cat {
|
||||
.zinit {
|
||||
console.print_debug('startupmanager: zinit')
|
||||
return startupmanager.get(cat: .zinit)!
|
||||
}
|
||||
.systemd {
|
||||
console.print_debug('startupmanager: systemd')
|
||||
return startupmanager.get(cat: .systemd)!
|
||||
}
|
||||
else {
|
||||
console.print_debug('startupmanager: auto')
|
||||
return startupmanager.get()!
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// load from disk and make sure it is properly initialized
|
||||
pub fn (mut self QDrant) reload() ! {
|
||||
switch(self.name)
|
||||
self = obj_init(self)!
|
||||
}
|
||||
|
||||
pub fn (mut self QDrant) start() ! {
|
||||
switch(self.name)
|
||||
if self.running()! {
|
||||
return
|
||||
}
|
||||
|
||||
console.print_header('qdrant start')
|
||||
|
||||
if !installed()! {
|
||||
install()!
|
||||
}
|
||||
|
||||
configure()!
|
||||
|
||||
start_pre()!
|
||||
|
||||
for zprocess in startupcmd()! {
|
||||
mut sm := startupmanager_get(zprocess.startuptype)!
|
||||
|
||||
console.print_debug('starting qdrant with ${zprocess.startuptype}...')
|
||||
|
||||
sm.new(zprocess)!
|
||||
|
||||
sm.start(zprocess.name)!
|
||||
}
|
||||
|
||||
start_post()!
|
||||
|
||||
for _ in 0 .. 50 {
|
||||
if self.running()! {
|
||||
return
|
||||
}
|
||||
time.sleep(100 * time.millisecond)
|
||||
}
|
||||
return error('qdrant did not start properly.')
|
||||
}
|
||||
|
||||
pub fn (mut self QDrant) install_start(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
self.install(args)!
|
||||
self.start()!
|
||||
}
|
||||
|
||||
pub fn (mut self QDrant) stop() ! {
|
||||
switch(self.name)
|
||||
stop_pre()!
|
||||
for zprocess in startupcmd()! {
|
||||
mut sm := startupmanager_get(zprocess.startuptype)!
|
||||
sm.stop(zprocess.name)!
|
||||
}
|
||||
stop_post()!
|
||||
}
|
||||
|
||||
pub fn (mut self QDrant) restart() ! {
|
||||
switch(self.name)
|
||||
self.stop()!
|
||||
self.start()!
|
||||
}
|
||||
|
||||
pub fn (mut self QDrant) running() !bool {
|
||||
switch(self.name)
|
||||
|
||||
// walk over the generic processes, if not running return
|
||||
for zprocess in startupcmd()! {
|
||||
mut sm := startupmanager_get(zprocess.startuptype)!
|
||||
r := sm.running(zprocess.name)!
|
||||
if r == false {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return running()!
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct InstallArgs {
|
||||
pub mut:
|
||||
reset bool
|
||||
}
|
||||
|
||||
pub fn (mut self QDrant) install(args InstallArgs) ! {
|
||||
switch(self.name)
|
||||
if args.reset || (!installed()!) {
|
||||
install()!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (mut self QDrant) build() ! {
|
||||
switch(self.name)
|
||||
build()!
|
||||
}
|
||||
|
||||
pub fn (mut self QDrant) destroy() ! {
|
||||
switch(self.name)
|
||||
self.stop() or {}
|
||||
destroy()!
|
||||
}
|
||||
|
||||
// switch instance to be used for qdrant
|
||||
pub fn switch(name string) {
|
||||
qdrant_default = name
|
||||
}
|
||||
|
||||
// helpers
|
||||
|
||||
@[params]
|
||||
pub struct DefaultConfigArgs {
|
||||
instance string = 'default'
|
||||
}
|
||||
50
lib/installers/db/qdrant/qdrant_model.v
Normal file
@@ -0,0 +1,50 @@
|
||||
module qdrant
|
||||
import os
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
pub const version = '1.13.4'
|
||||
const singleton = false
|
||||
const default = true
|
||||
|
||||
|
||||
// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
|
||||
@[heap]
|
||||
pub struct QDrant {
|
||||
pub mut:
|
||||
name string = 'default'
|
||||
homedir string
|
||||
configpath string
|
||||
username string
|
||||
password string @[secret]
|
||||
title string
|
||||
host string
|
||||
port int
|
||||
}
|
||||
|
||||
// your checking & initialization code if needed
|
||||
fn obj_init(mycfg_ QDrant) !QDrant {
|
||||
mut mycfg := mycfg_
|
||||
return mycfg
|
||||
}
|
||||
|
||||
// called before start, writes out the config file
|
||||
fn configure() ! {
|
||||
mut installer := get()!
|
||||
storage_path := '${os.home_dir()}/hero/var/qdrant'
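// storage_path is referenced as ${storage_path} inside templates/config.yaml below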
|
||||
mut mycode := $tmpl('templates/config.yaml')
|
||||
mut path := pathlib.get_file(path: "${os.home_dir()}/hero/var/qdrant/config.yaml", create: true)!
|
||||
path.write(mycode)!
|
||||
// console.print_debug(mycode)
|
||||
}
|
||||
|
||||
/////////////NORMALLY NO NEED TO TOUCH
|
||||
|
||||
pub fn heroscript_dumps(obj QDrant) !string {
|
||||
return encoderhero.encode[QDrant](obj)!
|
||||
}
|
||||
|
||||
pub fn heroscript_loads(heroscript string) !QDrant {
|
||||
mut obj := encoderhero.decode[QDrant](heroscript)!
|
||||
return obj
|
||||
}
|
||||
44
lib/installers/db/qdrant/readme.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# qdrant
|
||||
|
||||
|
||||
|
||||
To get started
|
||||
|
||||
```vlang
|
||||
|
||||
|
||||
import freeflowuniverse.herolib.installers.db.qdrant as qdrant_installer
|
||||
|
||||
heroscript:="
|
||||
!!qdrant.configure name:'test'
|
||||
password: '1234'
|
||||
port: 7701
|
||||
|
||||
!!qdrant.start name:'test' reset:1
|
||||
"
|
||||
|
||||
qdrant_installer.play(heroscript: heroscript)!
|
||||
|
||||
//or we can call the default and do a start with reset
|
||||
//mut installer:= qdrant_installer.get()!
|
||||
//installer.install_start(reset: true)!
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
|
||||
## example heroscript
|
||||
|
||||
```hero
|
||||
!!qdrant.configure
|
||||
homedir: '/home/user/qdrant'
|
||||
username: 'admin'
|
||||
password: 'secretpassword'
|
||||
title: 'Some Title'
|
||||
host: 'localhost'
|
||||
port: 8888
|
||||
|
||||
```
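
The same configuration can also be set up programmatically; a minimal sketch, using the `QDrant` fields from `qdrant_model.v` (values are illustrative):

```vlang
import freeflowuniverse.herolib.installers.db.qdrant as qdrant_installer

// register a config object directly instead of going through heroscript
cfg := qdrant_installer.QDrant{
	name:     'test'
	host:     'localhost'
	port:     6333
	password: '1234'
}
qdrant_installer.set(cfg)!

// install if needed, then start the configured instance
mut installer := qdrant_installer.get(name: 'test')!
installer.install_start()!
```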
|
||||
|
||||
|
||||
353
lib/installers/db/qdrant/templates/config.yaml
Normal file
@@ -0,0 +1,353 @@
|
||||
log_level: INFO
|
||||
|
||||
# Logging configuration
|
||||
# Qdrant logs to stdout. You may configure to also write logs to a file on disk.
|
||||
# Be aware that this file may grow indefinitely.
|
||||
# logger:
|
||||
# # Logging format, supports `text` and `json`
|
||||
# format: text
|
||||
# on_disk:
|
||||
# enabled: true
|
||||
# log_file: path/to/log/file.log
|
||||
# log_level: INFO
|
||||
# # Logging format, supports `text` and `json`
|
||||
# format: text
|
||||
|
||||
storage:
|
||||
# Where to store all the data
|
||||
storage_path: ${storage_path}/storage
|
||||
|
||||
# Where to store snapshots
|
||||
snapshots_path: ${storage_path}/snapshots
|
||||
|
||||
snapshots_config:
|
||||
# "local" or "s3" - where to store snapshots
|
||||
snapshots_storage: local
|
||||
# s3_config:
|
||||
# bucket: ""
|
||||
# region: ""
|
||||
# access_key: ""
|
||||
# secret_key: ""
|
||||
|
||||
# Where to store temporary files
|
||||
# If null, temporary snapshots are stored in: storage/snapshots_temp/
|
||||
temp_path: null
|
||||
|
||||
# If true - point payloads will not be stored in memory.
|
||||
# It will be read from the disk every time it is requested.
|
||||
# This setting saves RAM by (slightly) increasing the response time.
|
||||
# Note: those payload values that are involved in filtering and are indexed - remain in RAM.
|
||||
#
|
||||
# Default: true
|
||||
on_disk_payload: true
|
||||
|
||||
# Maximum number of concurrent updates to shard replicas
|
||||
# If `null` - maximum concurrency is used.
|
||||
update_concurrency: null
|
||||
|
||||
# Write-ahead-log related configuration
|
||||
wal:
|
||||
# Size of a single WAL segment
|
||||
wal_capacity_mb: 32
|
||||
|
||||
# Number of WAL segments to create ahead of actual data requirement
|
||||
wal_segments_ahead: 0
|
||||
|
||||
# Normal node - receives all updates and answers all queries
|
||||
node_type: "Normal"
|
||||
|
||||
# Listener node - receives all updates, but does not answer search/read queries
|
||||
# Useful for setting up a dedicated backup node
|
||||
# node_type: "Listener"
|
||||
|
||||
performance:
|
||||
# Number of parallel threads used for search operations. If 0 - auto selection.
|
||||
max_search_threads: 1
|
||||
|
||||
# Max number of threads (jobs) for running optimizations across all collections, each thread runs one job.
|
||||
# If 0 - have no limit and choose dynamically to saturate CPU.
|
||||
# Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
|
||||
max_optimization_threads: 1
|
||||
|
||||
# CPU budget, how many CPUs (threads) to allocate for an optimization job.
|
||||
# If 0 - auto selection, keep 1 or more CPUs unallocated depending on CPU size
|
||||
# If negative - subtract this number of CPUs from the available CPUs.
|
||||
# If positive - use this exact number of CPUs.
|
||||
optimizer_cpu_budget: 1
|
||||
|
||||
# Prevent DDoS of too many concurrent updates in distributed mode.
|
||||
# One external update usually triggers multiple internal updates, which breaks internal
|
||||
# timings. For example, the health check timing and consensus timing.
|
||||
# If null - auto selection.
|
||||
update_rate_limit: null
|
||||
|
||||
# Limit for number of incoming automatic shard transfers per collection on this node, does not affect user-requested transfers.
|
||||
# The same value should be used on all nodes in a cluster.
|
||||
# Default is to allow 1 transfer.
|
||||
# If null - allow unlimited transfers.
|
||||
#incoming_shard_transfers_limit: 1
|
||||
|
||||
# Limit for number of outgoing automatic shard transfers per collection on this node, does not affect user-requested transfers.
|
||||
# The same value should be used on all nodes in a cluster.
|
||||
# Default is to allow 1 transfer.
|
||||
# If null - allow unlimited transfers.
|
||||
#outgoing_shard_transfers_limit: 1
|
||||
|
||||
# Enable async scorer which uses io_uring when rescoring.
|
||||
# Only supported on Linux, must be enabled in your kernel.
|
||||
# See: <https://qdrant.tech/articles/io_uring/#and-what-about-qdrant>
|
||||
#async_scorer: false
|
||||
|
||||
optimizers:
|
||||
# The minimal fraction of deleted vectors in a segment, required to perform segment optimization
|
||||
deleted_threshold: 0.2
|
||||
|
||||
# The minimal number of vectors in a segment, required to perform segment optimization
|
||||
vacuum_min_vector_number: 1000
|
||||
|
||||
# Target amount of segments optimizer will try to keep.
|
||||
# Real amount of segments may vary depending on multiple parameters:
|
||||
# - Amount of stored points
|
||||
# - Current write RPS
|
||||
#
|
||||
# It is recommended to select default number of segments as a factor of the number of search threads,
|
||||
# so that each segment would be handled evenly by one of the threads.
|
||||
# If `default_segment_number = 0`, will be automatically selected by the number of available CPUs
|
||||
default_segment_number: 0
|
||||
|
||||
# Do not create segments larger than this size (in KiloBytes).
|
||||
# Large segments might require disproportionately long indexation times,
|
||||
# therefore it makes sense to limit the size of segments.
|
||||
#
|
||||
# If indexing speed is a higher priority for you - make this parameter lower.
|
||||
# If search speed is more important - make this parameter higher.
|
||||
# Note: 1Kb = 1 vector of size 256
|
||||
# If not set, will be automatically selected considering the number of available CPUs.
|
||||
max_segment_size_kb: null
|
||||
|
||||
# Maximum size (in KiloBytes) of vectors to store in-memory per segment.
|
||||
# Segments larger than this threshold will be stored as read-only memmapped file.
|
||||
# To enable memmap storage, lower the threshold
|
||||
# Note: 1Kb = 1 vector of size 256
|
||||
# To explicitly disable mmap optimization, set to `0`.
|
||||
# If not set, will be disabled by default.
|
||||
memmap_threshold_kb: null
|
||||
|
||||
# Maximum size (in KiloBytes) of vectors allowed for plain index.
|
||||
# Default value based on https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md
|
||||
# Note: 1Kb = 1 vector of size 256
|
||||
# To explicitly disable vector indexing, set to `0`.
|
||||
# If not set, the default value will be used.
|
||||
indexing_threshold_kb: 20000
|
||||
|
||||
# Interval between forced flushes.
|
||||
flush_interval_sec: 5
|
||||
|
||||
# Max number of threads (jobs) for running optimizations per shard.
|
||||
# Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
|
||||
# If null - have no limit and choose dynamically to saturate CPU.
|
||||
# If 0 - no optimization threads, optimizations will be disabled.
|
||||
max_optimization_threads: null
|
||||
|
||||
# This section has the same options as 'optimizers' above. All values specified here will overwrite the collections
|
||||
# optimizers configs regardless of the config above and the options specified at collection creation.
|
||||
#optimizers_overwrite:
|
||||
# deleted_threshold: 0.2
|
||||
# vacuum_min_vector_number: 1000
|
||||
# default_segment_number: 0
|
||||
# max_segment_size_kb: null
|
||||
# memmap_threshold_kb: null
|
||||
# indexing_threshold_kb: 20000
|
||||
# flush_interval_sec: 5
|
||||
# max_optimization_threads: null
|
||||
|
||||
# Default parameters of HNSW Index. Could be overridden for each collection or named vector individually
|
||||
hnsw_index:
|
||||
# Number of edges per node in the index graph. The larger the value, the more accurate the search, but the more space required.
|
||||
m: 16
|
||||
|
||||
# Number of neighbours to consider during index building. The larger the value, the more accurate the search, but the more time required to build the index.
|
||||
ef_construct: 100
|
||||
|
||||
# Minimal size (in KiloBytes) of vectors for additional payload-based indexing.
|
||||
# If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used -
|
||||
# in this case full-scan search should be preferred by query planner and additional indexing is not required.
|
||||
# Note: 1Kb = 1 vector of size 256
|
||||
full_scan_threshold_kb: 10000
|
||||
|
||||
# Number of parallel threads used for background index building.
|
||||
# If 0 - automatically select.
|
||||
# Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs.
|
||||
# On small CPUs, less threads are used.
|
||||
max_indexing_threads: 0
|
||||
|
||||
# Store HNSW index on disk. If set to false, index will be stored in RAM. Default: false
|
||||
on_disk: false
|
||||
|
||||
# Custom M param for hnsw graph built for payload index. If not set, default M will be used.
|
||||
payload_m: null
|
||||
|
||||
# Default shard transfer method to use if none is defined.
|
||||
# If null - don't have a shard transfer preference, choose automatically.
|
||||
# If stream_records, snapshot or wal_delta - prefer this specific method.
|
||||
# More info: https://qdrant.tech/documentation/guides/distributed_deployment/#shard-transfer-method
|
||||
shard_transfer_method: null
|
||||
|
||||
# Default parameters for collections
|
||||
collection:
|
||||
# Number of replicas of each shard that network tries to maintain
|
||||
replication_factor: 1
|
||||
|
||||
# How many replicas should apply the operation for us to consider it successful
|
||||
write_consistency_factor: 1
|
||||
|
||||
# Default parameters for vectors.
|
||||
vectors:
|
||||
# Whether vectors should be stored in memory or on disk.
|
||||
on_disk: null
|
||||
|
||||
# shard_number_per_node: 1
|
||||
|
||||
# Default quantization configuration.
|
||||
# More info: https://qdrant.tech/documentation/guides/quantization
|
||||
quantization: null
|
||||
|
||||
# Default strict mode parameters for newly created collections.
|
||||
strict_mode:
|
||||
# Whether strict mode is enabled for a collection or not.
|
||||
enabled: false
|
||||
|
||||
# Max allowed `limit` parameter for all APIs that don't have their own max limit.
|
||||
max_query_limit: null
|
||||
|
||||
# Max allowed `timeout` parameter.
|
||||
max_timeout: null
|
||||
|
||||
# Allow usage of unindexed fields in retrieval based (eg. search) filters.
|
||||
unindexed_filtering_retrieve: null
|
||||
|
||||
# Allow usage of unindexed fields in filtered updates (eg. delete by payload).
|
||||
unindexed_filtering_update: null
|
||||
|
||||
# Max HNSW value allowed in search parameters.
|
||||
search_max_hnsw_ef: null
|
||||
|
||||
# Whether exact search is allowed or not.
|
||||
search_allow_exact: null
|
||||
|
||||
# Max oversampling value allowed in search.
|
||||
search_max_oversampling: null
|
||||
|
||||
service:
|
||||
# Maximum size of POST data in a single request in megabytes
|
||||
max_request_size_mb: 32
|
||||
|
||||
# Number of parallel workers used for serving the api. If 0 - equal to the number of available cores.
|
||||
# If missing - Same as storage.max_search_threads
|
||||
max_workers: 0
|
||||
|
||||
# Host to bind the service on
|
||||
host: 0.0.0.0
|
||||
|
||||
# HTTP(S) port to bind the service on
|
||||
http_port: 6333
|
||||
|
||||
# gRPC port to bind the service on.
|
||||
# If `null` - gRPC is disabled. Default: null
|
||||
# Comment to disable gRPC:
|
||||
grpc_port: 6334
|
||||
|
||||
# Enable CORS headers in REST API.
|
||||
# If enabled, browsers would be allowed to query REST endpoints regardless of query origin.
|
||||
# More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
|
||||
# Default: true
|
||||
enable_cors: true
|
||||
|
||||
# Enable HTTPS for the REST and gRPC API
|
||||
enable_tls: false
|
||||
|
||||
# Check user HTTPS client certificate against CA file specified in tls config
|
||||
verify_https_client_certificate: false
|
||||
|
||||
# Set an api-key.
|
||||
# If set, all requests must include a header with the api-key.
|
||||
# example header: `api-key: <API-KEY>`
|
||||
#
|
||||
# If you enable this you should also enable TLS.
|
||||
# (Either above or via an external service like nginx.)
|
||||
# Sending an api-key over an unencrypted channel is insecure.
|
||||
#
|
||||
# Uncomment to enable.
|
||||
# api_key: your_secret_api_key_here
|
||||
|
||||
# Set an api-key for read-only operations.
|
||||
# If set, all requests must include a header with the api-key.
|
||||
# example header: `api-key: <API-KEY>`
|
||||
#
|
||||
# If you enable this you should also enable TLS.
|
||||
# (Either above or via an external service like nginx.)
|
||||
# Sending an api-key over an unencrypted channel is insecure.
|
||||
#
|
||||
# Uncomment to enable.
|
||||
# read_only_api_key: your_secret_read_only_api_key_here
|
||||
|
||||
# Uncomment to enable JWT Role Based Access Control (RBAC).
|
||||
# If enabled, you can generate JWT tokens with fine-grained rules for access control.
|
||||
# Use generated token instead of API key.
|
||||
#
|
||||
# jwt_rbac: true
|
||||
|
||||
# Hardware reporting adds information to the API responses with a
|
||||
# hint on how many resources were used to execute the request.
|
||||
#
|
||||
# Uncomment to enable.
|
||||
# hardware_reporting: true
|
||||
|
||||
cluster:
|
||||
# Use `enabled: true` to run Qdrant in distributed deployment mode
|
||||
enabled: false
|
||||
|
||||
# Configuration of the inter-cluster communication
|
||||
p2p:
|
||||
# Port for internal communication between peers
|
||||
port: 6335
|
||||
|
||||
# Use TLS for communication between peers
|
||||
enable_tls: false
|
||||
|
||||
# Configuration related to distributed consensus algorithm
|
||||
consensus:
|
||||
# How frequently peers should ping each other.
|
||||
# Setting this parameter to lower value will allow consensus
|
||||
# to detect disconnected nodes earlier, but too frequent
|
||||
# tick period may create significant network and CPU overhead.
|
||||
# We encourage you NOT to change this parameter unless you know what you are doing.
|
||||
tick_period_ms: 100
|
||||
|
||||
# Set to true to prevent service from sending usage statistics to the developers.
|
||||
# Read more: https://qdrant.tech/documentation/guides/telemetry
|
||||
telemetry_disabled: false
|
||||
|
||||
# TLS configuration.
|
||||
# Required if either service.enable_tls or cluster.p2p.enable_tls is true.
|
||||
# tls:
|
||||
# # Server certificate chain file
|
||||
# cert: ./tls/cert.pem
|
||||
|
||||
# # Server private key file
|
||||
# key: ./tls/key.pem
|
||||
|
||||
# # Certificate authority certificate file.
|
||||
# # This certificate will be used to validate the certificates
|
||||
# # presented by other nodes during inter-cluster communication.
|
||||
# #
|
||||
# # If verify_https_client_certificate is true, it will verify
|
||||
# # HTTPS client certificate
|
||||
# #
|
||||
# # Required if cluster.p2p.enable_tls is true.
|
||||
# ca_cert: ./tls/cacert.pem
|
||||
|
||||
# # TTL in seconds to reload certificate from disk, useful for certificate rotations.
|
||||
# # Only works for HTTPS endpoints. Does not support gRPC (and intra-cluster communication).
|
||||
# # If `null` - TTL is disabled.
|
||||
# cert_ttl: 3600
|
||||
@@ -7,7 +7,7 @@ To get started
|
||||
```vlang
|
||||
|
||||
|
||||
import freeflowuniverse.herolib.lib.installers.infra.coredns as coredns_installer
|
||||
import freeflowuniverse.herolib.installers.infra.coredns as coredns_installer
|
||||
|
||||
heroscript:="
|
||||
!!coredns.configure name:'test'
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
module python
|
||||
|
||||
import freeflowuniverse.herolib.data.paramsparser
|
||||
import freeflowuniverse.herolib.data.encoderhero
|
||||
import os
|
||||
|
||||
pub const version = '3.12.0'
|
||||
const singleton = true
|
||||
|
||||
44
lib/mcp/aiprompt.md
Normal file
@@ -0,0 +1,44 @@
|
||||
|
||||
make an mcp server in @lib/mcp/v_do
|
||||
|
||||
use the Standard Input/Output (stdio) transport as described in
|
||||
https://modelcontextprotocol.io/docs/concepts/transports
|
||||
|
||||
The tool has following methods
|
||||
|
||||
## test
|
||||
- args: $fullpath
|
||||
- cmd: 'v -gc none -stats -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc test ${fullpath}'
|
||||
|
||||
if the file is a dir then find the .v files (non-recursive) and do it for each of those
|
||||
|
||||
collect the output and return
|
||||
|
||||
## run
|
||||
- args: $fullpath
|
||||
- cmd: 'v -gc none -stats -enable-globals -n -w -cg -g -cc tcc run ${fullpath}'
|
||||
|
||||
if the file is a dir then find the .v files (non-recursive) and do it for each of those
|
||||
|
||||
collect the output and return
|
||||
|
||||
|
||||
## compile
|
||||
- args: $fullpath
|
||||
- cmd: 'cd /tmp && v -gc none -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc ${fullpath}'
|
||||
|
||||
if the file is a dir then find the .v files (non-recursive) and do it for each of those
|
||||
|
||||
collect the output and return
|
||||
|
||||
|
||||
## vet
|
||||
- args: $fullpath
|
||||
- cmd: 'v vet -v -w ${fullpath}'
|
||||
|
||||
if the file is a dir then find the .v files (non-recursive) and do it for each of those
|
||||
|
||||
collect the output and return
|
||||
|
||||
|
||||
|
||||
92
lib/mcp/v_do/README.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# V-Do MCP Server
|
||||
|
||||
An implementation of the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) server for V language operations. This server uses the Standard Input/Output (stdio) transport as described in the [MCP documentation](https://modelcontextprotocol.io/docs/concepts/transports).
|
||||
|
||||
## Features
|
||||
|
||||
The server supports the following operations:
|
||||
|
||||
1. **test** - Run V tests on a file or directory
|
||||
2. **run** - Execute V code from a file or directory
|
||||
3. **compile** - Compile V code from a file or directory
|
||||
4. **vet** - Run V vet on a file or directory
|
||||
|
||||
## Usage
|
||||
|
||||
### Building the Server
|
||||
|
||||
```bash
|
||||
v -gc none -stats -enable-globals -n -w -cg -g -cc tcc /Users/despiegk/code/github/freeflowuniverse/herolib/lib/mcp/v_do
|
||||
```
|
||||
|
||||
### Using the Server
|
||||
|
||||
The server communicates using the MCP protocol over stdio. To send a request, use the following format:
|
||||
|
||||
```
|
||||
Content-Length: <length>
|
||||
|
||||
{"jsonrpc":"2.0","id":"<request-id>","method":"<method-name>","params":{"fullpath":"<path-to-file-or-directory>"}}
|
||||
```
|
||||
|
||||
Where:
|
||||
- `<length>` is the length of the JSON message in bytes
|
||||
- `<request-id>` is a unique identifier for the request
|
||||
- `<method-name>` is one of: `test`, `run`, `compile`, or `vet`
|
||||
- `<path-to-file-or-directory>` is the absolute path to the V file or directory to process
|
||||
|
||||
### Example
|
||||
|
||||
Request:
|
||||
```
|
||||
Content-Length: 82
|
||||
|
||||
{"jsonrpc":"2.0","id":"1","method":"test","params":{"fullpath":"/path/to/file.v"}}
|
||||
```
|
||||
|
||||
Response:
|
||||
```
|
||||
Content-Length: 245
|
||||
|
||||
{"jsonrpc":"2.0","id":"1","result":{"output":"Command: v -gc none -stats -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc test /path/to/file.v\nExit code: 0\nOutput:\nAll tests passed!"}}
|
||||
```
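
For reference, a minimal sketch of how a client can produce that framing in V: the `Content-Length` value is the byte length of the encoded JSON body. The struct mirrors the request shape above; the path is illustrative.

```v
import json

struct MCPRequest {
	id      string
	method  string
	params  map[string]string
	jsonrpc string = '2.0'
}

fn main() {
	req := MCPRequest{
		id:     '1'
		method: 'test'
		params: {
			'fullpath': '/path/to/file.v'
		}
	}
	body := json.encode(req)
	// the header counts only the bytes of the JSON body
	print('Content-Length: ${body.len}\r\n\r\n${body}')
}
```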
|
||||
|
||||
## Methods
|
||||
|
||||
### test
|
||||
|
||||
Runs V tests on the specified file or directory.
|
||||
|
||||
Command used:
|
||||
```
|
||||
v -gc none -stats -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc test ${fullpath}
|
||||
```
|
||||
|
||||
If a directory is specified, it will run tests on all `.v` files in the directory (non-recursive).
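
A sketch of that directory handling (it mirrors `handlers/vtest.v` and `handlers/vlist.v` in this module; generated files ending in `_.v` are skipped):

```v
import os

// run the test command on every top-level .v file of a directory (non-recursive)
// and collect the combined output
fn test_dir(dir string) !string {
	mut out := ''
	for f in os.ls(dir)! {
		if !f.ends_with('.v') || f.ends_with('_.v') {
			continue
		}
		p := os.join_path(dir, f)
		res := os.execute('v -gc none -stats -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc test ${p}')
		out += 'Exit code: ${res.exit_code}\n${res.output}\n-----------------------\n'
	}
	return out
}
```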
|
||||
|
||||
### run
|
||||
|
||||
Executes the specified V file or all V files in a directory.
|
||||
|
||||
Command used:
|
||||
```
|
||||
v -gc none -stats -enable-globals -n -w -cg -g -cc tcc run ${fullpath}
|
||||
```
|
||||
|
||||
### compile
|
||||
|
||||
Compiles the specified V file or all V files in a directory.
|
||||
|
||||
Command used:
|
||||
```
|
||||
cd /tmp && v -gc none -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc ${fullpath}
|
||||
```
|
||||
|
||||
### vet
|
||||
|
||||
Runs V vet on the specified file or directory.
|
||||
|
||||
Command used:
|
||||
```
|
||||
v vet -v -w ${fullpath}
|
||||
```
|
||||
4
lib/mcp/v_do/handlers/vcompile.v
Normal file
@@ -0,0 +1,4 @@
|
||||
module handlers
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.mcp.v_do.logger
|
||||
21
lib/mcp/v_do/handlers/vlist.v
Normal file
@@ -0,0 +1,21 @@
|
||||
module handlers
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.mcp.v_do.logger
|
||||
|
||||
// list_v_files returns all .v files in a directory (non-recursive), excluding generated files ending with _.v
|
||||
fn list_v_files(dir string) ![]string {
|
||||
files := os.ls(dir) or {
|
||||
return error('Error listing directory: $err')
|
||||
}
|
||||
|
||||
mut v_files := []string{}
|
||||
for file in files {
|
||||
if file.ends_with('.v') && !file.ends_with('_.v') {
|
||||
filepath := os.join_path(dir, file)
|
||||
v_files << filepath
|
||||
}
|
||||
}
|
||||
|
||||
return v_files
|
||||
}
|
||||
4
lib/mcp/v_do/handlers/vrun.v
Normal file
@@ -0,0 +1,4 @@
|
||||
module handlers
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.mcp.v_do.logger
|
||||
31
lib/mcp/v_do/handlers/vtest.v
Normal file
@@ -0,0 +1,31 @@
|
||||
module handlers
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.mcp.v_do.logger
|
||||
|
||||
// test runs v test on the specified file or directory
|
||||
pub fn vtest(fullpath string) !string {
|
||||
logger.info('test $fullpath')
|
||||
if !os.exists(fullpath) {
|
||||
return error('File or directory does not exist: $fullpath')
|
||||
}
|
||||
if os.is_dir(fullpath) {
|
||||
mut results:=""
|
||||
for item in list_v_files(fullpath)! {
|
||||
results += vtest(item)!
|
||||
results += '\n-----------------------\n'
|
||||
}
|
||||
return results
|
||||
} else {
|
||||
cmd := 'v -gc none -stats -enable-globals -show-c-output -keepc -n -w -cg -o /tmp/tester.c -g -cc tcc test ${fullpath}'
|
||||
logger.debug('Executing command: $cmd')
|
||||
result := os.execute(cmd)
|
||||
if result.exit_code != 0 {
|
||||
return error('Test failed for $fullpath with exit code ${result.exit_code}\n${result.output}')
|
||||
} else {
|
||||
logger.info('Test completed for $fullpath')
|
||||
}
|
||||
return 'Command: $cmd\nExit code: ${result.exit_code}\nOutput:\n${result.output}'
|
||||
}
|
||||
|
||||
}
|
||||
42
lib/mcp/v_do/handlers/vvet.v
Normal file
@@ -0,0 +1,42 @@
|
||||
module handlers
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.mcp.v_do.logger
|
||||
|
||||
// vvet runs v vet on the specified file or directory
|
||||
pub fn vvet(fullpath string) !string {
|
||||
logger.info('vet $fullpath')
|
||||
if !os.exists(fullpath) {
|
||||
return error('File or directory does not exist: $fullpath')
|
||||
}
|
||||
|
||||
if os.is_dir(fullpath) {
|
||||
mut results := ""
|
||||
files := list_v_files(fullpath) or {
|
||||
return error('Error listing V files: $err')
|
||||
}
|
||||
for file in files {
|
||||
results += vet_file(file) or {
|
||||
logger.error('Failed to vet $file: $err')
|
||||
return error('Failed to vet $file: $err')
|
||||
}
|
||||
results += '\n-----------------------\n'
|
||||
}
|
||||
return results
|
||||
} else {
|
||||
return vet_file(fullpath)
|
||||
}
|
||||
}
|
||||
|
||||
// vet_file runs v vet on a single file
|
||||
fn vet_file(file string) !string {
|
||||
cmd := 'v vet -v -w ${file}'
|
||||
logger.debug('Executing command: $cmd')
|
||||
result := os.execute(cmd)
|
||||
if result.exit_code != 0 {
|
||||
return error('Vet failed for $file with exit code ${result.exit_code}\n${result.output}')
|
||||
} else {
|
||||
logger.info('Vet completed for $file')
|
||||
}
|
||||
return 'Command: $cmd\nExit code: ${result.exit_code}\nOutput:\n${result.output}'
|
||||
}
|
||||
50
lib/mcp/v_do/logger/logger.v
Normal file
@@ -0,0 +1,50 @@
|
||||
module logger
|
||||
|
||||
import os
|
||||
|
||||
// LogLevel defines the severity of log messages
|
||||
pub enum LogLevel {
|
||||
debug
|
||||
info
|
||||
warn
|
||||
error
|
||||
fatal
|
||||
}
|
||||
|
||||
// log outputs a message to stderr with the specified log level
|
||||
pub fn log(level LogLevel, message string) {
|
||||
level_str := match level {
|
||||
.debug { 'DEBUG' }
|
||||
.info { 'INFO ' }
|
||||
.warn { 'WARN ' }
|
||||
.error { 'ERROR' }
|
||||
.fatal { 'FATAL' }
|
||||
}
|
||||
eprintln('[$level_str] $message')
|
||||
}
|
||||
|
||||
// debug logs a debug message to stderr
|
||||
pub fn debug(message string) {
|
||||
log(.debug, message)
|
||||
}
|
||||
|
||||
// info logs an info message to stderr
|
||||
pub fn info(message string) {
|
||||
log(.info, message)
|
||||
}
|
||||
|
||||
// warn logs a warning message to stderr
|
||||
pub fn warn(message string) {
|
||||
log(.warn, message)
|
||||
}
|
||||
|
||||
// error logs an error message to stderr
|
||||
pub fn error(message string) {
|
||||
log(.error, message)
|
||||
}
|
||||
|
||||
// fatal logs a fatal error message to stderr and exits the program
|
||||
pub fn fatal(message string) {
|
||||
log(.fatal, message)
|
||||
exit(1)
|
||||
}
|
||||
188
lib/mcp/v_do/server.v
Normal file
@@ -0,0 +1,188 @@
|
||||
module v_do
|
||||
|
||||
import json
|
||||
import os
|
||||
import freeflowuniverse.herolib.mcp.v_do.handlers
|
||||
import freeflowuniverse.herolib.mcp.v_do.logger
|
||||
|
||||
// MCP server implementation using stdio transport
|
||||
// Based on https://modelcontextprotocol.io/docs/concepts/transports
|
||||
|
||||
// MCPRequest represents an MCP request message
|
||||
struct MCPRequest {
|
||||
id string
|
||||
method string
|
||||
params map[string]string
|
||||
jsonrpc string = '2.0'
|
||||
}
|
||||
|
||||
// MCPResponse represents an MCP response
|
||||
struct MCPResponse {
|
||||
id string
|
||||
result map[string]string
|
||||
jsonrpc string = '2.0'
|
||||
}
|
||||
|
||||
// MCPErrorResponse represents an MCP error response
|
||||
struct MCPErrorResponse {
|
||||
id string
|
||||
error MCPError
|
||||
jsonrpc string = '2.0'
|
||||
}
|
||||
|
||||
// MCPError represents an error in an MCP response
|
||||
struct MCPError {
|
||||
code int
|
||||
message string
|
||||
}
|
||||
|
||||
// Server is the main MCP server struct
|
||||
pub struct Server {}
|
||||
|
||||
// new_server creates a new MCP server
|
||||
pub fn new_server() &Server {
|
||||
return &Server{}
|
||||
}
|
||||
|
||||
// start starts the MCP server
|
||||
pub fn (mut s Server) start() ! {
|
||||
logger.info('Starting V-Do MCP server')
|
||||
for {
|
||||
message := s.read_message() or {
|
||||
logger.error('Failed to parse message: $err')
|
||||
s.send_error('0', -32700, 'Failed to parse message: $err')
|
||||
continue
|
||||
}
|
||||
|
||||
logger.debug('Received message: ${message.method}')
|
||||
s.handle_message(message) or {
|
||||
logger.error('Internal error: $err')
|
||||
s.send_error(message.id, -32603, 'Internal error: $err')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// read_message reads an MCP message from stdin
|
||||
fn (mut s Server) read_message() !MCPRequest {
|
||||
mut content_length := 0
|
||||
|
||||
// Read headers
|
||||
for {
|
||||
line := read_line_from_stdin() or {
|
||||
logger.error('Failed to read line: $err')
|
||||
return error('Failed to read line: $err')
|
||||
}
|
||||
if line.len == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if line.starts_with('Content-Length:') {
|
||||
content_length_str := line.all_after('Content-Length:').trim_space()
|
||||
content_length = content_length_str.int()
|
||||
}
|
||||
}
|
||||
|
||||
if content_length == 0 {
|
||||
logger.error('No Content-Length header found')
|
||||
return error('No Content-Length header found')
|
||||
}
|
||||
|
||||
// Read message body
|
||||
body := read_content_from_stdin(content_length) or {
|
||||
logger.error('Failed to read content: $err')
|
||||
return error('Failed to read content: $err')
|
||||
}
|
||||
|
||||
// Parse JSON
|
||||
message := json.decode(MCPRequest, body) or {
|
||||
logger.error('Failed to decode JSON: $err')
|
||||
return error('Failed to decode JSON: $err')
|
||||
}
|
||||
|
||||
return message
|
||||
}
|
||||
|
||||
// read_line_from_stdin reads a line from stdin
|
||||
fn read_line_from_stdin() !string {
|
||||
line := os.get_line()
|
||||
return line
|
||||
}
|
||||
|
||||
// read_content_from_stdin reads content from stdin with the specified length
|
||||
fn read_content_from_stdin(length int) !string {
|
||||
// For MCP protocol, we need to read exactly the content length
|
||||
mut content := ''
|
||||
mut reader := os.stdin()
|
||||
mut buf := []u8{len: length}
|
||||
n := reader.read(mut buf) or {
|
||||
logger.error('Failed to read from stdin: $err')
|
||||
return error('Failed to read from stdin: $err')
|
||||
}
|
||||
|
||||
if n < length {
|
||||
logger.error('Expected to read $length bytes, but got $n')
|
||||
return error('Expected to read $length bytes, but got $n')
|
||||
}
|
||||
|
||||
content = buf[..n].bytestr()
|
||||
return content
|
||||
}
|
||||
|
||||
// handle_message handles an MCP message
|
||||
fn (mut s Server) handle_message(message MCPRequest) ! {
|
||||
match message.method {
|
||||
'test' {
|
||||
fullpath := message.params['fullpath'] or {
|
||||
logger.error('Missing fullpath parameter')
|
||||
s.send_error(message.id, -32602, 'Missing fullpath parameter')
|
||||
return error('Missing fullpath parameter')
|
||||
}
|
||||
logger.info('Running test on $fullpath')
|
||||
result := handlers.vtest(fullpath) or {
|
||||
logger.error('Test failed: $err')
|
||||
s.send_error(message.id, -32000, 'Test failed: $err')
|
||||
return err
|
||||
}
|
||||
s.send_response(message.id, {'output': result})
|
||||
}
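// TODO: dispatch 'run', 'compile' and 'vet' here in the same way once their handlers are implemented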
|
||||
else {
|
||||
logger.error('Unknown method: ${message.method}')
|
||||
s.send_error(message.id, -32601, 'Unknown method: ${message.method}')
|
||||
return error('Unknown method: ${message.method}')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// send_response sends an MCP response
|
||||
fn (mut s Server) send_response(id string, result map[string]string) {
|
||||
response := MCPResponse{
|
||||
id: id
|
||||
result: result
|
||||
}
|
||||
|
||||
json_str := json.encode(response)
|
||||
logger.debug('Sending response for id: $id')
|
||||
s.write_message(json_str)
|
||||
}
|
||||
|
||||
// send_error sends an MCP error response
|
||||
fn (mut s Server) send_error(id string, code int, message string) {
|
||||
logger.error('Sending error response: $message (code: $code, id: $id)')
|
||||
error_response := MCPErrorResponse{
|
||||
id: id
|
||||
error: MCPError{
|
||||
code: code
|
||||
message: message
|
||||
}
|
||||
}
|
||||
|
||||
json_str := json.encode(error_response)
|
||||
s.write_message(json_str)
|
||||
}
|
||||
|
||||
// write_message writes an MCP message to stdout
|
||||
fn (mut s Server) write_message(content string) {
|
||||
header := 'Content-Length: ${content.len}\r\n\r\n'
|
||||
print(header)
|
||||
print(content)
|
||||
}
|
||||
105
lib/mcp/v_do/test_client.vsh
Normal file
@@ -0,0 +1,105 @@
|
||||
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run
|
||||
|
||||
import os
|
||||
import flag
|
||||
import json
|
||||
|
||||
// Simple test client for the V-Do MCP server
|
||||
// This script sends test requests to the MCP server and displays the responses
|
||||
|
||||
struct MCPRequest {
|
||||
id string
|
||||
method string
|
||||
params map[string]string
|
||||
jsonrpc string = '2.0'
|
||||
}
|
||||
|
||||
fn send_request(method string, fullpath string) {
|
||||
// Create the request
|
||||
request := MCPRequest{
|
||||
id: '1'
|
||||
method: method
|
||||
params: {
|
||||
'fullpath': fullpath
|
||||
}
|
||||
}
|
||||
|
||||
// Encode to JSON
|
||||
json_str := json.encode(request)
|
||||
|
||||
// Format the message with headers
|
||||
message := 'Content-Length: ${json_str.len}\r\n\r\n${json_str}'
|
||||
|
||||
// Write to a temporary file
|
||||
os.write_file('/tmp/mcp_request.txt', message) or {
|
||||
eprintln('Failed to write request to file: $err')
|
||||
return
|
||||
}
|
||||
|
||||
// Execute the MCP server with the request
|
||||
cmd := 'cat /tmp/mcp_request.txt | v run /Users/despiegk/code/github/freeflowuniverse/herolib/lib/mcp/v_do/main.v'
|
||||
result := os.execute(cmd)
|
||||
|
||||
if result.exit_code != 0 {
|
||||
eprintln('Error executing MCP server: ${result.output}')
|
||||
return
|
||||
}
|
||||
|
||||
// Parse and display the response
|
||||
response := result.output
|
||||
println('Raw response:')
|
||||
println('-----------------------------------')
|
||||
println(response)
|
||||
println('-----------------------------------')
|
||||
|
||||
// Try to extract the JSON part
|
||||
if response.contains('{') && response.contains('}') {
|
||||
json_start := response.index_after('{', 0)
|
||||
json_end := response.last_index_of('}')
|
||||
if json_start >= 0 && json_end >= 0 && json_end > json_start {
|
||||
json_part := response[json_start-1..json_end+1]
|
||||
println('Extracted JSON:')
|
||||
println(json_part)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse command line arguments
|
||||
mut fp := flag.new_flag_parser(os.args)
|
||||
fp.application('test_client.vsh')
|
||||
fp.version('v0.1.0')
|
||||
fp.description('Test client for V-Do MCP server')
|
||||
fp.skip_executable()
|
||||
|
||||
method := fp.string('method', `m`, 'test', 'Method to call (test, run, compile, vet)')
|
||||
fullpath := fp.string('path', `p`, '', 'Path to the file or directory to process')
|
||||
help_requested := fp.bool('help', `h`, false, 'Show help message')
|
||||
|
||||
if help_requested {
|
||||
println(fp.usage())
|
||||
exit(0)
|
||||
}
|
||||
|
||||
additional_args := fp.finalize() or {
|
||||
eprintln(err)
|
||||
println(fp.usage())
|
||||
exit(1)
|
||||
}
|
||||
|
||||
if fullpath == '' {
|
||||
eprintln('Error: Path is required')
|
||||
println(fp.usage())
|
||||
exit(1)
|
||||
}
|
||||
|
||||
// Validate method
|
||||
valid_methods := ['test', 'run', 'compile', 'vet']
|
||||
if method !in valid_methods {
|
||||
eprintln('Error: Invalid method. Must be one of: ${valid_methods}')
|
||||
println(fp.usage())
|
||||
exit(1)
|
||||
}
|
||||
|
||||
// Send the request
|
||||
println('Sending $method request for $fullpath...')
|
||||
send_request(method, fullpath)
|
||||
12
lib/mcp/v_do/vdo.v
Normal file
@@ -0,0 +1,12 @@
|
||||
module v_do
|
||||
|
||||
import freeflowuniverse.herolib.mcp.v_do.logger
|
||||
|
||||
fn main() {
|
||||
logger.info('Starting V-Do server')
|
||||
mut server := new_server()
|
||||
server.start() or {
|
||||
logger.fatal('Error starting server: $err')
|
||||
exit(1)
|
||||
}
|
||||
}
|
||||
@@ -3,7 +3,7 @@
|
||||
This module provides functionality for managing DNS records in Redis for use with CoreDNS. It supports various DNS record types and provides a simple interface for adding and managing DNS records.
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.lib.osal.coredns
|
||||
import freeflowuniverse.herolib.osal.coredns
|
||||
|
||||
// Create a new DNS record set
|
||||
mut rs := coredns.new_dns_record_set()
|
||||
|
||||
@@ -98,6 +98,7 @@ pub fn (mut self ScreensFactory) add(args_ ScreenAddArgs) !Screen {
|
||||
name: args.name
|
||||
cmd: args.cmd
|
||||
}
|
||||
// println(self.screens)
|
||||
if args.start {
|
||||
self.start(args.name)!
|
||||
}
|
||||
|
||||
@@ -45,8 +45,8 @@ pub fn get(args StartupManagerArgs) !StartupManager {
|
||||
if args.cat == .unknown {
|
||||
if zinit.check() {
|
||||
sm.cat = .zinit
|
||||
} else if systemd.check()! {
|
||||
sm.cat = .systemd
|
||||
}else {
|
||||
sm.cat = .screen
|
||||
}
|
||||
}
|
||||
return sm
|
||||
@@ -76,11 +76,11 @@ pub fn (mut sm StartupManager) new(args zinit.ZProcessNewArgs) ! {
|
||||
match mycat {
|
||||
.screen {
|
||||
mut scr := screen.new(reset: false)!
|
||||
console.print_debug('screen')
|
||||
console.print_debug('screen startup manager ${args.name} cmd:${args.cmd}')
|
||||
_ = scr.add(name: args.name, cmd: args.cmd, reset: args.restart)!
|
||||
}
|
||||
.systemd {
|
||||
console.print_debug('systemd start ${args.name}')
|
||||
// console.print_debug('systemd start ${args.name}')
|
||||
mut systemdfactory := systemd.new()!
|
||||
systemdfactory.new(
|
||||
cmd: args.cmd
|
||||
@@ -309,6 +309,14 @@ pub fn (mut sm StartupManager) output(name string) !string {
|
||||
}
|
||||
|
||||
pub fn (mut sm StartupManager) exists(name string) !bool {
|
||||
println(sm.cat)
|
||||
if sm.cat == .unknown {
|
||||
if zinit.check() {
|
||||
sm.cat = .zinit
|
||||
}else {
|
||||
sm.cat = .screen
|
||||
}
|
||||
}
|
||||
match sm.cat {
|
||||
.screen {
|
||||
mut scr := screen.new(reset: false) or { panic("can't get screen") }
|
||||
|
||||
@@ -14,7 +14,7 @@ The module allows you to:
|
||||
## Usage Example
|
||||
|
||||
```v
|
||||
import freeflowuniverse.herolib.lib.osal.traefik
|
||||
import freeflowuniverse.herolib.osal.traefik
|
||||
|
||||
fn main() ! {
|
||||
// Create a new Traefik configuration
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
# startup manager
|
||||
|
||||
```go
|
||||
import freeflowuniverse.herolib.sysadmin.startupmanager
|
||||
mut sm:=startupmanager.get()!
|
||||
|
||||
|
||||
sm.start(
|
||||
name: 'myscreen'
|
||||
cmd: 'htop'
|
||||
description: '...'
|
||||
)!
|
||||
|
||||
```
|
||||
|
||||
## some basic commands for screen
|
||||
|
||||
```bash
|
||||
#list the screens
|
||||
screen -ls
|
||||
#attach to the screens
|
||||
screen -r myscreen
|
||||
```
|
||||
|
||||
to exit a screen to
|
||||
|
||||
```
|
||||
ctrl a d
|
||||
```
|
||||
@@ -1,380 +0,0 @@
|
||||
module startupmanager
|
||||
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.osal.screen
|
||||
import freeflowuniverse.herolib.osal.systemd
|
||||
import freeflowuniverse.herolib.osal.zinit
|
||||
|
||||
// // TODO: check if using this interface would simplify things
|
||||
// pub interface StartupManagerI {
|
||||
// new(args zinit.ZProcessNewArgs)!
|
||||
// start(name string)!
|
||||
// stop(name string)!
|
||||
// restart(name string)!
|
||||
// delete(name string)!
|
||||
// status(name string) !ProcessStatus
|
||||
// running(name string) !bool
|
||||
// output(name string) !string
|
||||
// exists(name string) !bool
|
||||
// list_services() ![]string
|
||||
// }
|
||||
|
||||
pub enum StartupManagerType {
|
||||
unknown
|
||||
screen
|
||||
zinit
|
||||
tmux
|
||||
systemd
|
||||
}
|
||||
|
||||
pub struct StartupManager {
|
||||
pub mut:
|
||||
cat StartupManagerType
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct StartupManagerArgs {
|
||||
pub mut:
|
||||
cat StartupManagerType
|
||||
}
|
||||
|
||||
pub fn get(args StartupManagerArgs) !StartupManager {
|
||||
mut sm := StartupManager{
|
||||
cat: args.cat
|
||||
}
|
||||
if args.cat == .unknown {
|
||||
if zinit.check() {
|
||||
sm.cat = .zinit
|
||||
} else if systemd.check()! {
|
||||
sm.cat = .systemd
|
||||
}
|
||||
}
|
||||
return sm
|
||||
}
|
||||
|
||||
// launch a new process
|
||||
//```
|
||||
// name string @[required]
|
||||
// cmd string @[required]
|
||||
// cmd_stop string
|
||||
// cmd_test string //command line to test service is running
|
||||
// status ZProcessStatus
|
||||
// pid int
|
||||
// after []string //list of service we depend on
|
||||
// env map[string]string
|
||||
// oneshot bool
|
||||
// start bool = true
|
||||
// restart bool = true // whether the process should be restarted on failure
|
||||
// description string //not used in zinit
|
||||
//```
|
||||
pub fn (mut sm StartupManager) new(args zinit.ZProcessNewArgs) ! {
|
||||
console.print_debug("startupmanager start:${args.name} cmd:'${args.cmd}' restart:${args.restart}")
|
||||
mut mycat := sm.cat
|
||||
if args.startuptype == .systemd {
|
||||
mycat = .systemd
|
||||
}
|
||||
match mycat {
|
||||
.screen {
|
||||
mut scr := screen.new(reset: false)!
|
||||
console.print_debug('screen')
|
||||
_ = scr.add(name: args.name, cmd: args.cmd, reset: args.restart)!
|
||||
}
|
||||
.systemd {
|
||||
console.print_debug('systemd start ${args.name}')
|
||||
mut systemdfactory := systemd.new()!
|
||||
systemdfactory.new(
|
||||
cmd: args.cmd
|
||||
name: args.name
|
||||
description: args.description
|
||||
start: args.start
|
||||
restart: args.restart
|
||||
env: args.env
|
||||
)!
|
||||
}
|
||||
.zinit {
|
||||
console.print_debug('zinit start ${args.name}')
|
||||
mut zinitfactory := zinit.new()!
|
||||
// pub struct ZProcessNewArgs {
|
||||
// name string @[required]
|
||||
// cmd string @[required]
|
||||
// cmd_stop string
|
||||
// cmd_test string
|
||||
// cmd_file bool // if we wanna force to run it as a file which is given to bash -c (not just a cmd in zinit)
|
||||
// test string
|
||||
// test_file bool
|
||||
// after []string
|
||||
// env map[string]string
|
||||
// oneshot bool
|
||||
// }
|
||||
zinitfactory.new(args)!
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen & systemd for now: ${mycat}')
|
||||
}
|
||||
}
|
||||
// if args.start {
|
||||
// sm.start(args.name)!
|
||||
// } else if args.restart {
|
||||
// sm.restart(args.name)!
|
||||
// }
|
||||
}
|
||||
|
||||
pub fn (mut sm StartupManager) start(name string) ! {
|
||||
match sm.cat {
|
||||
.screen {
|
||||
return
|
||||
}
|
||||
.systemd {
|
||||
console.print_debug('systemd process start ${name}')
|
||||
mut systemdfactory := systemd.new()!
|
||||
if systemdfactory.exists(name) {
|
||||
// console.print_header("*************")
|
||||
mut systemdprocess := systemdfactory.get(name)!
|
||||
systemdprocess.start()!
|
||||
} else {
|
||||
return error('process in systemd with name ${name} not found')
|
||||
}
|
||||
}
|
||||
.zinit {
|
||||
console.print_debug('zinit process start ${name}')
|
||||
mut zinitfactory := zinit.new()!
|
||||
zinitfactory.start(name)!
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (mut sm StartupManager) stop(name string) ! {
|
||||
match sm.cat {
|
||||
.screen {
|
||||
mut screen_factory := screen.new(reset: false)!
|
||||
mut scr := screen_factory.get(name) or { return }
|
||||
scr.cmd_send('^C')!
|
||||
screen_factory.kill(name)!
|
||||
}
|
||||
.systemd {
|
||||
console.print_debug('systemd stop ${name}')
|
||||
mut systemdfactory := systemd.new()!
|
||||
if systemdfactory.exists(name) {
|
||||
mut systemdprocess := systemdfactory.get(name)!
|
||||
systemdprocess.stop()!
|
||||
}
|
||||
}
|
||||
.zinit {
|
||||
console.print_debug('zinit stop ${name}')
|
||||
mut zinitfactory := zinit.new()!
|
||||
zinitfactory.load()!
|
||||
if zinitfactory.exists(name) {
|
||||
zinitfactory.stop(name)!
|
||||
}
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// kill the process by name
|
||||
pub fn (mut sm StartupManager) restart(name string) ! {
|
||||
match sm.cat {
|
||||
.screen {
|
||||
panic('implement')
|
||||
}
|
||||
.systemd {
|
||||
console.print_debug('systemd restart ${name}')
|
||||
mut systemdfactory := systemd.new()!
|
||||
mut systemdprocess := systemdfactory.get(name)!
|
||||
systemdprocess.restart()!
|
||||
}
|
||||
.zinit {
|
||||
console.print_debug('zinit restart ${name}')
|
||||
mut zinitfactory := zinit.new()!
|
||||
zinitfactory.stop(name)!
|
||||
zinitfactory.start(name)!
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// remove from the startup manager
|
||||
pub fn (mut sm StartupManager) delete(name string) ! {
|
||||
match sm.cat {
|
||||
.screen {
|
||||
mut screen_factory := screen.new(reset: false)!
|
||||
mut scr := screen_factory.get(name) or { return }
|
||||
scr.cmd_send('^C')!
|
||||
screen_factory.kill(name)!
|
||||
}
|
||||
.systemd {
|
||||
mut systemdfactory := systemd.new()!
|
||||
mut systemdprocess := systemdfactory.get(name)!
|
||||
systemdprocess.delete()!
|
||||
}
|
||||
.zinit {
|
||||
mut zinitfactory := zinit.new()!
|
||||
zinitfactory.load()!
|
||||
if zinitfactory.exists(name) {
|
||||
zinitfactory.delete(name)!
|
||||
}
|
||||
}
|
||||
else {
|
||||
panic('to implement, startup manager only support screen & systemd for now: ${sm.cat}')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub enum ProcessStatus {
|
||||
unknown
|
||||
active
|
||||
inactive
|
||||
failed
|
||||
activating
|
||||
deactivating
|
||||
}

// get the status of the process as known to the startup manager
pub fn (mut sm StartupManager) status(name string) !ProcessStatus {
	match sm.cat {
		.screen {
			mut screen_factory := screen.new(reset: false)!
			mut scr := screen_factory.get(name) or {
				return error('process with name ${name} not found')
			}
			match scr.status()! {
				.active { return .active }
				.inactive { return .inactive }
				.unknown { return .unknown }
			}
		}
		.systemd {
			mut systemdfactory := systemd.new()!
			mut systemdprocess := systemdfactory.get(name) or { return .unknown }
			systemd_status := systemdprocess.status() or {
				return error('Failed to get status of process ${name}\n${err}')
			}
			s := ProcessStatus.from(systemd_status.str())!
			return s
		}
		.zinit {
			mut zinitfactory := zinit.new()!
			mut p := zinitfactory.get(name) or { return .unknown }
			// zinit states: unknown, init, ok, killed, error, blocked, spawned
			match mut p.status()! {
				.init { return .activating }
				.ok { return .active }
				.error { return .failed }
				.blocked { return .inactive }
				.killed { return .inactive }
				.spawned { return .activating }
				.unknown { return .unknown }
			}
		}
		else {
			panic('not implemented: startup manager does not yet support ${sm.cat}')
		}
	}
}

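// Example (illustrative sketch, not part of the original module): how a caller
// could branch on the ProcessStatus returned by status(). The service name
// 'myservice' is a hypothetical placeholder.
fn example_status_check(mut sm StartupManager) ! {
	status := sm.status('myservice')!
	match status {
		.active { console.print_debug('myservice is running') }
		.failed { return error('myservice failed, check its logs') }
		else { console.print_debug('myservice is not active (yet)') }
	}
}
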
// check if the process is running (status == active)
pub fn (mut sm StartupManager) running(name string) !bool {
	if !sm.exists(name)! {
		return false
	}
	s := sm.status(name)!
	return s == .active
}

// get the output (logs) of the process
pub fn (mut sm StartupManager) output(name string) !string {
	match sm.cat {
		.screen {
			panic('output is not implemented yet for screen')
		}
		.systemd {
			return systemd.journalctl(service: name)!
		}
		else {
			panic('not implemented: output is only supported for systemd for now')
		}
	}
}

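// Example (illustrative sketch, not part of the original module): combine
// status() and output() to surface the logs of a failing service; note that
// output() currently only returns something for systemd. 'myservice' is a
// hypothetical placeholder.
fn example_report_failure(mut sm StartupManager) ! {
	if sm.status('myservice')! == .failed {
		logs := sm.output('myservice')!
		console.print_debug('myservice failed, last output:\n${logs}')
	}
}
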
// check if a process with this name is known to the startup manager
pub fn (mut sm StartupManager) exists(name string) !bool {
	match sm.cat {
		.screen {
			mut scr := screen.new(reset: false) or { panic("can't get screen") }
			return scr.exists(name)
		}
		.systemd {
			// console.print_debug("exists sm systemd ${name}")
			mut systemdfactory := systemd.new()!
			return systemdfactory.exists(name)
		}
		.zinit {
			// console.print_debug("exists sm zinit check ${name}")
			mut zinitfactory := zinit.new()!
			zinitfactory.load()!
			return zinitfactory.exists(name)
		}
		else {
			panic('not implemented: startup manager does not yet support ${sm.cat}')
		}
	}
}

// list all services as known to the startup manager
pub fn (mut sm StartupManager) list() ![]string {
	match sm.cat {
		.screen {
			// mut scr := screen.new(reset: false) or { panic("can't get screen") }
			panic('list is not implemented yet for screen')
		}
		.systemd {
			mut systemdfactory := systemd.new()!
			return systemdfactory.names()
		}
		.zinit {
			mut zinitfactory := zinit.new()!
			return zinitfactory.names()
		}
		else {
			panic('not implemented: startup manager does not yet support ${sm.cat}')
		}
	}
}

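// Example (illustrative sketch, not part of the original module): a typical
// lifecycle as also exercised by the module test further down — register and
// start a process, check it, then stop it again. The process name and command
// are placeholders; get() is assumed to be the factory used by the tests.
fn example_lifecycle() ! {
	mut sm := get()!
	sm.start(name: 'exampleprocess', cmd: 'redis-server')!
	if sm.exists('exampleprocess')! {
		status := sm.status('exampleprocess')!
		console.print_debug('exampleprocess status: ${status}')
	}
	sm.stop('exampleprocess')!
}
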
// THIS IS PROBABLY PART OF OTHER MODULE NOW

// pub struct SecretArgs {
// pub mut:
//	name string @[required]
//	cat  SecretType
// }

// pub enum SecretType {
//	normal
// }

// // creates a secret if it doesn't exist yet
// pub fn (mut sm StartupManager) secret(args SecretArgs) !string {
//	if !(sm.exists(args.name)) {
//		return error("can't find screen with name ${args.name}, for secret")
//	}
//	key := 'secrets:startup:${args.name}'
//	mut redis := redisclient.core_get()!
//	mut secret := redis.get(key)!
//	if secret.len == 0 {
//		secret = rand.hex(16)
//		redis.set(key, secret)!
//	}
//	return secret
// }

@@ -1,34 +0,0 @@
module startupmanager

import freeflowuniverse.herolib.ui.console
import freeflowuniverse.herolib.osal.screen
import freeflowuniverse.herolib.osal.systemd

const process_name = 'testprocess'

pub fn testsuite_begin() ! {
	mut sm := get()!
	if sm.exists(process_name)! {
		sm.stop(process_name)!
	}
}

pub fn testsuite_end() ! {
	mut sm := get()!
	if sm.exists(process_name)! {
		sm.stop(process_name)!
	}
}

// start a process and check that it reports status active
pub fn test_status() ! {
	mut sm := get()!

	sm.start(
		name: process_name
		cmd:  'redis-server'
	)!

	status := sm.status(process_name)!
	assert status == .active
}

@@ -246,7 +246,7 @@ pub fn load_config(cfg_dir string) !Config {
	// Load and parse navbar config
	navbar_content := os.read_file(os.join_path(cfg_dir, 'navbar.json'))!
	navbar := json.decode(Navbar, navbar_content) or {
		eprintln('navbar.json in ${cfg_dir} is not in the right format please fix.\nError: ${err}')
		eprintln('navbar.json in ${cfg_dir} is not in the right format please fix.\nError: $err')
		exit(99)
	}