diff --git a/README.md b/README.md index f99f3c3c..ebe03530 100644 --- a/README.md +++ b/README.md @@ -5,12 +5,11 @@ a smaller version of crystallib with only the items we need for hero ## automated install ```bash - +curl 'https://raw.githubusercontent.com/freeflowuniverse/herolib/refs/heads/main/install_v.sh' > /tmp/install_v.sh +bash /tmp/install_v.sh --analyzer --herolib ``` - - -## to install v +### details ```bash @@ -18,7 +17,7 @@ a smaller version of crystallib with only the items we need for hero V & HeroLib Installer Script -Usage: /Users/despiegk1/code/github/freeflowuniverse/herolib/install_v.sh [options] +Usage: ~/code/github/freeflowuniverse/herolib/install_v.sh [options] Options: -h, --help Show this help message @@ -28,12 +27,12 @@ Options: --herolib Install our herolib Examples: - /Users/despiegk1/code/github/freeflowuniverse/herolib/install_v.sh - /Users/despiegk1/code/github/freeflowuniverse/herolib/install_v.sh --reset - /Users/despiegk1/code/github/freeflowuniverse/herolib/install_v.sh --remove - /Users/despiegk1/code/github/freeflowuniverse/herolib/install_v.sh --analyzer - /Users/despiegk1/code/github/freeflowuniverse/herolib/install_v.sh --herolib - /Users/despiegk1/code/github/freeflowuniverse/herolib/install_v.sh --reset --analyzer # Fresh install of both + ~/code/github/freeflowuniverse/herolib/install_v.sh + ~/code/github/freeflowuniverse/herolib/install_v.sh --reset + ~/code/github/freeflowuniverse/herolib/install_v.sh --remove + ~/code/github/freeflowuniverse/herolib/install_v.sh --analyzer + ~/code/github/freeflowuniverse/herolib/install_v.sh --herolib + ~/code/github/freeflowuniverse/herolib/install_v.sh --reset --analyzer # Fresh install of both ``` diff --git a/install_herolib.vsh b/install_herolib.vsh index f7a6b1c6..9501949b 100755 --- a/install_herolib.vsh +++ b/install_herolib.vsh @@ -3,6 +3,34 @@ import os import flag +fn addtoscript(tofind string, toadd string) ! { + home_dir := os.home_dir() + mut rc_file := '${home_dir}/.zshrc' + if !os.exists(rc_file) { + rc_file = '${home_dir}/.bashrc' + if !os.exists(rc_file) { + return error('No .zshrc or .bashrc found in home directory') + } + } + + // Read current content + mut content := os.read_file(rc_file)! + + // Remove existing alias if present + lines := content.split('\n') + mut new_lines := []string{} + for line in lines { + if !line.contains(tofind) { + new_lines << line + } + } + new_lines << toadd + // Write back to file + new_content := new_lines.join('\n') + os.write_file(rc_file, new_content)! 
+}
+
+
 vroot := @VROOT
 
 abs_dir_of_script := dir(@FILE)
@@ -18,8 +46,15 @@ os.mkdir_all('${os.home_dir()}/.vmodules/freeflowuniverse') or {
 }
 
 // Create new symlinks
-os.symlink('${abs_dir_of_script}/herolib', '${os.home_dir()}/.vmodules/freeflowuniverse/herolib') or {
+os.symlink('${abs_dir_of_script}/lib', '${os.home_dir()}/.vmodules/freeflowuniverse/herolib') or {
 	panic('Failed to create herolib symlink: ${err}')
 }
 
 println('Herolib installation completed successfully!')
+
+// Add vtest alias
+addtoscript('vtest ', 'alias vtest=\'v -stats -enable-globals -n -w -gc none -no-retry-compilation -cc tcc test\'') or {
+	eprintln('Failed to add vtest alias: ${err}')
+}
+
+println('Added vtest alias to shell configuration')
diff --git a/lib/clients/httpconnection/authentication.v b/lib/clients/httpconnection/authentication.v
new file mode 100644
index 00000000..512c8d60
--- /dev/null
+++ b/lib/clients/httpconnection/authentication.v
@@ -0,0 +1,8 @@
+module httpconnection
+
+import encoding.base64
+
+pub fn (mut conn HTTPConnection) basic_auth(username string, password string) {
+	credentials := base64.encode_str('${username}:${password}')
+	conn.default_header.add(.authorization, 'Basic ${credentials}')
+}
diff --git a/lib/clients/httpconnection/caching.v b/lib/clients/httpconnection/caching.v
new file mode 100644
index 00000000..ad46d605
--- /dev/null
+++ b/lib/clients/httpconnection/caching.v
@@ -0,0 +1,100 @@
+module httpconnection
+
+import crypto.md5
+import json
+import net.http { Method }
+
+// https://cassiomolin.com/2016/09/09/which-http-status-codes-are-cacheable/
+const default_cacheable_codes = [200, 203, 204, 206, 300, 404, 405, 410, 414, 501]
+
+const unsafe_http_methods = [Method.put, .patch, .post, .delete]
+
+pub struct CacheConfig {
+pub mut:
+	key               string // used to identify the entries in redis
+	allowable_methods []Method = [.get, .head]
+	allowable_codes   []int    = default_cacheable_codes
+	disable           bool = true // the cache is disabled by default
+	expire_after      int  = 3600 // default expire_after is 1h
+	match_headers     bool // cache the request header to be matched later
+}
+
+pub struct Result {
+pub mut:
+	code int
+	data string
+}
+
+// calculate the key for the cache starting from data and url
+fn (mut h HTTPConnection) cache_key(req Request) string {
+	url := h.url(req).split('?')
+	encoded_url := md5.hexhash(url[0]) // without params
+	mut key := 'http:${h.cache.key}:${req.method}:${encoded_url}'
+	mut req_data := req.data
+	if h.cache.match_headers {
+		req_data += json.encode(h.header(req))
+	}
+	req_data += if url.len > 1 { url[1] } else { '' } // add the url params if they exist
+	key += if req_data.len > 0 { ':${md5.hexhash(req_data)}' } else { '' }
+	return key
+}
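+
+// Key layout example (illustrative values): a GET on
+// 'https://api.example.com/users?limit=10' with CacheConfig{key: 'mycache'}
+// produces 'http:mycache:GET:<md5 of base url>:<md5 of params/data>'.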
+
+// Get a request result from the cache, return code -1 if missed.
+fn (mut h HTTPConnection) cache_get(req Request) !Result {
+	key := h.cache_key(req)
+	mut data := h.redis.get(key) or {
+		assert '${err}' == 'none'
+		// console.print_debug("cache get: ${key} not in redis")
+		return Result{
+			code: -1
+		}
+	}
+	if data == '' {
+		// console.print_debug("cache get: ${key} empty data")
+		return Result{
+			code: -1
+		}
+	}
+	result := json.decode(Result, data) or {
+		// console.print_debug("cache get: ${key} could not decode")
+		return error('failed to decode result with error: ${err}.\ndata:\n${data}')
+	}
+	// console.print_debug("cache get: ${key} ok")
+	return result
+}
+
+// Set a response result in the cache
+fn (mut h HTTPConnection) cache_set(req Request, res Result) ! {
+	key := h.cache_key(req)
+	value := json.encode(res)
+	h.redis.set(key, value)!
+	h.redis.expire(key, h.cache.expire_after)!
+}
+
+// Invalidate the cache for a specific url
+fn (mut h HTTPConnection) cache_invalidate(req Request) ! {
+	url := h.url(req).split('?')
+	encoded_url := md5.hexhash(url[0])
+	mut to_drop := []string{}
+	to_drop << 'http:${h.cache.key}:*:${encoded_url}*'
+	if req.id.len > 0 {
+		url_no_id := url[0].trim_string_right('/${req.id}')
+		encoded_url_no_id := md5.hexhash(url_no_id)
+		to_drop << 'http:${h.cache.key}:*:${encoded_url_no_id}*'
+	}
+	for pattern in to_drop {
+		all_keys := h.redis.keys(pattern)!
+		for key in all_keys {
+			h.redis.del(key)!
+		}
+	}
+}
+
+// drop the full cache for a specific cache_key
+pub fn (mut h HTTPConnection) cache_drop() ! {
+	todrop := 'http:${h.cache.key}*'
+	all_keys := h.redis.keys(todrop)!
+	for key in all_keys {
+		h.redis.del(key)!
+	}
+}
diff --git a/lib/clients/httpconnection/connection.v b/lib/clients/httpconnection/connection.v
new file mode 100644
index 00000000..2ca35d3d
--- /dev/null
+++ b/lib/clients/httpconnection/connection.v
@@ -0,0 +1,22 @@
+module httpconnection
+
+import net.http { Header }
+import freeflowuniverse.herolib.clients.redisclient { Redis }
+
+@[heap]
+pub struct HTTPConnection {
+pub mut:
+	redis          Redis @[str: skip]
+	base_url       string // the base url
+	default_header Header
+	cache          CacheConfig
+	retry          int = 5
+}
+
+// Join the header from the httpconnection with the one from the Request
+fn (mut h HTTPConnection) header(req Request) Header {
+	mut header := req.header or { return h.default_header }
+
+	return h.default_header.join(header)
+}
diff --git a/lib/clients/httpconnection/connection_methods.v b/lib/clients/httpconnection/connection_methods.v
new file mode 100644
index 00000000..ac45e4d7
--- /dev/null
+++ b/lib/clients/httpconnection/connection_methods.v
@@ -0,0 +1,212 @@
+// /*
+// METHODS NOTES
+// * Our goal is to wrap the default http methods used in V so results can be cached in redis
+// * Caching is enabled for every Request by default; to disable it for one request, set req.cache_disable to true
+// *
+// * The flow is:
+// * 1 - If the cache is enabled, try to get the result from cache
+// * 2 - Check the result
+// * 3 - Do the request, if needed
+// * 4 - Set the result in cache if enabled, or invalidate the cache
+// * 5 - Return the result
+
+// Suggestion: the send function is now enough for what we need; no additional post*/get* functions are required
+// */
+
+module httpconnection
+
+import x.json2
+import net.http
+import freeflowuniverse.herolib.core.herojson
+import freeflowuniverse.herolib.ui.console
+
+// Build the url from the Request and the httpconnection
+fn (mut h HTTPConnection) url(req Request) string {
+	mut u := '${h.base_url}/${req.prefix.trim('/')}'
+	if req.id.len > 0 {
+		u += '/${req.id}'
+	}
+	if req.params.len > 0 && req.method != .post {
+		u += '?${http.url_encode_form_data(req.params)}'
+	}
+	return u
+}
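+
+// e.g. base_url 'https://api.example.com', prefix 'users', id '42' and
+// params {'limit': '10'} build 'https://api.example.com/users/42?limit=10'
+// (illustrative values).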
+
+// Return whether the request is cacheable; depends on the connection cache config and the request arguments.
+fn (h HTTPConnection) is_cacheable(req Request) bool {
+	return !(h.cache.disable || req.cache_disable) && req.method in h.cache.allowable_methods
+}
+
+// Return true if we need to invalidate the cache after an unsafe method
+fn (h HTTPConnection) needs_invalidate(req Request, result_code int) bool {
+	return !(h.cache.disable || req.cache_disable) && req.method in unsafe_http_methods
+		&& req.method !in h.cache.allowable_methods && result_code >= 200 && result_code <= 399
+}
+
+// Core function used by all the other request functions
+pub fn (mut h HTTPConnection) send(req_ Request) !Result {
+	mut result := Result{}
+	mut response := http.Response{}
+	mut err_message := ''
+	mut from_cache := false // used to know if the result came from cache
+	mut req := req_
+
+	is_cacheable := h.is_cacheable(req)
+	// console.print_debug("is cacheable: ${is_cacheable}")
+
+	// 1 - If the cache is enabled, try to get the result from cache
+	if is_cacheable {
+		result = h.cache_get(req)!
+		if result.code != -1 {
+			from_cache = true
+		}
+	}
+	// 2 - Check the result
+	if result.code in [0, -1] {
+		// 3 - Do the request, if needed
+		if req.method == .post {
+			if req.dataformat == .urlencoded && req.data == '' && req.params.len > 0 {
+				req.data = http.url_encode_form_data(req.params)
+			}
+		}
+		url := h.url(req)
+
+		mut new_req := http.new_request(req.method, url, req.data)
+		// join the header from the HTTPConnection with the one from the Request
+		new_req.header = h.header(req)
+
+		if h.default_header.contains(http.CommonHeader.content_type) {
+			panic('bug: content_type should not be set as part of default header')
+		}
+
+		match req.dataformat {
+			.json {
+				new_req.header.set(http.CommonHeader.content_type, 'application/json')
+			}
+			.urlencoded {
+				new_req.header.set(http.CommonHeader.content_type, 'application/x-www-form-urlencoded')
+			}
+			.multipart_form {
+				new_req.header.set(http.CommonHeader.content_type, 'multipart/form-data')
+			}
+		}
+
+		if req.debug {
+			console.print_debug('http request:\n${new_req.str()}')
+		}
+		for _ in 0 .. h.retry {
+			response = new_req.do() or {
+				err_message = 'Cannot send request:${req}\nerror:${err}'
+				// console.print_debug(err_message)
+				continue
+			}
+			break
+		}
+		if req.debug {
+			console.print_debug(response.str())
+		}
+		if response.status_code == 0 {
+			return error(err_message)
+		}
+		result.code = response.status_code
+		result.data = response.body
+	}
+
+	// 4 - Set the result in cache if enabled
+	if !from_cache && is_cacheable && result.code in h.cache.allowable_codes {
+		h.cache_set(req, result)!
+	}
+
+	if h.needs_invalidate(req, result.code) {
+		h.cache_invalidate(req)!
+	}
+
+	// 5 - Return the result
+	return result
+}
+
+pub fn (r Result) is_ok() bool {
+	return r.code >= 200 && r.code <= 399
+}
+
+// dict_key string // if the return is a dict, then will take the element out of the dict with the key and process further
+pub fn (mut h HTTPConnection) post_json_str(req_ Request) !string {
+	mut req := req_
+	// only default to .post; keep an explicitly set method (e.g. .put or .patch) so those callers can reuse this helper
+	if req.method == .get {
+		req.method = .post
+	}
+	result := h.send(req)!
+	if result.is_ok() {
+		mut data_ := result.data
+		if req.dict_key.len > 0 {
+			data_ = herojson.json_dict_get_string(data_, false, req.dict_key)!
+		}
+		return data_
+	}
+	return error('Could not post ${req}\nresult:\n${result}')
+}
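+
+// Illustrative call (values are hypothetical):
+//   mut res := conn.send(prefix: 'users', method: .get)!
+//   if res.is_ok() { println(res.data) }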
+
+// do a request with a certain prefix on the already specified url
+// parse the result as json
+pub fn (mut h HTTPConnection) get_json_dict(req Request) !map[string]json2.Any {
+	data_ := h.get(req)!
+	mut data := map[string]json2.Any{}
+	data = herojson.json_dict_filter_any(data_, false, [], [])!
+	return data
+}
+
+// dict_key string // if the return is a dict, then will take the element out of the dict with the key and process further
+// list_dict_key string // if the output is a list of dicts, then will process each element of the list to take the val with key out of that dict
+// e.g. the input is a list of dicts e.g. [{"key":{"name":"kristof@incubaid.com",...},{"key":...}]
+pub fn (mut h HTTPConnection) get_json_list(req Request) ![]string {
+	mut data_ := h.get(req)!
+	if req.dict_key.len > 0 {
+		data_ = herojson.json_dict_get_string(data_, false, req.dict_key)!
+	}
+	if req.list_dict_key.len > 0 {
+		return herojson.json_list_dict_get_string(data_, false, req.list_dict_key)!
+	}
+	data := herojson.json_list(data_, false)
+	return data
+}
+
+// dict_key string // if the return is a dict, then will take the element out of the dict with the key and process further
+pub fn (mut h HTTPConnection) get_json(req Request) !string {
+	// the content type is set by send() based on req.dataformat (json by default)
+	mut data_ := h.get(req)!
+	if req.dict_key.len > 0 {
+		data_ = herojson.json_dict_get_string(data_, false, req.dict_key)!
+	}
+	return data_
+}
+
+// Get Request with json data and return the response as string
+pub fn (mut h HTTPConnection) get(req_ Request) !string {
+	mut req := req_
+	req.method = .get
+	result := h.send(req)!
+	return result.data
+}
+
+// Delete Request with json data and return the response as string
+pub fn (mut h HTTPConnection) delete(req_ Request) !string {
+	mut req := req_
+	req.method = .delete
+	result := h.send(req)!
+	return result.data
+}
+
+// performs a multipart form data request
+pub fn (mut h HTTPConnection) post_multi_part(req Request, form http.PostMultipartFormConfig) !http.Response {
+	mut req_form := form
+	mut header := h.header(req)
+	header.set(http.CommonHeader.content_type, 'multipart/form-data')
+	req_form.header = header
+	url := h.url(req)
+	return http.post_multipart_form(url, req_form)!
+}
diff --git a/lib/clients/httpconnection/connection_methods_generic.v b/lib/clients/httpconnection/connection_methods_generic.v
new file mode 100644
index 00000000..fd63d4e9
--- /dev/null
+++ b/lib/clients/httpconnection/connection_methods_generic.v
@@ -0,0 +1,22 @@
+module httpconnection
+
+import json
+
+pub fn (mut h HTTPConnection) get_json_generic[T](req Request) !T {
+	data := h.get_json(req)!
+	return json.decode(T, data) or { return error("couldn't decode json for ${req} for ${data}") }
+}
+
+pub fn (mut h HTTPConnection) post_json_generic[T](req Request) !T {
+	data := h.post_json_str(req)!
+	return json.decode(T, data) or { return error("couldn't decode json for ${req} for ${data}") }
+}
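+
+// Illustrative use with an assumed struct `struct User { id int name string }`:
+//   user := conn.get_json_generic[User](prefix: 'users/1')!
+//   users := conn.get_json_list_generic[User](prefix: 'users')!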
+
+pub fn (mut h HTTPConnection) get_json_list_generic[T](req Request) ![]T {
+	mut r := []T{}
+	for item in h.get_json_list(req)! {
+		// println(item)
+		r << json.decode(T, item) or { return error("couldn't decode json for ${req} for ${item}") }
+	}
+	return r
+}
diff --git a/lib/clients/httpconnection/factory.v b/lib/clients/httpconnection/factory.v
new file mode 100644
index 00000000..3b535565
--- /dev/null
+++ b/lib/clients/httpconnection/factory.v
@@ -0,0 +1,39 @@
+module httpconnection
+
+import net.http
+import freeflowuniverse.herolib.clients.redisclient { RedisURL }
+
+@[params]
+pub struct HTTPConnectionArgs {
+pub:
+	name  string @[required]
+	url   string @[required]
+	cache bool
+	retry int = 1
+}
+
+pub fn new(args HTTPConnectionArgs) !&HTTPConnection {
+	mut header := http.new_header()
+
+	if args.url.replace(' ', '') == '' {
+		return error("can't create an http connection with an empty url")
+	}
+
+	// Init the connection
+	mut conn := HTTPConnection{
+		redis:          redisclient.core_get(RedisURL{})!
+		default_header: header
+		cache:          CacheConfig{
+			disable: !args.cache
+			key:     args.name
+		}
+		retry:    args.retry
+		base_url: args.url.trim('/')
+	}
+	return &conn
+}
diff --git a/lib/clients/httpconnection/readme.md b/lib/clients/httpconnection/readme.md
new file mode 100644
index 00000000..fa04635c
--- /dev/null
+++ b/lib/clients/httpconnection/readme.md
@@ -0,0 +1,171 @@
+# HTTPConnection Module
+
+The HTTPConnection module provides a robust HTTP client implementation with support for JSON handling, custom headers, retries, and caching.
+
+## Features
+
+- Generic JSON methods for type-safe requests
+- Custom header support
+- Built-in retry mechanism
+- Cache configuration
+- URL encoding support
+
+## Basic Usage
+
+```v
+import freeflowuniverse.herolib.clients.httpconnection
+
+// Create a new HTTP connection through the factory
+mut conn := httpconnection.new(
+	name:  'example'
+	url:   'https://api.example.com'
+	retry: 5 // number of retries for failed requests
+)!
+```
+
+## Examples
+
+### GET Request with JSON Response
+
+```v
+// Define your data structure
+struct User {
+	id    int
+	name  string
+	email string
+}
+
+// Make a GET request and decode the JSON response
+user := conn.get_json_generic[User](
+	method:     .get
+	prefix:     'users/1'
+	dataformat: .urlencoded
+)!
+```
+
+### GET Request for List of Items
+
+```v
+// Get a list of items and decode each one
+users := conn.get_json_list_generic[User](
+	method:        .get
+	prefix:        'users'
+	list_dict_key: 'users' // if the response is wrapped in a key
+	dataformat:    .urlencoded
+)!
+```
+
+### POST Request with JSON Data
+
+```v
+// Create a new resource with POST
+new_user := conn.post_json_generic[User](
+	method:     .post
+	prefix:     'users'
+	dataformat: .urlencoded
+	params:     {
+		'name':  'John Doe'
+		'email': 'john@example.com'
+	}
+)!
+```
+
+### Real-World Example: SSH Key Management
+
+Here's a practical example inspired by SSH key management in a cloud API:
+
+```v
+// Define the SSH key structure
+struct SSHKey {
+pub mut:
+	name        string
+	fingerprint string
+	type_       string @[json: 'type']
+	size        int
+	created_at  string
+	data        string
+}
+
+// Get all SSH keys
+fn get_ssh_keys(mut conn HTTPConnection) ![]SSHKey {
+	return conn.get_json_list_generic[SSHKey](
+		method:        .get
+		prefix:        'key'
+		list_dict_key: 'key'
+		dataformat:    .urlencoded
+	)!
+}
+
+// Create a new SSH key
+fn create_ssh_key(mut conn HTTPConnection, name string, key_data string) !SSHKey {
+	return conn.post_json_generic[SSHKey](
+		method:     .post
+		prefix:     'key'
+		dataformat: .urlencoded
+		params:     {
+			'name': name
+			'data': key_data
+		}
+	)!
+}
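+
+// Example wiring (hypothetical endpoint), using the factory from this module:
+//   mut conn := httpconnection.new(name: 'cloud', url: 'https://api.example.com')!
+//   keys := get_ssh_keys(mut conn)!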
+
+// Delete an SSH key
+fn delete_ssh_key(mut conn HTTPConnection, fingerprint string) ! {
+	conn.delete(
+		method:     .delete
+		prefix:     'key/${fingerprint}'
+		dataformat: .urlencoded
+	)!
+}
+```
+
+## Custom Headers
+
+You can set default headers for all requests or specify headers for individual requests:
+
+```v
+import net.http { Header }
+
+// Set default headers for all requests
+conn.default_header = http.new_header(
+	key:   .authorization
+	value: 'Bearer your-token-here'
+)
+
+// Add custom headers for a specific request
+response := conn.get_json(
+	method: .get
+	prefix: 'protected/resource'
+	header: http.new_header(
+		key:   .content_type
+		value: 'application/json'
+	)
+)!
+```
+
+## Error Handling
+
+The module uses V's built-in error handling. All methods that can fail return a V result (`!`) type:
+
+```v
+// Handle potential errors
+user := conn.get_json_generic[User](
+	method: .get
+	prefix: 'users/1'
+) or {
+	println('Error: ${err}')
+	return
+}
+```
+
+## Cache Configuration
+
+The module supports caching of responses. Enable it through the factory and fine-tune it through the `CacheConfig` struct:
+
+```v
+// caching needs a reachable redis; the factory wires it up
+mut conn := httpconnection.new(
+	name:  'example'
+	url:   'https://api.example.com'
+	cache: true
+)!
+
+// adjust cache behaviour as needed
+conn.cache.expire_after = 7200 // seconds
+```
diff --git a/lib/clients/httpconnection/request.v b/lib/clients/httpconnection/request.v
new file mode 100644
index 00000000..95837f44
--- /dev/null
+++ b/lib/clients/httpconnection/request.v
@@ -0,0 +1,25 @@
+module httpconnection
+
+import net.http { Header, Method }
+
+pub enum DataFormat {
+	json           // application/json
+	urlencoded     // application/x-www-form-urlencoded
+	multipart_form // multipart/form-data
+}
+
+@[params]
+pub struct Request {
+pub mut:
+	method        Method
+	prefix        string
+	id            string
+	params        map[string]string
+	data          string
+	cache_disable bool // do not default this to true; caching is configured on the connection, this flag only overrules it for specific requests
+	header        ?Header
+	dict_key      string // if the return is a dict, then will take the element out of the dict with the key and process further
+	list_dict_key string // if the output is a list of dicts, then will process each element of the list to take the val with key out of that dict
+	debug         bool
+	dataformat    DataFormat
+}
diff --git a/lib/clients/mailclient/.heroscript b/lib/clients/mailclient/.heroscript
new file mode 100644
index 00000000..2079adfb
--- /dev/null
+++ b/lib/clients/mailclient/.heroscript
@@ -0,0 +1,7 @@
+
+!!hero_code.generate_client
+    name:'mailclient'
+    classname:'MailClient'
+    singleton:0
+    default:1
+    reset:0
\ No newline at end of file
diff --git a/lib/clients/mailclient/client.v b/lib/clients/mailclient/client.v
new file mode 100644
index 00000000..eccb72da
--- /dev/null
+++ b/lib/clients/mailclient/client.v
@@ -0,0 +1,71 @@
+module mailclient
+
+import freeflowuniverse.herolib.core.texttools
+import net.smtp
+import time
+
+@[params]
+pub struct SendArgs {
+pub mut:
+	markdown  bool
+	from      string
+	to        string
+	cc        string
+	bcc       string
+	date      time.Time = time.now()
+	subject   string
+	body_type BodyType
+	body      string
+}
+
+pub enum BodyType {
+	text
+	html
+	markdown
+}
+
+// ```
+// cl.send(markdown:true,subject:'this is a test',to:'kds@something.com,kds2@else.com',body:'
+// this is my email content
+// ')!
+// args:
+// markdown bool
+// from string
+// to string
+// cc string
+// bcc string
+// date time.Time = time.now()
+// subject string
+// body_type BodyType (.html, .text, .markdown)
+// body string
+// ```
+pub fn (mut cl MailClient) send(args_ SendArgs) !
{
+	mut args := args_
+	args.body = texttools.dedent(args.body)
+	mut body_type := smtp.BodyType.text
+	if args.body_type == .html || args.body_type == .markdown {
+		body_type = smtp.BodyType.html
+	}
+	mut m := smtp.Mail{
+		from:      args.from
+		to:        args.to
+		cc:        args.cc
+		bcc:       args.bcc
+		date:      args.date
+		subject:   args.subject
+		body:      args.body
+		body_type: body_type
+	}
+
+	mut smtp_client := smtp.new_client(
+		server:   cl.mail_server
+		port:     cl.mail_port
+		username: cl.mail_username
+		password: cl.mail_password
+		from:     cl.mail_from
+		ssl:      cl.ssl
+		starttls: cl.tls
+	)!
+
+	return smtp_client.send(m)
+}
diff --git a/lib/clients/mailclient/mailclient_factory.v b/lib/clients/mailclient/mailclient_factory.v
new file mode 100644
index 00000000..aee1171b
--- /dev/null
+++ b/lib/clients/mailclient/mailclient_factory.v
@@ -0,0 +1,107 @@
+module mailclient
+
+import freeflowuniverse.herolib.core.base
+import freeflowuniverse.herolib.core.playbook
+
+__global (
+	mailclient_global  map[string]&MailClient
+	mailclient_default string
+)
+
+/////////FACTORY
+
+@[params]
+pub struct ArgsGet {
+pub mut:
+	name string = 'default'
+}
+
+fn args_get(args_ ArgsGet) ArgsGet {
+	mut args := args_
+	if args.name == '' {
+		args.name = mailclient_default
+	}
+	if args.name == '' {
+		args.name = 'default'
+	}
+	return args
+}
+
+pub fn get(args_ ArgsGet) !&MailClient {
+	mut args := args_get(args_)
+	if args.name !in mailclient_global {
+		if !config_exists() {
+			if default {
+				config_save()!
+			}
+		}
+		config_load()!
+	}
+	return mailclient_global[args.name] or { panic('bug') }
+}
+
+// switch the instance to be used for mailclient
+pub fn switch(name string) {
+	mailclient_default = name
+}
+
+fn config_exists(args_ ArgsGet) bool {
+	mut args := args_get(args_)
+	mut context := base.context() or { panic('bug') }
+	return context.hero_config_exists('mailclient', args.name)
+}
+
+fn config_load(args_ ArgsGet) ! {
+	mut args := args_get(args_)
+	mut context := base.context()!
+	mut heroscript := context.hero_config_get('mailclient', args.name)!
+	play(heroscript: heroscript)!
+}
+
+fn config_save(args_ ArgsGet) ! {
+	mut args := args_get(args_)
+	mut context := base.context()!
+	context.hero_config_set('mailclient', args.name, heroscript_default())!
+}
+
+fn set(o MailClient) ! {
+	mut o2 := obj_init(o)!
+	mailclient_global['default'] = &o2
+}
+
+@[params]
+pub struct InstallPlayArgs {
+pub mut:
+	name       string = 'default'
+	heroscript string // if filled in then plbook will be made out of it
+	plbook     ?playbook.PlayBook
+	reset      bool
+	start      bool
+	stop       bool
+	restart    bool
+	delete     bool
+	configure  bool // make sure there is at least one installed
+}
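+
+// play() consumes heroscript like the following (illustrative values):
+//   !!mailclient.configure name:'default' mail_server:'smtp.example.com' mail_port:465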
+
+pub fn play(args_ InstallPlayArgs) ! {
+	mut args := args_
+	mut plbook := args.plbook or {
+		heroscript := if args.heroscript == '' {
+			heroscript_default()
+		} else {
+			args.heroscript
+		}
+		playbook.new(text: heroscript)!
+	}
+
+	mut install_actions := plbook.find(filter: 'mailclient.configure')!
+	if install_actions.len > 0 {
+		for install_action in install_actions {
+			mut p := install_action.params
+			cfg_play(p)!
+		}
+	}
+}
diff --git a/lib/clients/mailclient/mailclient_model.v b/lib/clients/mailclient/mailclient_model.v
new file mode 100644
index 00000000..a0a0e90e
--- /dev/null
+++ b/lib/clients/mailclient/mailclient_model.v
@@ -0,0 +1,70 @@
+module mailclient
+
+import freeflowuniverse.herolib.data.paramsparser
+import os
+
+pub const version = '1.0.0'
+const singleton = false
+const default = true
+
+// TODO: example defaults below; keep them in line with the MailClient struct (the output is structured as heroscript)
+pub fn heroscript_default() string {
+	mail_from := os.getenv_opt('MAIL_FROM') or { 'info@example.com' }
+	mail_password := os.getenv_opt('MAIL_PASSWORD') or { 'secretpassword' }
+	mail_port := (os.getenv_opt('MAIL_PORT') or { '465' }).int()
+	mail_server := os.getenv_opt('MAIL_SERVER') or { 'smtp-relay.brevo.com' }
+	mail_username := os.getenv_opt('MAIL_USERNAME') or { 'kristof@incubaid.com' }
+
+	heroscript := "
+!!mailclient.configure name:'default'
+    mail_from: '${mail_from}'
+    mail_password: '${mail_password}'
+    mail_port: ${mail_port}
+    mail_server: '${mail_server}'
+    mail_username: '${mail_username}'
+"
+
+	return heroscript
+}
+
+pub struct MailClient {
+pub mut:
+	name          string = 'default'
+	mail_from     string
+	mail_password string @[secret]
+	mail_port     int = 465
+	mail_server   string
+	mail_username string
+	ssl           bool = true
+	tls           bool
+}
+
+fn cfg_play(p paramsparser.Params) ! {
+	mut mycfg := MailClient{
+		name:          p.get_default('name', 'default')!
+		mail_from:     p.get('mail_from')!
+		mail_password: p.get('mail_password')!
+		mail_port:     p.get_int_default('mail_port', 465)!
+		mail_server:   p.get('mail_server')!
+		mail_username: p.get('mail_username')!
+	}
+	set(mycfg)!
+}
+
+fn obj_init(obj_ MailClient) !MailClient {
+	// never call get here, the only thing we can do here is work on the object itself
+	mut obj := obj_
+	return obj
+}
+
+// the user needs to use switch to make sure we get the right object
+pub fn configure(config MailClient) !MailClient {
+	client := MailClient{
+		...config
+	}
+	set(client)!
+	return client
+	// TODO: implement if steps need to be done for configuration
+}
diff --git a/lib/clients/mailclient/readme.md b/lib/clients/mailclient/readme.md
new file mode 100644
index 00000000..e98a40f2
--- /dev/null
+++ b/lib/clients/mailclient/readme.md
@@ -0,0 +1,50 @@
+# mailclient
+
+To get started:
+
+```v
+import freeflowuniverse.herolib.clients.mailclient
+
+mut client := mailclient.get()!
+
+client.send(subject:'this is a test',to:'kds@something.com,kds2@else.com',body:'
+	this is my email content
+	')!
+```
+
+## example heroscript
+
+```hero
+!!mailclient.configure name:'default'
+    mail_from: 'info@example.com'
+    mail_password: 'secretpassword'
+    mail_port: 465
+    mail_server: 'smtp-relay.brevo.com'
+    mail_username: 'kristof@incubaid.com'
+```
+
+## use of env variables
+
+If you have a secrets file, you can source it:
+
+```bash
+# e.g.
source ~/code/git.ourworld.tf/despiegk/hero_secrets/mysecrets.sh +``` + +following env variables are supported + +- MAIL_FROM= +- MAIL_PASSWORD= +- MAIL_PORT=465 +- MAIL_SERVER=smtp-relay.brevo.com +- MAIL_USERNAME=kristof@incubaid.com + +these variables will only be set at configure time + + +## brevo remark + +- use ssl +- use port: 465 \ No newline at end of file diff --git a/lib/clients/meilisearch/.heroscript b/lib/clients/meilisearch/.heroscript new file mode 100644 index 00000000..72088933 --- /dev/null +++ b/lib/clients/meilisearch/.heroscript @@ -0,0 +1,7 @@ + +!!hero_code.generate_client + name:'meilisearch' + classname:'MeilisearchClient' + singleton:0 + default:1 + reset:0 \ No newline at end of file diff --git a/lib/clients/meilisearch/client.v b/lib/clients/meilisearch/client.v new file mode 100644 index 00000000..e4fb4743 --- /dev/null +++ b/lib/clients/meilisearch/client.v @@ -0,0 +1,457 @@ +module meilisearch + +import freeflowuniverse.herolib.clients.httpconnection +import x.json2 +import json + +// health checks if the server is healthy +pub fn (mut client MeilisearchClient) health() !Health { + req := httpconnection.Request{ + prefix: 'health' + } + mut http := client.httpclient()! + response := http.get_json(req)! + return json2.decode[Health](response) +} + +// version gets the version of the Meilisearch server +pub fn (mut client MeilisearchClient) version() !Version { + req := httpconnection.Request{ + prefix: 'version' + } + mut http := client.httpclient()! + response := http.get_json(req)! + return json2.decode[Version](response) +} + +// create_index creates a new index with the given UID +pub fn (mut client MeilisearchClient) create_index(args CreateIndexArgs) !CreateIndexResponse { + req := httpconnection.Request{ + prefix: 'indexes' + method: .post + data: json2.encode(args) + } + mut http := client.httpclient()! + response := http.post_json_str(req)! + return json2.decode[CreateIndexResponse](response) +} + +// get_index retrieves information about an index +pub fn (mut client MeilisearchClient) get_index(uid string) !GetIndexResponse { + req := httpconnection.Request{ + prefix: 'indexes/${uid}' + } + mut http := client.httpclient()! + response := http.get_json(req)! + return json2.decode[GetIndexResponse](response) +} + +// list_indexes retrieves all indexes +pub fn (mut client MeilisearchClient) list_indexes(args ListIndexArgs) ![]GetIndexResponse { + req := httpconnection.Request{ + prefix: 'indexes?limit=${args.limit}&offset=${args.offset}' + } + mut http := client.httpclient()! + response := http.get_json(req)! + list_response := json.decode(ListResponse[GetIndexResponse], response)! + return list_response.results +} + +// delete_index deletes an index +pub fn (mut client MeilisearchClient) delete_index(uid string) !DeleteIndexResponse { + req := httpconnection.Request{ + prefix: 'indexes/${uid}' + } + mut http := client.httpclient()! + response := http.delete(req)! + return json2.decode[DeleteIndexResponse](response) +} + +// get_settings retrieves all settings of an index +pub fn (mut client MeilisearchClient) get_settings(uid string) !IndexSettings { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings' + } + mut http := client.httpclient()! + response := http.get_json_dict(req)! 
+ + mut settings := IndexSettings{} + if ranking_rules := response['rankingRules'] { + settings.ranking_rules = ranking_rules.arr().map(it.str()) + } + if distinct_attribute := response['distinctAttribute'] { + settings.distinct_attribute = distinct_attribute.str() + } + if searchable_attributes := response['searchableAttributes'] { + settings.searchable_attributes = searchable_attributes.arr().map(it.str()) + } + if displayed_attributes := response['displayedAttributes'] { + settings.displayed_attributes = displayed_attributes.arr().map(it.str()) + } + if stop_words := response['stopWords'] { + settings.stop_words = stop_words.arr().map(it.str()) + } + if filterable_attributes := response['filterableAttributes'] { + settings.filterable_attributes = filterable_attributes.arr().map(it.str()) + } + if sortable_attributes := response['sortableAttributes'] { + settings.sortable_attributes = sortable_attributes.arr().map(it.str()) + } + + return settings +} + +// update_settings updates all settings of an index +pub fn (mut client MeilisearchClient) update_settings(uid string, settings IndexSettings) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings' + method: .patch + data: json2.encode(settings) + } + mut http := client.httpclient()! + return http.post_json_str(req) +} + +// reset_settings resets all settings of an index to default values +pub fn (mut client MeilisearchClient) reset_settings(uid string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings' + method: .delete + } + mut http := client.httpclient()! + return http.delete(req) +} + +// get_ranking_rules retrieves ranking rules of an index +pub fn (mut client MeilisearchClient) get_ranking_rules(uid string) ![]string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/ranking-rules' + } + mut http := client.httpclient()! + response := http.get_json_dict(req)! + return response['rankingRules']!.arr().map(it.str()) +} + +// update_ranking_rules updates ranking rules of an index +pub fn (mut client MeilisearchClient) update_ranking_rules(uid string, rules []string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/ranking-rules' + method: .put + data: json2.encode({ + 'rankingRules': rules + }) + } + mut http := client.httpclient()! + return http.post_json_str(req) +} + +// reset_ranking_rules resets ranking rules of an index to default values +pub fn (mut client MeilisearchClient) reset_ranking_rules(uid string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/ranking-rules' + method: .delete + } + mut http := client.httpclient()! + return http.delete(req) +} + +// get_distinct_attribute retrieves distinct attribute of an index +pub fn (mut client MeilisearchClient) get_distinct_attribute(uid string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/distinct-attribute' + } + mut http := client.httpclient()! + response := http.get_json_dict(req)! + return response['distinctAttribute']!.str() +} + +// update_distinct_attribute updates distinct attribute of an index +pub fn (mut client MeilisearchClient) update_distinct_attribute(uid string, attribute string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/distinct-attribute' + method: .put + data: json2.encode({ + 'distinctAttribute': attribute + }) + } + mut http := client.httpclient()! 
+ return http.post_json_str(req) +} + +// reset_distinct_attribute resets distinct attribute of an index +pub fn (mut client MeilisearchClient) reset_distinct_attribute(uid string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/distinct-attribute' + method: .delete + } + mut http := client.httpclient()! + return http.delete(req) +} + +// get_searchable_attributes retrieves searchable attributes of an index +pub fn (mut client MeilisearchClient) get_searchable_attributes(uid string) ![]string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/searchable-attributes' + } + mut http := client.httpclient()! + response := http.get_json_dict(req)! + return response['searchableAttributes']!.arr().map(it.str()) +} + +// update_searchable_attributes updates searchable attributes of an index +pub fn (mut client MeilisearchClient) update_searchable_attributes(uid string, attributes []string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/searchable-attributes' + method: .put + data: json2.encode({ + 'searchableAttributes': attributes + }) + } + mut http := client.httpclient()! + return http.post_json_str(req) +} + +// reset_searchable_attributes resets searchable attributes of an index +pub fn (mut client MeilisearchClient) reset_searchable_attributes(uid string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/searchable-attributes' + method: .delete + } + mut http := client.httpclient()! + return http.delete(req) +} + +// get_displayed_attributes retrieves displayed attributes of an index +pub fn (mut client MeilisearchClient) get_displayed_attributes(uid string) ![]string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/displayed-attributes' + } + mut http := client.httpclient()! + response := http.get_json_dict(req)! + return response['displayedAttributes']!.arr().map(it.str()) +} + +// update_displayed_attributes updates displayed attributes of an index +pub fn (mut client MeilisearchClient) update_displayed_attributes(uid string, attributes []string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/displayed-attributes' + method: .put + data: json2.encode({ + 'displayedAttributes': attributes + }) + } + mut http := client.httpclient()! + return http.post_json_str(req) +} + +// reset_displayed_attributes resets displayed attributes of an index +pub fn (mut client MeilisearchClient) reset_displayed_attributes(uid string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/displayed-attributes' + method: .delete + } + mut http := client.httpclient()! + return http.delete(req) +} + +// get_stop_words retrieves stop words of an index +pub fn (mut client MeilisearchClient) get_stop_words(uid string) ![]string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/stop-words' + } + mut http := client.httpclient()! + response := http.get_json_dict(req)! + return response['stopWords']!.arr().map(it.str()) +} + +// update_stop_words updates stop words of an index +pub fn (mut client MeilisearchClient) update_stop_words(uid string, words []string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/stop-words' + method: .put + data: json2.encode({ + 'stopWords': words + }) + } + mut http := client.httpclient()! 
+ return http.post_json_str(req) +} + +// reset_stop_words resets stop words of an index +pub fn (mut client MeilisearchClient) reset_stop_words(uid string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/stop-words' + method: .delete + } + mut http := client.httpclient()! + return http.delete(req) +} + +// get_synonyms retrieves synonyms of an index +pub fn (mut client MeilisearchClient) get_synonyms(uid string) !map[string][]string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/synonyms' + } + mut http := client.httpclient()! + response := http.get_json_dict(req)! + mut synonyms := map[string][]string{} + for key, value in response['synonyms']!.as_map() { + synonyms[key] = value.arr().map(it.str()) + } + return synonyms +} + +// update_synonyms updates synonyms of an index +pub fn (mut client MeilisearchClient) update_synonyms(uid string, synonyms map[string][]string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/synonyms' + method: .put + data: json2.encode({ + 'synonyms': synonyms + }) + } + mut http := client.httpclient()! + return http.post_json_str(req) +} + +// reset_synonyms resets synonyms of an index +pub fn (mut client MeilisearchClient) reset_synonyms(uid string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/synonyms' + method: .delete + } + mut http := client.httpclient()! + return http.delete(req) +} + +// get_filterable_attributes retrieves filterable attributes of an index +pub fn (mut client MeilisearchClient) get_filterable_attributes(uid string) ![]string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/filterable-attributes' + } + mut http := client.httpclient()! + response := http.get_json_dict(req)! + return response['filterableAttributes']!.arr().map(it.str()) +} + +// update_filterable_attributes updates filterable attributes of an index +pub fn (mut client MeilisearchClient) update_filterable_attributes(uid string, attributes []string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/filterable-attributes' + method: .put + data: json.encode(attributes) + } + mut http := client.httpclient()! + response := http.send(req)! + return response.data +} + +// reset_filterable_attributes resets filterable attributes of an index +pub fn (mut client MeilisearchClient) reset_filterable_attributes(uid string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/filterable-attributes' + method: .delete + } + mut http := client.httpclient()! + return http.delete(req) +} + +// get_sortable_attributes retrieves sortable attributes of an index +pub fn (mut client MeilisearchClient) get_sortable_attributes(uid string) ![]string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/sortable-attributes' + } + mut http := client.httpclient()! + response := http.get_json_dict(req)! + return response['sortableAttributes']!.arr().map(it.str()) +} + +// update_sortable_attributes updates sortable attributes of an index +pub fn (mut client MeilisearchClient) update_sortable_attributes(uid string, attributes []string) !string { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/settings/sortable-attributes' + method: .put + data: json2.encode({ + 'sortableAttributes': attributes + }) + } + mut http := client.httpclient()! 
+	return http.post_json_str(req)
+}
+
+// reset_sortable_attributes resets sortable attributes of an index
+pub fn (mut client MeilisearchClient) reset_sortable_attributes(uid string) !string {
+	req := httpconnection.Request{
+		prefix: 'indexes/${uid}/settings/sortable-attributes'
+		method: .delete
+	}
+	mut http := client.httpclient()!
+	return http.delete(req)
+}
+
+// get_typo_tolerance retrieves typo tolerance settings of an index
+pub fn (mut client MeilisearchClient) get_typo_tolerance(uid string) !TypoTolerance {
+	req := httpconnection.Request{
+		prefix: 'indexes/${uid}/settings/typo-tolerance'
+	}
+
+	mut http := client.httpclient()!
+	response := http.get_json_dict(req)!
+	min_word_size_for_typos := json2.decode[MinWordSizeForTypos](response['minWordSizeForTypos']!.json_str())!
+	mut typo_tolerance := TypoTolerance{
+		enabled:                 response['enabled']!.bool()
+		min_word_size_for_typos: min_word_size_for_typos
+	}
+
+	if disable_on_words := response['disableOnWords'] {
+		typo_tolerance.disable_on_words = disable_on_words.arr().map(it.str())
+	}
+	if disable_on_attributes := response['disableOnAttributes'] {
+		typo_tolerance.disable_on_attributes = disable_on_attributes.arr().map(it.str())
+	}
+
+	return typo_tolerance
+}
+
+// update_typo_tolerance updates typo tolerance settings of an index
+pub fn (mut client MeilisearchClient) update_typo_tolerance(uid string, typo_tolerance TypoTolerance) !string {
+	req := httpconnection.Request{
+		prefix: 'indexes/${uid}/settings/typo-tolerance'
+		method: .patch
+		data:   json2.encode(typo_tolerance)
+	}
+	mut http := client.httpclient()!
+	return http.post_json_str(req)
+}
+
+// reset_typo_tolerance resets typo tolerance settings of an index
+pub fn (mut client MeilisearchClient) reset_typo_tolerance(uid string) !string {
+	req := httpconnection.Request{
+		prefix: 'indexes/${uid}/settings/typo-tolerance'
+		method: .delete
+	}
+	mut http := client.httpclient()!
+	return http.delete(req)
+}
+
+@[params]
+pub struct ExperimentalFeaturesArgs {
+pub mut:
+	vector_store               bool @[json: 'vectorStore']
+	metrics                    bool @[json: 'metrics']
+	logs_route                 bool @[json: 'logsRoute']
+	contains_filter            bool @[json: 'containsFilter']
+	edit_documents_by_function bool @[json: 'editDocumentsByFunction']
+}
+
+pub fn (mut client MeilisearchClient) enable_experimental_feature(args ExperimentalFeaturesArgs) !ExperimentalFeaturesArgs {
+	req := httpconnection.Request{
+		prefix: 'experimental-features'
+		method: .patch
+		data:   json.encode(args)
+	}
+
+	mut http := client.httpclient()!
+	response := http.send(req)!
+	return json.decode(ExperimentalFeaturesArgs, response.data)
+}
diff --git a/lib/clients/meilisearch/document_test.v b/lib/clients/meilisearch/document_test.v
new file mode 100644
index 00000000..7c98d688
--- /dev/null
+++ b/lib/clients/meilisearch/document_test.v
@@ -0,0 +1,287 @@
+module meilisearch
+
+import rand
+import time
+
+struct MeiliDocument {
+pub mut:
+	id      int
+	title   string
+	content string
+}
+
+// Set up a test client instance
+fn setup_client() !&MeilisearchClient {
+	mut client := get()!
+	return client
+}
+
+fn test_add_document() {
+	mut client := setup_client()!
+	index_name := rand.string(5)
+	documents := [
+		MeiliDocument{
+			id:      1
+			content: 'Shazam is a 2019 American superhero film based on the DC Comics character of the same name.'
+			title:   'Shazam'
+		},
+	]
+
+	mut doc := client.add_documents(index_name, documents)!
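+	// add_documents only enqueues an indexing task in Meilisearch; the returned
+	// value is the task metadata, which is what the assertions below check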
+	assert doc.index_uid == index_name
+	assert doc.type_ == 'documentAdditionOrUpdate'
+}
+
+fn test_get_document() {
+	mut client := setup_client()!
+	index_name := rand.string(5)
+
+	documents := [
+		MeiliDocument{
+			id:      1
+			title:   'Shazam'
+			content: 'Shazam is a 2019 American superhero film based on the DC Comics character of the same name.'
+		},
+	]
+
+	mut doc := client.add_documents(index_name, documents)!
+	assert doc.index_uid == index_name
+	assert doc.type_ == 'documentAdditionOrUpdate'
+
+	time.sleep(500 * time.millisecond)
+
+	doc_ := client.get_document[MeiliDocument](
+		uid:         index_name
+		document_id: 1
+		fields:      ['id', 'title']
+	)!
+
+	assert doc_.title == 'Shazam'
+	assert doc_.id == 1
+}
+
+fn test_get_documents() {
+	mut client := setup_client()!
+	index_name := rand.string(5)
+
+	documents := [
+		MeiliDocument{
+			id:      1
+			title:   'The Kit kat'
+			content: 'The kit kat is an Egyptian film that was released in 2019.'
+		},
+		MeiliDocument{
+			id:      2
+			title:   'Elli Bali Balak'
+			content: 'Elli Bali Balak is an Egyptian film that was released in 2019.'
+		},
+	]
+
+	q := DocumentsQuery{
+		fields: ['title', 'id']
+	}
+
+	mut doc := client.add_documents(index_name, documents)!
+	assert doc.index_uid == index_name
+	assert doc.type_ == 'documentAdditionOrUpdate'
+
+	time.sleep(500 * time.millisecond)
+
+	mut docs := client.get_documents[MeiliDocument](index_name, q)!
+
+	assert docs.len > 0
+	assert docs[0].title == 'The Kit kat'
+	assert docs[0].id == 1
+	assert docs[1].title == 'Elli Bali Balak'
+	assert docs[1].id == 2
+}
+
+fn test_delete_document() {
+	mut client := setup_client()!
+	index_name := rand.string(5)
+
+	documents := [
+		MeiliDocument{
+			id:      1
+			title:   'Shazam'
+			content: 'Shazam is a 2019 American superhero film based on the DC Comics character of the same name.'
+		},
+	]
+
+	mut doc := client.add_documents(index_name, documents)!
+	assert doc.index_uid == index_name
+	assert doc.type_ == 'documentAdditionOrUpdate'
+
+	time.sleep(500 * time.millisecond)
+
+	mut doc_ := client.delete_document(
+		uid:         index_name
+		document_id: 1
+	)!
+
+	assert doc_.index_uid == index_name
+	assert doc_.type_ == 'documentDeletion'
+}
+
+fn test_delete_documents() {
+	mut client := setup_client()!
+	index_name := rand.string(5)
+
+	documents := [
+		MeiliDocument{
+			id:      1
+			title:   'Shazam'
+			content: 'Shazam is a 2019 American superhero film based on the DC Comics character of the same name.'
+		},
+		MeiliDocument{
+			id:      2
+			title:   'Shazam2'
+			content: 'Shazam2 is a 2019 American superhero film based on the DC Comics character of the same name.'
+		},
+	]
+
+	mut doc := client.add_documents(index_name, documents)!
+	assert doc.index_uid == index_name
+	assert doc.type_ == 'documentAdditionOrUpdate'
+
+	time.sleep(500 * time.millisecond)
+
+	mut doc_ := client.delete_all_documents(index_name)!
+
+	assert doc_.index_uid == index_name
+	assert doc_.type_ == 'documentDeletion'
+
+	time.sleep(500 * time.millisecond)
+
+	q := DocumentsQuery{
+		fields: ['title', 'id']
+	}
+
+	mut docs := client.get_documents[MeiliDocument](index_name, q)!
+
+	assert docs.len == 0
+}
+
+fn test_search() {
+	mut client := setup_client()!
+	index_name := rand.string(5)
+
+	documents := [
+		MeiliDocument{
+			id:      1
+			title:   'Power of rich people'
+			content: 'Power of rich people is an American film.'
+		},
+		MeiliDocument{
+			id:      2
+			title:   'Captain America'
+			content: 'Captain America is an American film.'
+		},
+		MeiliDocument{
+			id:      3
+			title:   'Coldplay'
+			content: 'Coldplay is a british rock band.'
+ }, + ] + + mut doc := client.add_documents(index_name, documents)! + assert doc.index_uid == index_name + assert doc.type_ == 'documentAdditionOrUpdate' + + time.sleep(500 * time.millisecond) + + mut doc_ := client.search[MeiliDocument](index_name, q: 'Coldplay')! + + assert doc_.hits[0].id == 3 +} + +fn test_facet_search() { + mut client := setup_client()! + index_name := rand.string(5) + + documents := [ + MeiliDocument{ + id: 1 + title: 'Life' + content: 'Two men in 1930s Mississippi become friends after being sentenced to life in prison together for a crime they did not commit.' + }, + MeiliDocument{ + id: 2 + title: 'Life' + content: 'In 1955, young photographer Dennis Stock develops a close bond with actor James Dean while shooting pictures of the rising Hollywood star.' + }, + MeiliDocument{ + id: 3 + title: 'Coldplay' + content: 'Coldplay is a british rock band.' + }, + ] + + mut doc := client.add_documents(index_name, documents)! + assert doc.index_uid == index_name + assert doc.type_ == 'documentAdditionOrUpdate' + + time.sleep(500 * time.millisecond) + res := client.update_filterable_attributes(index_name, ['title'])! + + time.sleep(500 * time.millisecond) + settings := client.get_settings(index_name)! + + assert ['title'] == settings.filterable_attributes + + mut doc_ := client.facet_search(index_name, + facet_name: 'title' + filter: 'title = life' + )! + assert doc_.facet_hits[0].count == 2 +} + +fn test_similar_documents() { + mut client := setup_client()! + index_name := rand.string(5) + + documents := [ + MeiliDocument{ + id: 1 + title: 'Life' + content: 'Two men in 1930s Mississippi become friends after being sentenced to life in prison together for a crime they did not commit.' + }, + MeiliDocument{ + id: 2 + title: 'Life' + content: 'In 1955, young photographer Dennis Stock develops a close bond with actor James Dean while shooting pictures of the rising Hollywood star.' + }, + MeiliDocument{ + id: 3 + title: 'Coldplay' + content: 'Coldplay is a british rock band.' + }, + ] + + mut doc := client.add_documents(index_name, documents)! + assert doc.index_uid == index_name + assert doc.type_ == 'documentAdditionOrUpdate' + + time.sleep(500 * time.millisecond) + + mut doc_ := client.similar_documents(index_name, + id: 1 + )! + // TODO: Check the meilisearch.SimilarDocumentsResponse error + println('doc_: ${doc_}') + // assert doc_.facet_hits[0].count == 2 +} + +// Delete all created indexes +fn test_delete_index() { + mut client := setup_client()! + mut index_list := client.list_indexes(limit: 100)! + + for index in index_list { + client.delete_index(index.uid)! + time.sleep(500 * time.millisecond) + } + + index_list = client.list_indexes(limit: 100)! + assert index_list.len == 0 +} diff --git a/lib/clients/meilisearch/documents.v b/lib/clients/meilisearch/documents.v new file mode 100644 index 00000000..87d1a830 --- /dev/null +++ b/lib/clients/meilisearch/documents.v @@ -0,0 +1,236 @@ +module meilisearch + +import freeflowuniverse.herolib.clients.httpconnection +import x.json2 +import json + +// add_documents adds documents to an index +pub fn (mut client MeilisearchClient) add_documents[T](uid string, documents []T) !AddDocumentResponse { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/documents' + method: .post + data: json2.encode(documents) + } + mut http := client.httpclient()! + response := http.post_json_str(req)! + return json2.decode[AddDocumentResponse](response)! 
+} + +@[params] +struct GetDocumentArgs { +pub mut: + uid string @[required] + document_id int @[required] + fields []string + retrieve_vectors bool @[json: 'retrieveVectors'] +} + +// get_document retrieves one document by its id +pub fn (mut client MeilisearchClient) get_document[T](args GetDocumentArgs) !T { + mut params := map[string]string{} + if args.fields.len > 0 { + params['fields'] = args.fields.join(',') + } + + params['retrieveVectors'] = args.retrieve_vectors.str() + + req := httpconnection.Request{ + prefix: 'indexes/${args.uid}/documents/${args.document_id}' + params: params + } + + mut http := client.httpclient()! + response := http.get_json(req)! + return json.decode(T, response) +} + +// get_documents retrieves documents with optional parameters +pub fn (mut client MeilisearchClient) get_documents[T](uid string, query DocumentsQuery) ![]T { + mut params := map[string]string{} + params['limit'] = query.limit.str() + params['offset'] = query.offset.str() + + if query.fields.len > 0 { + params['fields'] = query.fields.join(',') + } + if query.filter.len > 0 { + params['filter'] = query.filter + } + if query.sort.len > 0 { + params['sort'] = query.sort.join(',') + } + + req := httpconnection.Request{ + prefix: 'indexes/${uid}/documents' + params: params + } + + mut http := client.httpclient()! + response := http.get_json(req)! + decoded := json.decode(ListResponse[T], response)! + return decoded.results +} + +@[params] +struct DeleteDocumentArgs { +pub mut: + uid string @[required] + document_id int @[required] +} + +// delete_document deletes one document by its id +pub fn (mut client MeilisearchClient) delete_document(args DeleteDocumentArgs) !DeleteDocumentResponse { + req := httpconnection.Request{ + prefix: 'indexes/${args.uid}/documents/${args.document_id}' + method: .delete + } + + mut http := client.httpclient()! + response := http.delete(req)! + return json2.decode[DeleteDocumentResponse](response)! +} + +// delete_all_documents deletes all documents in an index +pub fn (mut client MeilisearchClient) delete_all_documents(uid string) !DeleteDocumentResponse { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/documents' + method: .delete + } + + mut http := client.httpclient()! + response := http.delete(req)! + return json2.decode[DeleteDocumentResponse](response)! +} + +// update_documents updates documents in an index +pub fn (mut client MeilisearchClient) update_documents(uid string, documents string) !TaskInfo { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/documents' + method: .put + data: documents + } + + mut http := client.httpclient()! + response := http.post_json_str(req)! + return json2.decode[TaskInfo](response)! +} + +@[params] +struct SearchArgs { +pub mut: + q string @[json: 'q'; required] + offset int @[json: 'offset'] + limit int = 20 @[json: 'limit'] + hits_per_page int = 1 @[json: 'hitsPerPage'] + page int = 1 @[json: 'page'] + filter ?string + facets ?[]string + attributes_to_retrieve []string = ['*'] @[json: 'attributesToRetrieve'] + attributes_to_crop ?[]string @[json: 'attributesToCrop'] + crop_length int = 10 @[json: 'cropLength'] + crop_marker string = '...' 
@[json: 'cropMarker']
+	attributes_to_highlight    ?[]string @[json: 'attributesToHighlight']
+	highlight_pre_tag          string = '<em>' @[json: 'highlightPreTag']
+	highlight_post_tag         string = '</em>' @[json: 'highlightPostTag']
+	show_matches_position      bool @[json: 'showMatchesPosition']
+	sort                       ?[]string
+	matching_strategy          string = 'last' @[json: 'matchingStrategy']
+	show_ranking_score         bool @[json: 'showRankingScore']
+	show_ranking_score_details bool @[json: 'showRankingScoreDetails']
+	ranking_score_threshold    ?f64 @[json: 'rankingScoreThreshold']
+	attributes_to_search_on    []string = ['*'] @[json: 'attributesToSearchOn']
+	hybrid                     ?map[string]string
+	vector                     ?[]f64
+	retrieve_vectors           bool @[json: 'retrieveVectors']
+	locales                    ?[]string
+}
+
+// search performs a search query on an index
+pub fn (mut client MeilisearchClient) search[T](uid string, args SearchArgs) !SearchResponse[T] {
+	req := httpconnection.Request{
+		prefix: 'indexes/${uid}/search'
+		method: .post
+		data:   json.encode(args)
+	}
+	mut http := client.httpclient()!
+	response := http.post_json_str(req)!
+	return json.decode(SearchResponse[T], response)
+}
+
+@[params]
+struct FacetSearchArgs {
+	facet_name              ?string @[json: 'facetName'] // Facet name to search values on
+	facet_query             ?string @[json: 'facetQuery'] // Search query for a given facet value. Defaults to placeholder search if not specified.
+	q                       string  // Query string
+	filter                  ?string // Filter queries by an attribute's value
+	matching_strategy       string = 'last' @[json: 'matchingStrategy'] // Strategy used to match query terms within documents
+	attributes_to_search_on ?[]string @[json: 'attributesToSearchOn'] // Restrict search to the specified attributes
+}
+
+struct FacetSearchHitsResponse {
+	value string @[json: 'value'] // Facet value matching the facetQuery
+	count int    @[json: 'count'] // Number of documents with a facet value matching value
+}
+
+struct FacetSearchResponse {
+	facet_hits         []FacetSearchHitsResponse @[json: 'facetHits'] // Facet values matching the facetQuery
+	facet_query        string @[json: 'facetQuery'] // The original facetQuery
+	processing_time_ms int    @[json: 'processingTimeMs'] // Processing time of the query
+}
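+
+// NOTE: facet search only works on attributes that were first declared
+// filterable, e.g. via update_filterable_attributes(uid, ['title']).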
+ return json.decode(FacetSearchResponse, rsponse) +} + +@[params] +struct SimilarDocumentsArgs { + id SimilarDocumentsID @[json: 'id'] // Identifier of the target document (mandatory) + embedder string = 'default' @[json: 'embedder'] // Embedder to use when computing recommendations + attributes_to_retrieve []string = ['*'] @[json: 'attributesToRetrieve'] // Attributes to display in the returned documents + offset int @[json: 'offset'] // Number of documents to skip + limit int = 20 @[json: 'limit'] // Maximum number of documents returned + filter ?string @[json: 'filter'] // Filter queries by an attribute's value + show_ranking_score bool @[json: 'showRankingScore'] // Display the global ranking score of a document + show_ranking_score_details bool @[json: 'showRankingScoreDetails'] // Display detailed ranking score information + ranking_score_threshold ?f64 @[json: 'rankingScoreThreshold'] // Exclude results with low ranking scores + retrieve_vectors bool @[json: 'retrieveVectors'] // Return document vector data +} + +type SimilarDocumentsID = string | int + +@[params] +struct SimilarDocumentsResponse { + hits []SimilarDocumentsHit @[json: 'hits'] // List of hit items + id string @[json: 'id'] // Identifier of the response + processing_time_ms int @[json: 'processingTimeMs'] // Processing time in milliseconds + limit int = 20 @[json: 'limit'] // Maximum number of documents returned + offset int @[json: 'offset'] // Number of documents to skip + estimated_total_hits int @[json: 'estimatedTotalHits'] // Estimated total number of hits +} + +struct SimilarDocumentsHit { + id SimilarDocumentsID @[json: 'id'] // Identifier of the hit item + title string @[json: 'title'] // Title of the hit item +} + +pub fn (mut client MeilisearchClient) similar_documents(uid string, args SimilarDocumentsArgs) !SimilarDocumentsResponse { + req := httpconnection.Request{ + prefix: 'indexes/${uid}/similar' + method: .post + data: json.encode(args) + } + res := client.enable_eperimental_feature(vector_store: true)! // Enable the feature first. + mut http := client.httpclient()! + rsponse := http.post_json_str(req)! + println('rsponse: ${rsponse}') + return json.decode(SimilarDocumentsResponse, rsponse) +} diff --git a/lib/clients/meilisearch/index_test.v b/lib/clients/meilisearch/index_test.v new file mode 100755 index 00000000..4ade8639 --- /dev/null +++ b/lib/clients/meilisearch/index_test.v @@ -0,0 +1,86 @@ +module meilisearch + +import rand +import time + +__global ( + created_indices []string +) + +// Set up a test client instance +fn setup_client() !&MeilisearchClient { + mut client := get()! + return client +} + +// Tests the health endpoint for server status +fn test_health() { + mut client := setup_client()! + health := client.health()! + assert health.status == 'available' +} + +// Tests the version endpoint to ensure version information is present +fn test_version() { + mut client := setup_client()! + version := client.version()! + assert version.pkg_version.len > 0 + assert version.commit_sha.len > 0 + assert version.commit_date.len > 0 +} + +// Tests index creation and verifies if the index UID matches +fn test_create_index() { + index_name := 'test_' + rand.string(4) + mut client := setup_client()! + + index := client.create_index(uid: index_name)! 
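+	// track the index in the module-level global so cleanup code can remove it later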
+	created_indices << index_name
+
+	assert index.index_uid == index_name
+	assert index.type_ == 'indexCreation'
+}
+
+// Tests index retrieval and verifies that the retrieved index UID matches
+fn test_get_index() {
+	index_name := 'test_' + rand.string(4)
+	index_primary_key := 'id'
+	mut client := setup_client()!
+
+	created_index := client.create_index(uid: index_name, primary_key: index_primary_key)!
+	created_indices << index_name
+	assert created_index.index_uid == index_name
+	assert created_index.type_ == 'indexCreation'
+
+	time.sleep(1 * time.second) // Wait for the index to be created.
+
+	retrieved_index := client.get_index(index_name)!
+	assert retrieved_index.uid == index_name
+	assert retrieved_index.primary_key == index_primary_key
+}
+
+// Tests that listing indexes returns the indexes created by earlier tests
+fn test_list_indexes() {
+	mut client := setup_client()!
+
+	index_list := client.list_indexes()!
+	assert index_list.len > 0
+}
+
+// Tests deletion of an index and confirms it no longer exists
+fn test_delete_index() {
+	mut client := setup_client()!
+	mut index_list := client.list_indexes(limit: 100)!
+
+	for index in index_list {
+		client.delete_index(index.uid)!
+		time.sleep(500 * time.millisecond)
+	}
+
+	index_list = client.list_indexes(limit: 100)!
+	assert index_list.len == 0
+
+	created_indices.clear()
+	assert created_indices.len == 0
+}
diff --git a/lib/clients/meilisearch/meilisearch.code-workspace b/lib/clients/meilisearch/meilisearch.code-workspace
new file mode 100644
index 00000000..b5c671d6
--- /dev/null
+++ b/lib/clients/meilisearch/meilisearch.code-workspace
@@ -0,0 +1,11 @@
+{
+	"folders": [
+		{
+			"path": "."
+		},
+		{
+			"path": "../../../herolib/clients/httpconnection"
+		}
+	],
+	"settings": {}
+}
\ No newline at end of file
diff --git a/lib/clients/meilisearch/meilisearch_factory_.v b/lib/clients/meilisearch/meilisearch_factory_.v
new file mode 100644
index 00000000..ac7aecd6
--- /dev/null
+++ b/lib/clients/meilisearch/meilisearch_factory_.v
@@ -0,0 +1,104 @@
+module meilisearch
+
+import freeflowuniverse.herolib.core.base
+import freeflowuniverse.herolib.core.playbook
+
+__global (
+	meilisearch_global  map[string]&MeilisearchClient
+	meilisearch_default string
+)
+
+/////////FACTORY
+
+@[params]
+pub struct ArgsGet {
+pub mut:
+	name string = 'default'
+}
+
+fn args_get(args_ ArgsGet) ArgsGet {
+	mut args := args_
+	if args.name == '' {
+		args.name = meilisearch_default
+	}
+	if args.name == '' {
+		args.name = 'default'
+	}
+	return args
+}
+
+pub fn get(args_ ArgsGet) !&MeilisearchClient {
+	mut args := args_get(args_)
+	if args.name !in meilisearch_global {
+		if !config_exists(args) {
+			if default {
+				config_save(args)!
+			}
+		}
+		config_load(args)!
+	}
+	return meilisearch_global[args.name] or {
+		println(meilisearch_global)
+		panic('bug in get from factory: instance ${args.name} not found after config load')
+	}
+}
+
+fn config_exists(args_ ArgsGet) bool {
+	mut args := args_get(args_)
+	mut context := base.context() or { panic('bug') }
+	return context.hero_config_exists('meilisearch', args.name)
+}
+
+fn config_load(args_ ArgsGet) ! {
+	mut args := args_get(args_)
+	mut context := base.context()!
+	mut heroscript := context.hero_config_get('meilisearch', args.name)!
+	play(heroscript: heroscript)!
+}
+
+fn config_save(args_ ArgsGet) ! {
+	mut args := args_get(args_)
+	mut context := base.context()!
+	context.hero_config_set('meilisearch', args.name, heroscript_default()!)!
+}
+
+fn set(o MeilisearchClient) ! {
+	mut o2 := obj_init(o)!
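+	// register the initialized client in the module-level map so get() can return it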
+	meilisearch_global[o2.name] = &o2
+}
+
+@[params]
+pub struct PlayArgs {
+pub mut:
+	name       string = 'default'
+	heroscript string // if filled in then plbook will be made out of it
+	plbook     ?playbook.PlayBook
+	reset      bool
+	start      bool
+	stop       bool
+	restart    bool
+	delete     bool
+	configure  bool // make sure there is at least one installed
+}
+
+pub fn play(args_ PlayArgs) ! {
+	mut args := args_
+
+	if args.heroscript == '' {
+		args.heroscript = heroscript_default()!
+	}
+	mut plbook := args.plbook or { playbook.new(text: args.heroscript)! }
+
+	mut install_actions := plbook.find(filter: 'meilisearch.configure')!
+	if install_actions.len > 0 {
+		for install_action in install_actions {
+			mut p := install_action.params
+			cfg_play(p)!
+		}
+	}
+}
+
+// switch instance to be used for meilisearch
+pub fn switch(name string) {
+	meilisearch_default = name
+}
diff --git a/lib/clients/meilisearch/meilisearch_model.v b/lib/clients/meilisearch/meilisearch_model.v
new file mode 100644
index 00000000..663c6a41
--- /dev/null
+++ b/lib/clients/meilisearch/meilisearch_model.v
@@ -0,0 +1,59 @@
+module meilisearch
+
+import freeflowuniverse.herolib.data.paramsparser
+import freeflowuniverse.herolib.clients.httpconnection
+
+pub const version = '1.0.0'
+const singleton = false
+const default = true
+
+// TODO: THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH THE STRUCT BELOW, IS STRUCTURED AS HEROSCRIPT
+pub fn heroscript_default() !string {
+	heroscript := "
+	!!meilisearch.configure
+		name:'default'
+		host:'http://localhost:7700'
+		api_key:'be61fdce-c5d4-44bc-886b-3a484ff6c531'
+	"
+	return heroscript
+}
+
+// THIS IS THE SOURCE OF THE INFORMATION OF THIS FILE, HERE WE HAVE THE CONFIG OBJECT CONFIGURED AND MODELLED
+
+pub struct MeilisearchClient {
+pub mut:
+	name    string = 'default'
+	api_key string @[secret]
+	host    string
+}
+
+fn cfg_play(p paramsparser.Params) ! {
+	// THIS IS EXAMPLE CODE AND NEEDS TO BE CHANGED IN LINE WITH struct above
+	mut mycfg := MeilisearchClient{
+		name:    p.get_default('name', 'default')!
+		host:    p.get('host')!
+		api_key: p.get('api_key')!
+	}
+	set(mycfg)!
+}
+
+fn obj_init(obj_ MeilisearchClient) !MeilisearchClient {
+	// never call get here, only thing we can do here is work on object itself
+	mut obj := obj_
+	// set the http client
+	return obj
+}
+
+fn (mut self MeilisearchClient) httpclient() !&httpconnection.HTTPConnection {
+	mut http_conn := httpconnection.new(
+		name: 'meilisearch'
+		url:  self.host
+	)!
+
+	// Add authentication header if API key is provided
+	if self.api_key.len > 0 {
+		http_conn.default_header.add(.authorization, 'Bearer ${self.api_key}')
+	}
+	return http_conn
+}
diff --git a/lib/clients/meilisearch/models.v b/lib/clients/meilisearch/models.v
new file mode 100644
index 00000000..bf32ee1e
--- /dev/null
+++ b/lib/clients/meilisearch/models.v
@@ -0,0 +1,166 @@
+module meilisearch
+
+// ClientConfig holds configuration for MeilisearchClient
+pub struct ClientConfig {
+pub:
+	host      string // Base URL of Meilisearch server (e.g., "http://localhost:7700")
+	api_key   string // Master key or API key for authentication
+	timeout   int = 30 // Request timeout in seconds
+	max_retry int = 3 // Maximum number of retries for failed requests
+}
+
+// Health represents the health status of the Meilisearch server
+pub struct Health {
+pub:
+	status string @[json: 'status']
+}
+
+// Version represents version information of the Meilisearch server
+pub struct Version {
+pub:
+	pkg_version string @[json: 'pkgVersion']
+	commit_sha  string @[json: 'commitSha']
+	commit_date string @[json: 'commitDate']
+}
+
+// IndexSettings represents all configurable settings for an index
+pub struct IndexSettings {
+pub mut:
+	ranking_rules         []string @[json: 'rankingRules']
+	distinct_attribute    string @[json: 'distinctAttribute']
+	searchable_attributes []string @[json: 'searchableAttributes']
+	displayed_attributes  []string @[json: 'displayedAttributes']
+	stop_words            []string @[json: 'stopWords']
+	synonyms              map[string][]string @[json: 'synonyms']
+	filterable_attributes []string @[json: 'filterableAttributes']
+	sortable_attributes   []string @[json: 'sortableAttributes']
+	typo_tolerance        TypoTolerance @[json: 'typoTolerance']
+}
+
+// TypoTolerance settings for controlling typo behavior
+pub struct TypoTolerance {
+pub mut:
+	enabled                 bool = true @[json: 'enabled']
+	min_word_size_for_typos MinWordSizeForTypos @[json: 'minWordSizeForTypos']
+	disable_on_words        []string @[json: 'disableOnWords']
+	disable_on_attributes   []string @[json: 'disableOnAttributes']
+}
+
+// MinWordSizeForTypos controls minimum word sizes for one/two typos
+pub struct MinWordSizeForTypos {
+pub mut:
+	one_typo  int = 5 @[json: 'oneTypo']
+	two_typos int = 9 @[json: 'twoTypos']
+}
+
+// DocumentsQuery represents query parameters for document operations
+pub struct DocumentsQuery {
+pub mut:
+	limit  int = 20
+	offset int
+	fields []string
+	filter string
+	sort   []string
+}
+
+// TaskInfo represents information about an asynchronous task
+pub struct TaskInfo {
+pub:
+	uid         int @[json: 'taskUid']
+	index_uid   string @[json: 'indexUid']
+	status      string @[json: 'status']
+	task_type   string @[json: 'type']
+	details     map[string]string @[json: 'details']
+	error       string @[json: 'error']
+	duration    string @[json: 'duration']
+	enqueued_at string @[json: 'enqueuedAt']
+	started_at  string @[json: 'startedAt']
+	finished_at string @[json: 'finishedAt']
+}
+
+// CreateIndexArgs represents the arguments for creating an index
+@[params]
+pub struct CreateIndexArgs {
+pub mut:
+	uid         string
+	primary_key string @[json: 'primaryKey']
+}
+
+// CreateIndexResponse represents the task info returned when an index is created
+pub struct CreateIndexResponse {
+pub mut:
+	uid         int @[json: 'taskUid']
+	index_uid   string @[json: 'indexUid']
+	status      string @[json: 'status']
+	type_       string @[json: 'type']
+	enqueued_at string @[json: 'enqueuedAt']
+}
+
+// GetIndexResponse represents information about a retrieved index
+pub struct GetIndexResponse {
+pub mut:
+	uid        string @[json: 'uid']
+	created_at  string @[json: 'createdAt']
+	updated_at  string @[json: 'updatedAt']
+	primary_key string @[json: 'primaryKey']
+}
+
+// ListResponse is a generic paginated list of results
+pub struct ListResponse[T] {
+pub mut:
+	results []T
+	total   int
+	offset  int
+	limit   int
+}
+
+// ListIndexArgs represents the arguments for listing indexes
+@[params]
+pub struct ListIndexArgs {
+pub mut:
+	limit  int = 20
+	offset int
+}
+
+// DeleteIndexResponse represents information about the index deletion
+pub struct DeleteIndexResponse {
+pub mut:
+	uid         int @[json: 'taskUid']
+	index_uid   string @[json: 'indexUid']
+	status      string @[json: 'status']
+	type_       string @[json: 'type']
+	enqueued_at string @[json: 'enqueuedAt']
+}
+
+pub struct AddDocumentResponse {
+pub mut:
+	task_uid    int @[json: 'taskUid']
+	index_uid   string @[json: 'indexUid']
+	status      string
+	type_       string @[json: 'type']
+	enqueued_at string @[json: 'enqueuedAt']
+}
+
+pub struct DeleteDocumentResponse {
+pub mut:
+	task_uid    int @[json: 'taskUid']
+	index_uid   string @[json: 'indexUid']
+	status      string
+	type_       string @[json: 'type']
+	enqueued_at string @[json: 'enqueuedAt']
+}
+
+pub struct SearchResponse[T] {
+pub mut:
+	hits                 []T @[json: 'hits']
+	offset               int @[json: 'offset']
+	limit                int @[json: 'limit']
+	estimated_total_hits int @[json: 'estimatedTotalHits']
+	total_hits           int @[json: 'totalHits']
+	total_pages          int @[json: 'totalPages']
+	hits_per_page        int @[json: 'hitsPerPage']
+	page                 int @[json: 'page']
+	facet_stats          map[string]map[string]f64 @[json: 'facetStats']
+	processing_time_ms   int @[json: 'processingTimeMs']
+	query                string @[json: 'query']
+}
diff --git a/lib/clients/meilisearch/readme.md b/lib/clients/meilisearch/readme.md
new file mode 100644
index 00000000..447aeef6
--- /dev/null
+++ b/lib/clients/meilisearch/readme.md
@@ -0,0 +1,59 @@
+## Meilisearch V Client
+
+This is a simple V client for interacting with a [self-hosted Meilisearch instance](https://www.meilisearch.com/docs/learn/self_hosted/getting_started_with_self_hosted_meilisearch?utm_campaign=oss&utm_medium=home-page&utm_source=docs#setup-and-installation), enabling you to perform operations such as adding, retrieving, deleting, and searching documents within indexes.
+
+### Getting Started with Self-Hosted Meilisearch
+
+To use this V client, ensure you have a **self-hosted Meilisearch instance installed and running**.
+
+This quick start will walk you through installing Meilisearch, adding documents, and performing your first search.
+
+#### Requirements
+
+To follow this setup, you will need `curl` installed.
+
+### Setup and Installation
+
+To install Meilisearch locally, run the following command:
+
+```bash
+# Install Meilisearch
+curl -L https://install.meilisearch.com | sh
+```
+
+### Running Meilisearch
+
+Start Meilisearch with the following command, replacing `"aSampleMasterKey"` with your preferred master key:
+
+```bash
+# Launch Meilisearch
+meilisearch --master-key="aSampleMasterKey"
+```
+---
+
+### Running the V Client Tests
+
+This client includes various test cases that demonstrate common operations in Meilisearch, such as creating indexes, adding documents, retrieving documents, deleting documents, and performing searches.
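+
+For a quick feel of the client API before running the tests, here is a minimal, untested sketch (it assumes a locally running instance, the default configuration, and a hypothetical `Book` document struct used purely for illustration):
+
+```v
+import freeflowuniverse.herolib.clients.meilisearch
+
+// hypothetical document type, for illustration only
+struct Book {
+	id    int
+	title string
+}
+
+mut client := meilisearch.get()!
+client.create_index(uid: 'books', primary_key: 'id')!
+result := client.search[Book]('books', q: 'dune')!
+println(result.hits)
+```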
+To run the tests, you can use the following commands:
+
+```bash
+# Run document-related tests
+v -enable-globals -stats lib/clients/meilisearch/document_test.v
+
+# Run index-related tests
+v -enable-globals -stats lib/clients/meilisearch/index_test.v
+```
+
+### Example: Getting Meilisearch Server Version
+
+Here is a quick example of how to retrieve the Meilisearch server version using this V client:
+
+```v
+import freeflowuniverse.herolib.clients.meilisearch
+
+mut client := meilisearch.get() or { panic(err) }
+version := client.version() or { panic(err) }
+println('Meilisearch version: ${version}')
+```
+
+This example connects to your local Meilisearch instance and prints the server version to verify your setup is correct.
diff --git a/lib/clients/mycelium/mycelium.v b/lib/clients/mycelium/mycelium.v
new file mode 100644
index 00000000..bb5185ac
--- /dev/null
+++ b/lib/clients/mycelium/mycelium.v
@@ -0,0 +1,109 @@
+module mycelium
+
+import net.http
+import json
+
+const server_url = 'http://localhost:8989/api/v1/messages'
+
+pub struct MessageDestination {
+pub:
+	pk string
+}
+
+pub struct PushMessageBody {
+pub:
+	dst     MessageDestination
+	payload string
+}
+
+pub struct InboundMessage {
+pub:
+	id      string
+	src_ip  string @[json: 'srcIP']
+	src_pk  string @[json: 'srcPk']
+	dst_ip  string @[json: 'dstIp']
+	dst_pk  string @[json: 'dstPk']
+	payload string
+}
+
+pub struct MessageStatusResponse {
+pub:
+	id       string
+	dst      string
+	state    string
+	created  string
+	deadline string
+	msg_len  string @[json: 'msgLen']
+}
+
+pub fn send_msg(pk string, payload string, wait bool) !InboundMessage {
+	mut url := server_url
+	if wait {
+		url = '${url}?reply_timeout=120'
+	}
+	msg_req := PushMessageBody{
+		dst:     MessageDestination{
+			pk: pk
+		}
+		payload: payload
+	}
+	mut req := http.new_request(http.Method.post, url, json.encode(msg_req))
+	req.add_custom_header('content-type', 'application/json')!
+	if wait {
+		req.read_timeout = 1200000000000 // 20 minutes, in nanoseconds
+	}
+	res := req.do()!
+	msg := json.decode(InboundMessage, res.body)!
+	return msg
+}
+
+pub fn receive_msg(wait bool) !InboundMessage {
+	mut url := server_url
+	if wait {
+		url = '${url}?timeout=60'
+	}
+	mut req := http.new_request(http.Method.get, url, '')
+	if wait {
+		req.read_timeout = 600000000000 // 10 minutes, in nanoseconds
+	}
+	res := req.do()!
+	msg := json.decode(InboundMessage, res.body)!
+	return msg
+}
+
+pub fn receive_msg_opt(wait bool) ?InboundMessage {
+	mut url := server_url
+	if wait {
+		url = '${url}?timeout=60'
+	}
+	mut req := http.new_request(http.Method.get, url, '')
+	if wait {
+		req.read_timeout = 600000000000 // 10 minutes, in nanoseconds
+	}
+	res := req.do() or { panic(err) }
+	if res.status_code == 204 {
+		return none
+	}
+	msg := json.decode(InboundMessage, res.body) or { panic(err) }
+	return msg
+}
+
+pub fn get_msg_status(id string) !MessageStatusResponse {
+	mut url := '${server_url}/status/${id}'
+	res := http.get(url)!
+	msg_res := json.decode(MessageStatusResponse, res.body)!
+	return msg_res
+}
+
+pub fn reply_msg(id string, pk string, payload string) !http.Status {
+	mut url := '${server_url}/reply/${id}'
+	msg_req := PushMessageBody{
+		dst:     MessageDestination{
+			pk: pk
+		}
+		payload: payload
+	}
+
+	res := http.post_json(url, json.encode(msg_req))!
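+	// the reply endpoint's response body is not used here; only the HTTP status is surfaced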
+	return res.status()
+}
diff --git a/lib/clients/openai/actions.v b/lib/clients/openai/actions.v
new file mode 100644
index 00000000..d0a561e9
--- /dev/null
+++ b/lib/clients/openai/actions.v
@@ -0,0 +1,23 @@
+module openai
+
+import freeflowuniverse.herolib.core.playbook
+
+// run heroscript starting from path, text or giturl
+//```
+// !!openaiclient.define
+//    name:'default'
+//    openaikey: ''
+//    description:'...'
+//```
+pub fn heroplay(mut plbook playbook.PlayBook) ! {
+	for mut action in plbook.find(filter: 'openaiclient.define')! {
+		mut p := action.params
+		instance := p.get_default('instance', 'default')!
+		// cfg.keyname = p.get('keyname')!
+		mut cl := get(instance,
+			openaikey:   p.get('openaikey')!
+			description: p.get_default('description', '')!
+		)!
+		cl.config_save()!
+	}
+}
+
+//>TODO: this needs to be extended to chats, ...
diff --git a/lib/clients/openai/audio.v b/lib/clients/openai/audio.v
new file mode 100644
index 00000000..4526644e
--- /dev/null
+++ b/lib/clients/openai/audio.v
@@ -0,0 +1,110 @@
+module openai
+
+import json
+import freeflowuniverse.herolib.clients.httpconnection
+import os
+import net.http
+
+pub enum AudioRespType {
+	json
+	text
+	srt
+	verbose_json
+	vtt
+}
+
+const audio_model = 'whisper-1'
+const audio_mime_types = {
+	'.mp3':  'audio/mpeg'
+	'.mp4':  'audio/mp4'
+	'.mpeg': 'audio/mpeg'
+	'.mpga': 'audio/mp4'
+	'.m4a':  'audio/mp4'
+	'.wav':  'audio/vnd.wav'
+	'.webm': 'application/octet-stream'
+}
+
+fn audio_resp_type_str(i AudioRespType) string {
+	return match i {
+		.json {
+			'json'
+		}
+		.text {
+			'text'
+		}
+		.srt {
+			'srt'
+		}
+		.verbose_json {
+			'verbose_json'
+		}
+		.vtt {
+			'vtt'
+		}
+	}
+}
+
+pub struct AudioArgs {
+pub mut:
+	filepath        string
+	prompt          string
+	response_format AudioRespType
+	temperature     int
+	language        string
+}
+
+pub struct AudioResponse {
+pub mut:
+	text string
+}
+
+// create transcription from an audio file
+// supported audio formats are mp3, mp4, mpeg, mpga, m4a, wav, or webm
+pub fn (mut f OpenAIClient[Config]) create_transcription(args AudioArgs) !AudioResponse {
+	return f.create_audio_request(args, 'audio/transcriptions')
+}
+
+// create translation to english from an audio file
+// supported audio formats are mp3, mp4, mpeg, mpga, m4a, wav, or webm
+pub fn (mut f OpenAIClient[Config]) create_translation(args AudioArgs) !AudioResponse {
+	return f.create_audio_request(args, 'audio/translations')
+}
+
+fn (mut f OpenAIClient[Config]) create_audio_request(args AudioArgs, endpoint string) !AudioResponse {
+	file_content := os.read_file(args.filepath)!
+	ext := os.file_ext(args.filepath)
+	mut file_mime_type := ''
+	if ext in audio_mime_types {
+		file_mime_type = audio_mime_types[ext]
+	} else {
+		return error('file extension not supported')
+	}
+
+	file_data := http.FileData{
+		filename:     os.base(args.filepath)
+		content_type: file_mime_type
+		data:         file_content
+	}
+
+	form := http.PostMultipartFormConfig{
+		files: {
+			'file': [file_data]
+		}
+		form:  {
+			'model':           audio_model
+			'prompt':          args.prompt
+			'response_format': audio_resp_type_str(args.response_format)
+			'temperature':     args.temperature.str()
+			'language':        args.language
+		}
+	}
+
+	req := httpconnection.Request{
+		prefix: endpoint
+	}
+	r := f.connection.post_multi_part(req, form)!
+	if r.status_code != 200 {
+		return error('got error from server: ${r.body}')
+	}
+	return json.decode(AudioResponse, r.body)!
+} diff --git a/lib/clients/openai/completions.v b/lib/clients/openai/completions.v new file mode 100644 index 00000000..8ed34b22 --- /dev/null +++ b/lib/clients/openai/completions.v @@ -0,0 +1,70 @@ +module openai + +import json + +pub struct ChatCompletion { +pub mut: + id string + object string + created u32 + choices []Choice + usage Usage +} + +pub struct Choice { +pub mut: + index int + message MessageRaw + finish_reason string +} + +pub struct Message { +pub mut: + role RoleType + content string +} + +pub struct Usage { +pub mut: + prompt_tokens int + completion_tokens int + total_tokens int +} + +pub struct Messages { +pub mut: + messages []Message +} + +pub struct MessageRaw { +pub mut: + role string + content string +} + +struct ChatMessagesRaw { +mut: + model string + messages []MessageRaw +} + +// creates a new chat completion given a list of messages +// each message consists of message content and the role of the author +pub fn (mut f OpenAIClient[Config]) chat_completion(model_type ModelType, msgs Messages) !ChatCompletion { + model_type0 := modelname_str(model_type) + mut m := ChatMessagesRaw{ + model: model_type0 + } + for msg in msgs.messages { + mr := MessageRaw{ + role: roletype_str(msg.role) + content: msg.content + } + m.messages << mr + } + data := json.encode(m) + r := f.connection.post_json_str(prefix: 'chat/completions', data: data)! + + res := json.decode(ChatCompletion, r)! + return res +} diff --git a/lib/clients/openai/embeddings.v b/lib/clients/openai/embeddings.v new file mode 100644 index 00000000..f179e291 --- /dev/null +++ b/lib/clients/openai/embeddings.v @@ -0,0 +1,54 @@ +module openai + +import json + +pub enum EmbeddingModel { + text_embedding_ada +} + +fn embedding_model_str(e EmbeddingModel) string { + return match e { + .text_embedding_ada { + 'text-embedding-ada-002' + } + } +} + +@[params] +pub struct EmbeddingCreateArgs { + input []string @[required] + model EmbeddingModel @[required] + user string +} + +pub struct EmbeddingCreateRequest { + input []string + model string + user string +} + +pub struct Embedding { +pub mut: + object string + embedding []f32 + index int +} + +pub struct EmbeddingResponse { +pub mut: + object string + data []Embedding + model string + usage Usage +} + +pub fn (mut f OpenAIClient[Config]) create_embeddings(args EmbeddingCreateArgs) !EmbeddingResponse { + req := EmbeddingCreateRequest{ + input: args.input + model: embedding_model_str(args.model) + user: args.user + } + data := json.encode(req) + r := f.connection.post_json_str(prefix: 'embeddings', data: data)! + return json.decode(EmbeddingResponse, r)! 
+}
diff --git a/lib/clients/openai/factory.v b/lib/clients/openai/factory.v
new file mode 100644
index 00000000..ed623149
--- /dev/null
+++ b/lib/clients/openai/factory.v
@@ -0,0 +1,64 @@
+module openai
+
+import freeflowuniverse.herolib.core.base
+import freeflowuniverse.herolib.core.playbook
+import freeflowuniverse.herolib.ui as gui
+import freeflowuniverse.herolib.clients.httpconnection
+
+// import freeflowuniverse.herolib.ui.console
+
+pub struct OpenAIClient[T] {
+	base.BaseConfig[T]
+pub mut:
+	connection &httpconnection.HTTPConnection
+}
+
+@[params]
+pub struct Config {
+pub mut:
+	openaikey   string @[secret]
+	description string
+}
+
+pub fn get(instance string, cfg Config) !OpenAIClient[Config] {
+	mut self := OpenAIClient[Config]{
+		connection: &httpconnection.HTTPConnection{}
+	}
+
+	if cfg.openaikey.len > 0 {
+		// first the type of the instance, then name of instance, then action
+		self.init('openaiclient', instance, .set, cfg)!
+	} else {
+		self.init('openaiclient', instance, .get)!
+	}
+
+	mut conn := httpconnection.new(
+		name: 'openai'
+		url:  'https://api.openai.com/v1/'
+	)!
+	conn.default_header.add(.authorization, 'Bearer ${self.config()!.openaikey}')
+	// req.add_custom_header('x-disable-pagination', 'True') !
+
+	self.connection = conn
+	return self
+}
+
+// get a new OpenAI client, will create if it doesn't exist or ask for new configuration
+pub fn configure(instance_ string) ! {
+	mut cfg := Config{}
+	mut ui := gui.new()!
+
+	mut instance := instance_
+	if instance == '' {
+		instance = ui.ask_question(
+			question: 'name for OpenAI client'
+			default:  instance
+		)!
+	}
+
+	cfg.openaikey = ui.ask_question(
+		question: '\nPlease specify your openai secret (instance:${instance}).'
+	)!
+
+	get(instance, cfg)!
+}
diff --git a/lib/clients/openai/files.v b/lib/clients/openai/files.v
new file mode 100644
index 00000000..a9a443d5
--- /dev/null
+++ b/lib/clients/openai/files.v
@@ -0,0 +1,90 @@
+module openai
+
+import json
+import freeflowuniverse.herolib.clients.httpconnection
+import os
+import net.http
+
+const jsonl_mime_type = 'text/jsonl'
+
+@[params]
+pub struct FileUploadArgs {
+pub:
+	filepath string
+	purpose  string
+}
+
+pub struct File {
+pub mut:
+	id         string
+	object     string
+	bytes      int
+	created_at int
+	filename   string
+	purpose    string
+}
+
+pub struct Files {
+pub mut:
+	data []File
+}
+
+pub struct DeleteResp {
+pub mut:
+	id      string
+	object  string
+	deleted bool
+}
+
+// upload file to client org, usually used for fine tuning
+pub fn (mut f OpenAIClient[Config]) upload_file(args FileUploadArgs) !File {
+	file_content := os.read_file(args.filepath)!
+
+	file_data := http.FileData{
+		filename:     os.base(args.filepath)
+		data:         file_content
+		content_type: jsonl_mime_type
+	}
+
+	form := http.PostMultipartFormConfig{
+		files: {
+			'file': [file_data]
+		}
+		form:  {
+			'purpose': args.purpose
+		}
+	}
+
+	req := httpconnection.Request{
+		prefix: 'files'
+	}
+	r := f.connection.post_multi_part(req, form)!
+	if r.status_code != 200 {
+		return error('got error from server: ${r.body}')
+	}
+	return json.decode(File, r.body)!
+}
+
+// list all files in client org
+pub fn (mut f OpenAIClient[Config]) list_files() !Files {
+	r := f.connection.get(prefix: 'files')!
+	return json.decode(Files, r)!
+}
+
+// deletes a file
+pub fn (mut f OpenAIClient[Config]) delete_file(file_id string) !DeleteResp {
+	r := f.connection.delete(prefix: 'files/' + file_id)!
+	return json.decode(DeleteResp, r)!
+}
+
+// returns metadata for a single file
+pub fn (mut f OpenAIClient[Config]) get_file(file_id string) !File {
+	r := f.connection.get(prefix: 'files/' + file_id)!
+	return json.decode(File, r)!
+}
+
+// returns the content of a specific file
+pub fn (mut f OpenAIClient[Config]) get_file_content(file_id string) !string {
+	r := f.connection.get(prefix: 'files/' + file_id + '/content')!
+	return r
+}
diff --git a/lib/clients/openai/fine_tunes.v b/lib/clients/openai/fine_tunes.v
new file mode 100644
index 00000000..99dce686
--- /dev/null
+++ b/lib/clients/openai/fine_tunes.v
@@ -0,0 +1,93 @@
+module openai
+
+import json
+
+pub struct FineTune {
+pub:
+	id               string
+	object           string
+	model            string
+	created_at       int
+	events           []FineTuneEvent
+	fine_tuned_model string
+	hyperparams      FineTuneHyperParams
+	organization_id  string
+	result_files     []File
+	status           string
+	validation_files []File
+	training_files   []File
+	updated_at       int
+}
+
+pub struct FineTuneEvent {
+pub:
+	object     string
+	created_at int
+	level      string
+	message    string
+}
+
+pub struct FineTuneHyperParams {
+pub:
+	batch_size               int
+	learning_rate_multiplier f64
+	n_epochs                 int
+	prompt_loss_weight       f64
+}
+
+pub struct FineTuneList {
+pub:
+	object string
+	data   []FineTune
+}
+
+pub struct FineTuneEventList {
+pub:
+	object string
+	data   []FineTuneEvent
+}
+
+@[params]
+pub struct FineTuneCreateArgs {
+pub mut:
+	training_file                  string @[required]
+	model                          string
+	n_epochs                       int = 4
+	batch_size                     int
+	learning_rate_multiplier       f32
+	prompt_loss_weight             f64
+	compute_classification_metrics bool
+	suffix                         string
+}
+
+// creates a new fine-tune based on an already uploaded file
+pub fn (mut f OpenAIClient[Config]) create_fine_tune(args FineTuneCreateArgs) !FineTune {
+	data := json.encode(args)
+	r := f.connection.post_json_str(prefix: 'fine-tunes', data: data)!
+
+	return json.decode(FineTune, r)!
+}
+
+// returns all fine-tunes in this account
+pub fn (mut f OpenAIClient[Config]) list_fine_tunes() !FineTuneList {
+	r := f.connection.get(prefix: 'fine-tunes')!
+	return json.decode(FineTuneList, r)!
+}
+
+// get information about a single fine-tune
+pub fn (mut f OpenAIClient[Config]) get_fine_tune(fine_tune string) !FineTune {
+	r := f.connection.get(prefix: 'fine-tunes/' + fine_tune)!
+	return json.decode(FineTune, r)!
+}
+
+// cancel a fine-tune that didn't finish yet
+pub fn (mut f OpenAIClient[Config]) cancel_fine_tune(fine_tune string) !FineTune {
+	r := f.connection.post_json_str(prefix: 'fine-tunes/' + fine_tune + '/cancel')!
+	return json.decode(FineTune, r)!
+}
+
+// returns all events for a fine-tune in this account
+pub fn (mut f OpenAIClient[Config]) list_fine_tune_events(fine_tune string) !FineTuneEventList {
+	r := f.connection.get(prefix: 'fine-tunes/' + fine_tune + '/events')!
+	return json.decode(FineTuneEventList, r)!
+}
diff --git a/lib/clients/openai/images.v b/lib/clients/openai/images.v
new file mode 100644
index 00000000..4061a60e
--- /dev/null
+++ b/lib/clients/openai/images.v
@@ -0,0 +1,189 @@
+module openai
+
+import json
+import net.http
+import os
+import freeflowuniverse.herolib.clients.httpconnection
+
+const image_mime_type = 'image/png'
+
+pub enum ImageSize {
+	size_256_256
+	size_512_512
+	size_1024_1024
+}
+
+fn image_size_str(i ImageSize) string {
+	return match i {
+		.size_256_256 {
+			'256x256'
+		}
+		.size_512_512 {
+			'512x512'
+		}
+		.size_1024_1024 {
+			'1024x1024'
+		}
+	}
+}
+
+pub enum ImageRespType {
+	url
+	b64_json
+}
+
+fn image_resp_type_str(i ImageRespType) string {
+	return match i {
+		.url {
+			'url'
+		}
+		.b64_json {
+			'b64_json'
+		}
+	}
+}
+
+pub struct ImageCreateArgs {
+pub mut:
+	prompt     string
+	num_images int
+	size       ImageSize
+	format     ImageRespType
+	user       string
+}
+
+pub struct ImageEditArgs {
+pub mut:
+	image_path string
+	mask_path  string
+	prompt     string
+	num_images int
+	size       ImageSize
+	format     ImageRespType
+	user       string
+}
+
+pub struct ImageVariationArgs {
+pub mut:
+	image_path string
+	num_images int
+	size       ImageSize
+	format     ImageRespType
+	user       string
+}
+
+pub struct ImageRequest {
+pub mut:
+	prompt          string
+	n               int
+	size            string
+	response_format string
+	user            string
+}
+
+pub struct ImageResponse {
+pub mut:
+	url      string
+	b64_json string
+}
+
+pub struct Images {
+pub mut:
+	created int
+	data    []ImageResponse
+}
+
+// create new image generations given a prompt
+// the amount of images returned is specified by `num_images`
+pub fn (mut f OpenAIClient[Config]) create_image(args ImageCreateArgs) !Images {
+	image_size := image_size_str(args.size)
+	response_format := image_resp_type_str(args.format)
+	request := ImageRequest{
+		prompt:          args.prompt
+		n:               args.num_images
+		size:            image_size
+		response_format: response_format
+		user:            args.user
+	}
+	data := json.encode(request)
+	r := f.connection.post_json_str(prefix: 'images/generations', data: data)!
+	return json.decode(Images, r)!
+}
+
+// edit an existing image given a prompt
+// the image needs to be in PNG format and transparent, or else a mask of the same size needs
+// to be specified to indicate where the image should be in the generated image
+// the amount of images returned is specified by `num_images`
+pub fn (mut f OpenAIClient[Config]) create_edit_image(args ImageEditArgs) !Images {
+	image_content := os.read_file(args.image_path)!
+	image_file := http.FileData{
+		filename:     os.base(args.image_path)
+		content_type: image_mime_type
+		data:         image_content
+	}
+	mut mask_file := []http.FileData{}
+	if args.mask_path != '' {
+		mask_content := os.read_file(args.mask_path)!
+		mask_file << http.FileData{
+			filename:     os.base(args.mask_path)
+			content_type: image_mime_type
+			data:         mask_content
+		}
+	}
+
+	form := http.PostMultipartFormConfig{
+		files: {
+			'image': [image_file]
+			'mask':  mask_file
+		}
+		form:  {
+			'prompt':          args.prompt
+			'n':               args.num_images.str()
+			'response_format': image_resp_type_str(args.format)
+			'size':            image_size_str(args.size)
+			'user':            args.user
+		}
+	}
+
+	req := httpconnection.Request{
+		prefix: 'images/edits'
+	}
+	r := f.connection.post_multi_part(req, form)!
+	if r.status_code != 200 {
+		return error('got error from server: ${r.body}')
+	}
+	return json.decode(Images, r.body)!
+}
+
+// create variations of the given image
+// image needs to be in PNG format
+// the amount of images returned is specified by `num_images`
+pub fn (mut f OpenAIClient[Config]) create_variation_image(args ImageVariationArgs) !Images {
+	image_content := os.read_file(args.image_path)!
+	image_file := http.FileData{
+		filename:     os.base(args.image_path)
+		content_type: image_mime_type
+		data:         image_content
+	}
+
+	form := http.PostMultipartFormConfig{
+		files: {
+			'image': [image_file]
+		}
+		form:  {
+			'n':               args.num_images.str()
+			'response_format': image_resp_type_str(args.format)
+			'size':            image_size_str(args.size)
+			'user':            args.user
+		}
+	}
+
+	req := httpconnection.Request{
+		prefix: 'images/variations'
+	}
+	r := f.connection.post_multi_part(req, form)!
+	if r.status_code != 200 {
+		return error('got error from server: ${r.body}')
+	}
+	return json.decode(Images, r.body)!
+}
diff --git a/lib/clients/openai/model_enums.v b/lib/clients/openai/model_enums.v
new file mode 100644
index 00000000..ed390599
--- /dev/null
+++ b/lib/clients/openai/model_enums.v
@@ -0,0 +1,75 @@
+module openai
+
+pub enum ModelType {
+	gpt_3_5_turbo
+	gpt_4
+	gpt_4_0613
+	gpt_4_32k
+	gpt_4_32k_0613
+	gpt_3_5_turbo_0613
+	gpt_3_5_turbo_16k
+	gpt_3_5_turbo_16k_0613
+	whisper_1
+}
+
+fn modelname_str(e ModelType) string {
+	return match e {
+		.gpt_4 {
+			'gpt-4'
+		}
+		.gpt_3_5_turbo {
+			'gpt-3.5-turbo'
+		}
+		.gpt_4_0613 {
+			'gpt-4-0613'
+		}
+		.gpt_4_32k {
+			'gpt-4-32k'
+		}
+		.gpt_4_32k_0613 {
+			'gpt-4-32k-0613'
+		}
+		.gpt_3_5_turbo_0613 {
+			'gpt-3.5-turbo-0613'
+		}
+		.gpt_3_5_turbo_16k {
+			'gpt-3.5-turbo-16k'
+		}
+		.gpt_3_5_turbo_16k_0613 {
+			'gpt-3.5-turbo-16k-0613'
+		}
+		.whisper_1 {
+			'whisper-1'
+		}
+	}
+}
+
+pub enum RoleType {
+	system
+	user
+	assistant
+	function
+}
+
+fn roletype_str(x RoleType) string {
+	return match x {
+		.system {
+			'system'
+		}
+		.user {
+			'user'
+		}
+		.assistant {
+			'assistant'
+		}
+		.function {
+			'function'
+		}
+	}
+}
diff --git a/lib/clients/openai/models.v b/lib/clients/openai/models.v
new file mode 100644
index 00000000..70ce1b77
--- /dev/null
+++ b/lib/clients/openai/models.v
@@ -0,0 +1,46 @@
+module openai
+
+import json
+
+pub struct Model {
+pub mut:
+	id         string
+	created    int
+	object     string
+	owned_by   string
+	root       string
+	parent     string
+	permission []ModelPermission
+}
+
+pub struct ModelPermission {
+pub mut:
+	id                   string
+	created              int
+	object               string
+	allow_create_engine  bool
+	allow_sampling       bool
+	allow_logprobs       bool
+	allow_search_indices bool
+	allow_view           bool
+	allow_fine_tuning    bool
+	organization         string
+	is_blocking          bool
+}
+
+pub struct Models {
+pub mut:
+	data []Model
+}
+
+// list current models available in OpenAI
+pub fn (mut f OpenAIClient[Config]) list_models() !Models {
+	r := f.connection.get(prefix: 'models')!
+	return json.decode(Models, r)!
+}
+
+// returns details of a model using the model id
+pub fn (mut f OpenAIClient[Config]) get_model(model string) !Model {
+	r := f.connection.get(prefix: 'models/' + model)!
+	return json.decode(Model, r)!
+}
diff --git a/lib/clients/openai/moderation.v b/lib/clients/openai/moderation.v
new file mode 100644
index 00000000..bf63b39a
--- /dev/null
+++ b/lib/clients/openai/moderation.v
@@ -0,0 +1,80 @@
+module openai
+
+import json
+
+pub enum ModerationModel {
+	text_moderation_latest
+	text_moderation_stable
+}
+
+fn moderation_model_str(m ModerationModel) string {
+	return match m {
+		.text_moderation_latest {
+			'text-moderation-latest'
+		}
+		.text_moderation_stable {
+			'text-moderation-stable'
+		}
+	}
+}
+
+@[params]
+pub struct ModerationRequest {
+mut:
+	input string
+	model string
+}
+
+pub struct ModerationResult {
+pub mut:
+	categories      ModerationResultCategories
+	category_scores ModerationResultCategoryScores
+	flagged         bool
+}
+
+pub struct ModerationResultCategories {
+pub mut:
+	sexual                 bool
+	hate                   bool
+	harassment             bool
+	selfharm               bool @[json: 'self-harm']
+	sexual_minors          bool @[json: 'sexual/minors']
+	hate_threatening       bool @[json: 'hate/threatening']
+	violence_graphic       bool @[json: 'violence/graphic']
+	selfharm_intent        bool @[json: 'self-harm/intent']
+	selfharm_instructions  bool @[json: 'self-harm/instructions']
+	harassment_threatening bool @[json: 'harassment/threatening']
+	violence               bool
+}
+
+pub struct ModerationResultCategoryScores {
+pub mut:
+	sexual                 f32
+	hate                   f32
+	harassment             f32
+	selfharm               f32 @[json: 'self-harm']
+	sexual_minors          f32 @[json: 'sexual/minors']
+	hate_threatening       f32 @[json: 'hate/threatening']
+	violence_graphic       f32 @[json: 'violence/graphic']
+	selfharm_intent        f32 @[json: 'self-harm/intent']
+	selfharm_instructions  f32 @[json: 'self-harm/instructions']
+	harassment_threatening f32 @[json: 'harassment/threatening']
+	violence               f32
+}
+
+pub struct ModerationResponse {
+pub mut:
+	id      string
+	model   string
+	results []ModerationResult
+}
+
+pub fn (mut f OpenAIClient[Config]) create_moderation(input string, model ModerationModel) !ModerationResponse {
+	req := ModerationRequest{
+		input: input
+		model: moderation_model_str(model)
+	}
+	data := json.encode(req)
+	r := f.connection.post_json_str(prefix: 'moderations', data: data)!
+	return json.decode(ModerationResponse, r)!
+}
diff --git a/lib/clients/openai/readme.md b/lib/clients/openai/readme.md
new file mode 100644
index 00000000..08827c57
--- /dev/null
+++ b/lib/clients/openai/readme.md
@@ -0,0 +1,50 @@
+# OpenAI
+
+An implementation of an OpenAI client using Vlang.
+
+## Supported methods
+
+- List available models
+- Chat Completion
+- Translate Audio
+- Transcribe Audio
+- Create image based on prompt
+- Edit an existing image
+- Create variation of an image
+
+## Usage
+
+To use the client you need an OpenAI key, which can be generated [here](https://platform.openai.com/account/api-keys).
+
+The key should be exposed in an environment variable as follows:
+
+```bash
+export OPENAI_API_KEY=
+```
+
+To get a new instance of the client:
+
+```v
+import freeflowuniverse.herolib.clients.openai
+
+mut ai_cli := openai.get('default')!
+```
+
+Then it is possible to perform all the listed operations:
+
+```v
+// listing models
+models := ai_cli.list_models()!
+
+// creating a new chat completion
+
+mut msg := []openai.Message{}
+msg << openai.Message{
+	role:    openai.RoleType.user
+	content: 'Say this is a test!'
+}
+mut msgs := openai.Messages{
+	messages: msg
+}
+res := ai_cli.chat_completion(openai.ModelType.gpt_3_5_turbo, msgs)!
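+
+// the assistant's reply text is in the first choice
+// (field names follow the ChatCompletion struct in completions.v)
+println(res.choices[0].message.content)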
+```
diff --git a/lib/clients/postgres/client.v b/lib/clients/postgres/client.v
new file mode 100644
index 00000000..8526515c
--- /dev/null
+++ b/lib/clients/postgres/client.v
@@ -0,0 +1,56 @@
+module postgres
+
+import freeflowuniverse.herolib.core.base
+import db.pg
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.ui.console
+
+// pub struct PostgresClient {
+// 	base.BaseConfig
+// pub mut:
+// 	config Config
+// 	db     pg.DB
+// }
+
+// @[params]
+// pub struct ClientArgs {
+// pub mut:
+// 	instance string @[required]
+// 	// playargs ?play.PlayArgs
+// }
+
+// pub fn get(clientargs ClientArgs) !PostgresClient {
+// 	// mut plargs := clientargs.playargs or {
+// 	// 	// play.PlayArgs
+// 	// 	// {
+// 	// 	// }
+// 	// }
+
+// 	// mut cfg := configurator(clientargs.instance, plargs)!
+// 	// mut args := cfg.get()!
+
+// 	args.instance = texttools.name_fix(args.instance)
+// 	if args.instance == '' {
+// 		args.instance = 'default'
+// 	}
+// 	// console.print_debug(args)
+// 	mut db := pg.connect(
+// 		host:     args.host
+// 		user:     args.user
+// 		port:     args.port
+// 		password: args.password
+// 		dbname:   args.dbname
+// 	)!
+// 	// console.print_debug(postgres_client)
+// 	return PostgresClient{
+// 		instance: args.instance
+// 		db:       db
+// 		config:   args
+// 	}
+// }
+
+struct LocalConfig {
+	name   string
+	path   string
+	passwd string
+}
diff --git a/lib/clients/postgres/cmds.v b/lib/clients/postgres/cmds.v
new file mode 100644
index 00000000..32d0e846
--- /dev/null
+++ b/lib/clients/postgres/cmds.v
@@ -0,0 +1,107 @@
+module postgres
+
+import db.pg
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.osal
+import os
+import freeflowuniverse.herolib.ui.console
+
+pub fn (mut self PostgresClient[Config]) check() ! {
+	mut db := self.db
+	db.exec('SELECT version();') or { return error('can\'t select version from database.\n${self}') }
+}
+
+pub fn (mut self PostgresClient[Config]) exec(c_ string) ![]pg.Row {
+	mut db := self.db
+	mut c := c_
+	if !(c.trim_space().ends_with(';')) {
+		c += ';'
+	}
+
+	config := self.config()!
+	return db.exec(c) or {
+		return error('can\'t execute query on ${config.host}:${config.dbname}.\n${c}\n${err}')
+	}
+}
+
+pub fn (mut self PostgresClient[Config]) db_exists(name_ string) !bool {
+	mut db := self.db
+	r := db.exec("SELECT datname FROM pg_database WHERE datname='${name_}';")!
+	if r.len == 1 {
+		// console.print_header(' db exists: ${name_}')
+		return true
+	}
+	if r.len > 1 {
+		return error('should not have more than 1 db with name ${name_}')
+	}
+	return false
+}
+
+pub fn (mut self PostgresClient[Config]) db_create(name_ string) ! {
+	name := texttools.name_fix(name_)
+	mut db := self.db
+	db_exists := self.db_exists(name_)!
+	if !db_exists {
+		console.print_header(' db create: ${name}')
+		db.exec('CREATE DATABASE ${name};')!
+	}
+	db_exists2 := self.db_exists(name_)!
+	if !db_exists2 {
+		return error('Could not create db: ${name_}, could not find in DB.')
+	}
+}
+
+pub fn (mut self PostgresClient[Config]) db_delete(name_ string) ! {
+	mut db := self.db
+	name := texttools.name_fix(name_)
+	self.check()!
+	db_exists := self.db_exists(name_)!
+	if db_exists {
+		console.print_header(' db delete: ${name_}')
+		db.exec('DROP DATABASE ${name};')!
+	}
+	db_exists2 := self.db_exists(name_)!
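+	// verify the drop actually removed the database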
+	if db_exists2 {
+		return error('Could not delete db: ${name_}, still found in DB.')
+	}
+}
+
+pub fn (mut self PostgresClient[Config]) db_names() ![]string {
+	mut res := []string{}
+	sqlstr := "SELECT datname FROM pg_database WHERE datistemplate = false and datname != 'postgres' and datname != 'root';"
+	for row in self.exec(sqlstr)! {
+		v := row.vals[0] or { '' }
+		res << v or { '' }
+	}
+	return res
+}
+
+@[params]
+pub struct BackupParams {
+pub mut:
+	dbname string
+	dest   string
+}
+
+pub fn (mut self PostgresClient[Config]) backup(args BackupParams) ! {
+	if args.dest == '' {
+		return error('specify the destination please')
+	}
+	if !os.exists(args.dest) {
+		os.mkdir_all(args.dest)!
+	}
+
+	if args.dbname == '' {
+		for dbname in self.db_names()! {
+			self.backup(dbname: dbname, dest: args.dest)!
+		}
+	} else {
+		config := self.config()!
+		cmd := '
+		export PGPASSWORD=\'${config.password}\'
+		pg_dump -h ${config.host} -p ${config.port} -U ${config.user} --dbname=${args.dbname} --format=c > "${args.dest}/${args.dbname}.bak"
+		'
+		// console.print_debug(cmd)
+		osal.exec(cmd: cmd, stdout: true)!
+	}
+}
diff --git a/lib/clients/postgres/configure.v b/lib/clients/postgres/configure.v
new file mode 100644
index 00000000..f0c2233d
--- /dev/null
+++ b/lib/clients/postgres/configure.v
@@ -0,0 +1,91 @@
+module postgres
+
+import freeflowuniverse.herolib.core.base
+import freeflowuniverse.herolib.ui
+import freeflowuniverse.herolib.ui.console
+
+@[params]
+pub struct Config {
+pub mut:
+	instance   string = 'default'
+	user       string = 'root'
+	port       int    = 5432
+	host       string = 'localhost'
+	password   string
+	dbname     string = 'postgres'
+	heroscript string
+	reset      bool
+}
+
+pub fn configure(instance string, cfg_ Config) !PostgresClient[Config] {
+	mut config := cfg_
+
+	mut server := PostgresClient[Config]{}
+	server.init('postgres', instance, .set, config)!
+	return get(instance)!
+}
+
+pub fn configure_interactive(args_ Config, mut session base.Session) ! {
+	mut args := args_
+	mut myui := ui.new()!
+
+	console.clear()
+	console.print_debug('\n## Configure Postgres Client')
+	console.print_debug('============================\n\n')
+
+	instance := myui.ask_question(
+		question: 'name for postgres client'
+		default:  args.instance
+	)!
+
+	args.user = myui.ask_question(
+		question: 'user'
+		minlen:   3
+		default:  args.user
+	)!
+
+	args.password = myui.ask_question(
+		question: 'password'
+		minlen:   3
+		default:  args.password
+	)!
+
+	args.dbname = myui.ask_question(
+		question: 'dbname'
+		minlen:   3
+		default:  args.dbname
+	)!
+
+	args.host = myui.ask_question(
+		question: 'host'
+		minlen:   3
+		default:  args.host
+	)!
+	mut port := myui.ask_question(
+		question: 'port'
+		default:  '${args.port}'
+	)!
+	args.port = port.int()
+
+	mut client := PostgresClient[Config]{}
+	client.init('postgres', instance, .set, args)!
+}
+
+// pub fn play_session(mut session base.Session) ! {
+// 	for mut action in session.plbook.find(filter: 'postgresclient.define')! {
+// 		mut p := action.params
+// 		mut args := config()
+// 		panic('implement')
+// 		// args.instance = p.get_default('name','')!
+// 		// if args.instance == ""{
+// 		// 	args.instance = p.get_default('instance', 'default')!
+// 		// }
+// 		// args.mail_from = p.get('mail_from')!
+// 		// args.smtp_addr = p.get('smtp_addr')!
+// 		// args.smtp_login = p.get('smtp_login')!
+// 		// args.smtp_passwd = p.get('smtp_passwd')!
+// 		// args.smpt_port = p.get_int('smpt_port')!
+// 		// mut c:=configurator(args.instance,session:session)!
+// 		// c.set(args)!
+// 	}
+// }
diff --git a/lib/clients/postgres/factory.v b/lib/clients/postgres/factory.v
new file mode 100644
index 00000000..c2a65422
--- /dev/null
+++ b/lib/clients/postgres/factory.v
@@ -0,0 +1,29 @@
+module postgres
+
+import freeflowuniverse.herolib.core.base
+import db.pg
+
+pub struct PostgresClient[T] {
+	base.BaseConfig[T]
+pub mut:
+	db pg.DB
+}
+
+pub fn get(instance string) !PostgresClient[Config] {
+	mut self := PostgresClient[Config]{}
+	self.init('postgres', instance, .get)!
+	config := self.config()!
+
+	mut db := pg.connect(
+		host:     config.host
+		user:     config.user
+		port:     config.port
+		password: config.password
+		dbname:   config.dbname
+	)!
+
+	self.db = db
+	return self
+}
diff --git a/lib/clients/postgres/readme.md b/lib/clients/postgres/readme.md
new file mode 100644
index 00000000..45753cb5
--- /dev/null
+++ b/lib/clients/postgres/readme.md
@@ -0,0 +1,76 @@
+# postgres client
+
+## use hero to work with postgres
+
+```bash
+
+Usage: hero postgres [flags] [commands]
+
+manage postgresql
+
+Flags:
+  -help             Prints help information.
+  -man              Prints the auto-generated manpage.
+
+Commands:
+  exec              execute a query
+  check             check the postgresql connection
+  configure         configure a postgresql connection.
+  backup            backup
+  print             print configure info.
+  list              list databases
+
+```
+
+## configure
+
+The postgres configuration is stored on the filesystem for later reuse; it can be configured as follows:
+
+```v
+import freeflowuniverse.herolib.clients.postgres
+
+postgres.configure('default',
+	user:     'root'
+	port:     5432
+	host:     'localhost'
+	password: 'ssss'
+	dbname:   'postgres'
+)!
+
+mut db := postgres.get('default')!
+
+```
+
+## configure through heroscript
+
+```v
+import freeflowuniverse.herolib.clients.postgres
+
+heroscript := "
+!!postgresclient.define name:'default'
+    // TO IMPLEMENT
+"
+
+postgres.configure(heroscript: heroscript)!
+
+// can also be done through get directly
+mut cl := postgres.get(reset: true, name: 'default', heroscript: heroscript)
+
+```
+
+
+## some postgresql cmds
+
+```v
+import freeflowuniverse.herolib.clients.postgres
+
+mut cl := postgres.get('default')! // gets the postgres client with name 'default'
+
+cl.db_exists('mydb')!
+
+```
+
+## use the v `db.pg` module
+
+- [https://modules.vlang.io/db.pg.html#DB.exec](https://modules.vlang.io/db.pg.html#DB.exec)
\ No newline at end of file
diff --git a/lib/clients/redisclient/factory.v b/lib/clients/redisclient/factory.v
new file mode 100644
index 00000000..2f1be5ab
--- /dev/null
+++ b/lib/clients/redisclient/factory.v
@@ -0,0 +1,58 @@
+module redisclient
+
+// original code see https://github.com/patrickpissurno/vredis/blob/master/vredis_test.v
+// credits see there as well (-:
+import net
+// import sync
+// import strconv
+
+__global (
+	redis_connections []Redis
+)
+
+const default_read_timeout = net.infinite_timeout
+
+@[heap]
+pub struct Redis {
+pub:
+	addr string
+mut:
+	socket net.TcpConn
+}
+
+// https://redis.io/topics/protocol
+// examples:
+// localhost:6379
+// /tmp/redis-default.sock
+pub fn new(addr string) !Redis {
+	// lock redis_connections {
+	for mut conn in redis_connections {
+		if conn.addr == addr {
+			return conn
+		}
+	}
+	// means there is no connection yet
+	mut r := Redis{
+		addr: addr
+	}
+	r.socket_connect()!
+	redis_connections << r
+	return r
+	//}
+	// panic("bug")
+}
+
+pub fn reset() ! {
+	// lock redis_connections {
+	for mut conn in redis_connections {
+		conn.disconnect()
+	}
+	redis_connections = []Redis{}
+	//}
+}
+
+pub fn checkempty() {
+	// lock redis_connections {
+	assert redis_connections.len == 0
+	//}
+}
diff --git a/lib/clients/redisclient/readme.md b/lib/clients/redisclient/readme.md
new file mode 100644
index 00000000..831efad0
--- /dev/null
+++ b/lib/clients/redisclient/readme.md
@@ -0,0 +1,19 @@
+# Redisclient
+
+## basic example to connect to local redis on 127.0.0.1:6379
+
+```v
+
+import freeflowuniverse.herolib.clients.redisclient
+
+mut redis := redisclient.core_get()!
+redis.set('test', 'some data') or { panic('set failed: ${err}') }
+r := redis.get('test')!
+if r != 'some data' {
+	panic('get returned a different result')
+}
+
+```
+
+> redis commands can be found on https://redis.io/commands/
+
diff --git a/lib/clients/redisclient/rediscache.v b/lib/clients/redisclient/rediscache.v
new file mode 100644
index 00000000..f2148f4d
--- /dev/null
+++ b/lib/clients/redisclient/rediscache.v
@@ -0,0 +1,57 @@
+module redisclient
+
+import freeflowuniverse.herolib.ui.console
+
+pub struct RedisCache {
+mut:
+	redis     &Redis @[str: skip]
+	namespace string
+	enabled   bool = true
+}
+
+// return a cache object starting from a redis connection
+pub fn (mut r Redis) cache(namespace string) RedisCache {
+	return RedisCache{
+		redis:     &r
+		namespace: namespace
+	}
+}
+
+pub fn (mut h RedisCache) get(key string) ?string {
+	if !h.enabled {
+		return none
+	}
+	key2 := h.namespace + ':' + key
+	hit := h.redis.get('cache:${key2}') or {
+		console.print_debug('[-] cache: cache miss, ${key2}')
+		return none
+	}
+
+	console.print_debug('[+] cache: cache hit: ${key2}')
+	return hit
+}
+
+pub fn (mut h RedisCache) set(key string, val string, expire int) ! {
+	if !h.enabled {
+		return
+	}
+
+	key2 := h.namespace + ':' + key
+	h.redis.set_ex('cache:${key2}', val, expire.str())!
+}
+
+pub fn (mut h RedisCache) exists(key string) bool {
+	h.get(key) or { return false }
+	return true
+}
+
+pub fn (mut h RedisCache) reset() ! {
+	key_check := 'cache:' + h.namespace
+	// console.print_debug(key_check)
+	keys := h.redis.keys(key_check)!
+	// console.print_debug(keys)
+	for key in keys {
+		// console.print_debug(key)
+		h.redis.del(key)!
+	}
+}
diff --git a/lib/clients/redisclient/redisclient_commands.v b/lib/clients/redisclient/redisclient_commands.v
new file mode 100644
index 00000000..b5d2f050
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_commands.v
@@ -0,0 +1,305 @@
+module redisclient
+
+import freeflowuniverse.herolib.data.resp
+import time
+
+pub fn (mut r Redis) ping() !string {
+	return r.send_expect_strnil(['PING'])
+}
+
+pub fn (mut r Redis) set(key string, value string) ! {
+	return r.send_expect_ok(['SET', key, value])
+}
+
+pub fn (mut r Redis) set_ex(key string, value string, ex string) ! {
+	return r.send_expect_ok(['SET', key, value, 'EX', ex])
+}
+
+pub fn (mut r Redis) set_opts(key string, value string, opts SetOpts) !bool {
+	ex := if opts.ex == -4 && opts.px == -4 {
+		''
+	} else if opts.ex != -4 {
+		' EX ${opts.ex}'
+	} else {
+		' PX ${opts.px}'
+	}
+	nx := if opts.nx == false && opts.xx == false {
+		''
+	} else if opts.nx == true {
+		' NX'
+	} else {
+		' XX'
+	}
+	keep_ttl := if opts.keep_ttl == false { '' } else { ' KEEPTTL' }
+	message := 'SET "${key}" "${value}"${ex}${nx}${keep_ttl}\r\n'
+	r.write(message.bytes()) or { return false }
+	time.sleep(1 * time.millisecond)
+	res := r.read_line()!
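+	// a RESP simple-string reply of +OK means the SET succeeded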
+ match res { + '+OK\r\n' { + return true + } + else { + return false + } + } +} + +pub fn (mut r Redis) get(key string) !string { + // mut key2 := key.trim("\"'") + return r.send_expect_strnil(['GET', key]) +} + +pub fn (mut r Redis) exists(key string) !bool { + r2 := r.send_expect_int(['EXISTS', key])! + return r2 == 1 +} + +pub fn (mut r Redis) del(key string) !int { + return r.send_expect_int(['DEL', key]) +} + +pub fn (mut r Redis) hset(key string, skey string, value string) ! { + r.send_expect_int(['HSET', key, skey, value])! +} + +pub fn (mut r Redis) hget(key string, skey string) !string { + // mut key2 := key.trim("\"'") + return r.send_expect_strnil(['HGET', key, skey]) +} + +pub fn (mut r Redis) hgetall(key string) !map[string]string { + // mut key2 := key.trim("\"'") + res := r.send_expect_list_str(['HGETALL', key])! + mut mapped := map[string]string{} + mut i := 0 + for i < res.len && i + 1 < res.len { + mapped[res[i]] = res[i + 1] + i += 2 + } + return mapped +} + +pub fn (mut r Redis) hexists(key string, skey string) !bool { + return r.send_expect_bool(['HEXISTS', key, skey]) +} + +pub fn (mut r Redis) hdel(key string, skey string) !int { + return r.send_expect_int(['HDEL', key, skey]) +} + +pub fn (mut r Redis) incrby(key string, increment int) !int { + return r.send_expect_int(['INCRBY', key, increment.str()]) +} + +pub fn (mut r Redis) incr(key string) !int { + return r.incrby(key, 1) +} + +pub fn (mut r Redis) decr(key string) !int { + return r.incrby(key, -1) +} + +pub fn (mut r Redis) decrby(key string, decrement int) !int { + return r.incrby(key, -decrement) +} + +pub fn (mut r Redis) incrbyfloat(key string, increment f64) !f64 { + res := r.send_expect_str(['INCRBYFLOAT', key, increment.str()])! + count := res.f64() + return count +} + +pub fn (mut r Redis) append(key string, value string) !int { + return r.send_expect_int(['APPEND', key, value]) +} + +pub fn (mut r Redis) setrange(key string, offset int, value string) !int { + return r.send_expect_int(['SETRANGE', key, offset.str(), value.str()]) +} + +pub fn (mut r Redis) lpush(key string, element string) !int { + return r.send_expect_int(['LPUSH', key, element]) +} + +pub fn (mut r Redis) rpush(key string, element string) !int { + return r.send_expect_int(['RPUSH', key, element]) +} + +pub fn (mut r Redis) lrange(key string, start int, end int) ![]resp.RValue { + return r.send_expect_list(['LRANGE', key, start.str(), end.str()]) +} + +pub fn (mut r Redis) expire(key string, seconds int) !int { + return r.send_expect_int(['EXPIRE', key, seconds.str()]) +} + +pub fn (mut r Redis) pexpire(key string, millis int) !int { + return r.send_expect_int(['PEXPIRE', key, millis.str()]) +} + +pub fn (mut r Redis) expireat(key string, timestamp int) !int { + return r.send_expect_int(['EXPIREAT', key, timestamp.str()]) +} + +pub fn (mut r Redis) pexpireat(key string, millistimestamp i64) !int { + return r.send_expect_int(['PEXPIREAT', key, millistimestamp.str()]) +} + +pub fn (mut r Redis) persist(key string) !int { + return r.send_expect_int(['PERSIST', key]) +} + +pub fn (mut r Redis) getset(key string, value string) !string { + return r.send_expect_strnil(['GETSET', key, value]) +} + +pub fn (mut r Redis) getrange(key string, start int, end int) !string { + return r.send_expect_str(['GETRANGE', key, start.str(), end.str()]) +} + +pub fn (mut r Redis) keys(pattern string) ![]string { + response := r.send_expect_list(['KEYS', pattern])! 
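+	// unwrap each RESP value into a plain string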
+	mut result := []string{}
+	for item in response {
+		result << resp.get_redis_value(item)
+	}
+	return result
+}
+
+pub fn (mut r Redis) hkeys(key string) ![]string {
+	response := r.send_expect_list(['HKEYS', key])!
+	mut result := []string{}
+	for item in response {
+		result << resp.get_redis_value(item)
+	}
+	return result
+}
+
+pub fn (mut r Redis) randomkey() !string {
+	return r.send_expect_strnil(['RANDOMKEY'])
+}
+
+pub fn (mut r Redis) strlen(key string) !int {
+	return r.send_expect_int(['STRLEN', key])
+}
+
+pub fn (mut r Redis) lpop(key string) !string {
+	return r.send_expect_strnil(['LPOP', key])
+}
+
+pub fn (mut r Redis) blpop(keys []string, timeout f64) ![]string {
+	mut request := ['BLPOP']
+	request << keys
+	request << '${timeout}'
+	res := r.send_expect_list_str(request)!
+	if res.len != 2 || res[1] == '' {
+		return error('timeout on blpop')
+	}
+	return res
+}
+
+pub fn (mut r Redis) brpop(keys []string, timeout f64) ![]string {
+	mut request := ['BRPOP']
+	request << keys
+	request << '${timeout}'
+	res := r.send_expect_list_str(request)!
+	if res.len != 2 {
+		return error('timeout on brpop')
+	}
+	return res
+}
+
+pub fn (mut r Redis) rpop(key string) !string {
+	return r.send_expect_strnil(['RPOP', key])
+}
+
+pub fn (mut r Redis) llen(key string) !int {
+	return r.send_expect_int(['LLEN', key])
+}
+
+pub fn (mut r Redis) ttl(key string) !int {
+	return r.send_expect_int(['TTL', key])
+}
+
+pub fn (mut r Redis) pttl(key string) !int {
+	return r.send_expect_int(['PTTL', key])
+}
+
+pub fn (mut r Redis) rename(key string, newkey string) ! {
+	return r.send_expect_ok(['RENAME', key, newkey])
+}
+
+pub fn (mut r Redis) renamenx(key string, newkey string) !int {
+	return r.send_expect_int(['RENAMENX', key, newkey])
+}
+
+pub fn (mut r Redis) setex(key string, second i64, value string) ! {
+	return r.send_expect_ok(['SETEX', key, second.str(), value])
+}
+
+pub fn (mut r Redis) psetex(key string, millisecond i64, value string) ! {
+	return r.send_expect_ok(['PSETEX', key, millisecond.str(), value])
+}
+
+pub fn (mut r Redis) setnx(key string, value string) !int {
+	return r.send_expect_int(['SETNX', key, value])
+}
+
+pub fn (mut r Redis) type_of(key string) !string {
+	return r.send_expect_strnil(['TYPE', key])
+}
+
+pub fn (mut r Redis) flushall() ! {
+	return r.send_expect_ok(['FLUSHALL'])
+}
+
+pub fn (mut r Redis) flushdb() ! {
+	return r.send_expect_ok(['FLUSHDB'])
+}
+
+// select is reserved
+pub fn (mut r Redis) selectdb(database int) ! {
+	return r.send_expect_ok(['SELECT', database.str()])
+}
+
+pub fn (mut r Redis) scan(cursor int) !(string, []string) {
+	res := r.send_expect_list(['SCAN', cursor.str()])!
+	if res[0] !is resp.RBString {
+		return error('Redis SCAN wrong response type (cursor)')
+	}
+
+	if res[1] !is resp.RArray {
+		return error('Redis SCAN wrong response type (list content)')
+	}
+
+	mut values := []string{}
+
+	for i in 0 .. resp.get_redis_array_len(res[1]) {
+		values << resp.get_redis_value_by_index(res[1], i)
+	}
+
+	return resp.get_redis_value(res[0]), values
+}
+
+// Add the specified members to the set stored at key. Specified members that are already a member
+// of this set are ignored. If key does not exist, a new set is created before adding the specified members.
+// An error is returned when the value stored at key is not a set.
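+// Usage sketch (hypothetical key and members), assuming a reachable redis:
+//   n := r.sadd('colors', ['red', 'green'])! // n = number of members newly added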
+pub fn (mut r Redis) sadd(key string, members []string) !int {
+	mut tosend := ['SADD', key]
+	for k in members {
+		tosend << k
+	}
+	return r.send_expect_int(tosend)
+}
+
+// Returns if member is a member of the set stored at key.
+pub fn (mut r Redis) smismember(key string, members []string) ![]int {
+	// mut key2 := key.trim("\"'")
+	mut tosend := ['SMISMEMBER', key]
+	for k in members {
+		tosend << k
+	}
+	res := r.send_expect_list_int(tosend)!
+	return res
+}
diff --git a/lib/clients/redisclient/redisclient_core.v b/lib/clients/redisclient/redisclient_core.v
new file mode 100644
index 00000000..d030d989
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_core.v
@@ -0,0 +1,24 @@
+module redisclient
+
+@[params]
+pub struct RedisURL {
+	address string = '127.0.0.1'
+	port    int    = 6379
+	// db int
+}
+
+pub fn get_redis_url(url string) !RedisURL {
+	if !url.contains(':') {
+		return error("url doesn't contain a port")
+	} else {
+		return RedisURL{
+			address: url.all_before_last(':')
+			port: url.all_after_last(':').int()
+		}
+	}
+}
+
+pub fn core_get(url RedisURL) !Redis {
+	mut r := new('${url.address}:${url.port}')!
+	return r
+}
diff --git a/lib/clients/redisclient/redisclient_encode.v b/lib/clients/redisclient/redisclient_encode.v
new file mode 100644
index 00000000..9180d352
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_encode.v
@@ -0,0 +1,173 @@
+module redisclient
+
+import freeflowuniverse.herolib.data.resp
+
+pub fn (mut r Redis) get_response() !resp.RValue {
+	line := r.read_line()!
+
+	if line.starts_with('-') {
+		return resp.RError{
+			value: line[1..]
+		}
+	}
+	if line.starts_with(':') {
+		return resp.RInt{
+			value: line[1..].int()
+		}
+	}
+	if line.starts_with('+') {
+		return resp.RString{
+			value: line[1..]
+		}
+	}
+	if line.starts_with('$') {
+		mut bulkstring_size := line[1..].int()
+		if bulkstring_size == -1 {
+			return resp.RNil{}
+		}
+		if bulkstring_size == 0 {
+			// extract final \r\n and not reading
+			// any payload
+			r.read_line()!
+			return resp.RString{
+				value: ''
+			}
+		}
+		// read payload
+		buffer := r.read(bulkstring_size) or { panic(err) }
+		// extract final \r\n
+		r.read_line()!
+		// console.print_debug("readline result:'$buffer.bytestr()'")
+		return resp.RBString{
+			value: buffer
+		} // TODO: won't support binary (afaik), need to fix? WHY not (despiegk)?
+	}
+
+	if line.starts_with('*') {
+		mut arr := resp.RArray{
+			values: []resp.RValue{}
+		}
+		items := line[1..].int()
+
+		// process each entry, they can be of any type
+		for _ in 0 .. items {
+			value := r.get_response()!
+			arr.values << value
+		}
+
+		return arr
+	}
+
+	return error('unsupported response type')
+}
+
+// TODO: needs to use the resp library
+
+pub fn (mut r Redis) get_int() !int {
+	line := r.read_line()!
+	if line.starts_with(':') {
+		return line[1..].int()
+	} else {
+		return error("Did not find int, did find:'${line}'")
+	}
+}
+
+pub fn (mut r Redis) get_list_int() ![]int {
+	line := r.read_line()!
+	mut res := []int{}
+
+	if line.starts_with('*') {
+		items := line[1..].int()
+		// process each entry, they can be of any type
+		for _ in 0 .. items {
+			value := r.get_int()!
+			res << value
+		}
+		return res
+	} else {
+		return error("Did not find list, did find:'${line}'")
+	}
+}
+
+pub fn (mut r Redis) get_list_str() ![]string {
+	line := r.read_line()!
+	mut res := []string{}
+
+	if line.starts_with('*') {
+		items := line[1..].int()
+		// process each entry, they can be of any type
+		for _ in 0 .. items {
+			value := r.get_string()!
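+			// each array element is itself a complete RESP frame (simple or bulk string)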
+			res << value
+		}
+		return res
+	} else {
+		return error("Did not find list, did find:'${line}'")
+	}
+}
+
+pub fn (mut r Redis) get_string() !string {
+	line := r.read_line()!
+	if line.starts_with('+') {
+		// console.print_debug("getstring:'${line[1..]}'")
+		return line[1..]
+	}
+	if line.starts_with('$') {
+		r2 := r.get_bytes_from_line(line)!
+		return r2.bytestr()
+	} else {
+		return error("Did not find string, did find:'${line}'")
+	}
+}
+
+pub fn (mut r Redis) get_string_nil() !string {
+	r2 := r.get_bytes_nil()!
+	return r2.bytestr()
+}
+
+pub fn (mut r Redis) get_bytes_nil() ![]u8 {
+	line := r.read_line()!
+	if line.starts_with('+') {
+		return line[1..].bytes()
+	}
+	if line.starts_with('$-1') {
+		return []u8{}
+	}
+	if line.starts_with('$') {
+		return r.get_bytes_from_line(line)
+	} else {
+		return error("Did not find string or nil, did find:'${line}'")
+	}
+}
+
+pub fn (mut r Redis) get_bool() !bool {
+	i := r.get_int()!
+	return i == 1
+}
+
+pub fn (mut r Redis) get_bytes() ![]u8 {
+	line := r.read_line()!
+	if line.starts_with('$') {
+		return r.get_bytes_from_line(line)
+	} else {
+		return error("Did not find bulkstring, did find:'${line}'")
+	}
+}
+
+fn (mut r Redis) get_bytes_from_line(line string) ![]u8 {
+	mut bulkstring_size := line[1..].int()
+	if bulkstring_size == -1 {
+		// return none
+		return error('bulkstring_size is -1')
+	}
+	if bulkstring_size == 0 {
+		// extract final \r\n, there is no payload
+		r.read_line()!
+		return []
+	}
+	// read payload
+	buffer := r.read(bulkstring_size) or { panic('Could not read payload: ${err}') }
+	// extract final \r\n
+	r.read_line()!
+	return buffer
+}
diff --git a/lib/clients/redisclient/redisclient_internal.v b/lib/clients/redisclient/redisclient_internal.v
new file mode 100644
index 00000000..eb018385
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_internal.v
@@ -0,0 +1,121 @@
+module redisclient
+
+import os
+import net
+import freeflowuniverse.herolib.data.resp
+import time
+import net.unix
+
+pub struct SetOpts {
+	ex       int = -4
+	px       int = -4
+	nx       bool
+	xx       bool
+	keep_ttl bool
+}
+
+pub enum KeyType {
+	t_none
+	t_string
+	t_list
+	t_set
+	t_zset
+	t_hash
+	t_stream
+	t_unknown
+}
+
+fn (mut r Redis) socket_connect() ! {
+	// print_backtrace()
+	addr := os.expand_tilde_to_home(r.addr)
+	// console.print_debug(' - REDIS CONNECT: ${addr}')
+	if !addr.contains(':') {
+		unix_socket := unix.connect_stream(addr)!
+		tcp_socket := net.tcp_socket_from_handle_raw(unix_socket.sock.Socket.handle)
+		tcp_conn := net.TcpConn{
+			sock:   tcp_socket
+			handle: unix_socket.sock.Socket.handle
+		}
+		r.socket = tcp_conn
+	} else {
+		r.socket = net.dial_tcp(addr)!
+	}
+
+	r.socket.set_blocking(true)!
+	r.socket.set_read_timeout(1 * time.second)
+	// console.print_debug("---OK")
+}
+
+fn (mut r Redis) socket_check() ! {
+	r.socket.peer_addr() or {
+		// console.print_debug(' - re-connect socket for redis')
+		r.socket_connect()!
+	}
+}
+
+pub fn (mut r Redis) read_line() !string {
+	return r.socket.read_line().trim_right('\r\n')
+}
+
+// write *all the data* into the socket
+// This function loops, till *everything is written*
+// (some of the socket write ops could be partial)
+fn (mut r Redis) write(data []u8) ! {
+	r.socket_check()!
+	mut remaining := data.len
+	for remaining > 0 {
+		written_bytes := r.socket.write(data[data.len - remaining..])!
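+		// the socket may accept fewer bytes than requested;
+		// subtract what was sent and loop on the remaining tail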
+		remaining -= written_bytes
+	}
+}
+
+fn (mut r Redis) read(size int) ![]u8 {
+	r.socket_check() or {}
+	mut buf := []u8{len: size}
+	mut remaining := size
+	for remaining > 0 {
+		read_bytes := r.socket.read(mut buf[buf.len - remaining..])!
+		remaining -= read_bytes
+	}
+	return buf
+}
+
+pub fn (mut r Redis) disconnect() {
+	r.socket.close() or {}
+}
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+
+// TODO: need to implement a way how to use multiple connections at once
+
+const cr_lf_bytes = [u8(`\r`), `\n`]
+
+fn (mut r Redis) write_line(data []u8) ! {
+	r.write(data)!
+	r.write(cr_lf_bytes)!
+}
+
+// write resp value to the redis channel
+pub fn (mut r Redis) write_rval(val resp.RValue) ! {
+	r.write(val.encode())!
+}
+
+// write a single string to the redis channel
+fn (mut r Redis) write_cmd(item string) ! {
+	a := resp.r_bytestring(item.bytes())
+	r.write_rval(a)!
+}
+
+// write a list of strings to the redis channel
+fn (mut r Redis) write_cmds(items []string) ! {
+	// if items.len==1{
+	//	a := resp.r_bytestring(items[0].bytes())
+	//	r.write_rval(a)!
+	// }{
+	a := resp.r_list_bstring(items)
+	r.write_rval(a)!
+	// }
+}
diff --git a/lib/clients/redisclient/redisclient_queue.v b/lib/clients/redisclient/redisclient_queue.v
new file mode 100644
index 00000000..258867e1
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_queue.v
@@ -0,0 +1,41 @@
+module redisclient
+
+import time
+
+pub struct RedisQueue {
+pub mut:
+	key   string
+	redis &Redis
+}
+
+pub fn (mut r Redis) queue_get(key string) RedisQueue {
+	return RedisQueue{
+		key:   key
+		redis: r
+	}
+}
+
+pub fn (mut q RedisQueue) add(val string) ! {
+	q.redis.lpush(q.key, val)!
+}
+
+// timeout in msec
+pub fn (mut q RedisQueue) get(timeout u64) !string {
+	start := u64(time.now().unix_milli())
+	for {
+		r := q.redis.rpop(q.key) or { '' }
+		if r != '' {
+			return r
+		}
+		if u64(time.now().unix_milli()) > (start + timeout) {
+			break
+		}
+		time.sleep(time.millisecond)
+	}
+	return error('timeout on ${q.key}')
+}
+
+// get without timeout, returns empty result if nothing is queued
+pub fn (mut q RedisQueue) pop() !string {
+	return q.redis.rpop(q.key)!
+}
diff --git a/lib/clients/redisclient/redisclient_rpc.v b/lib/clients/redisclient/redisclient_rpc.v
new file mode 100644
index 00000000..eec1b8be
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_rpc.v
@@ -0,0 +1,136 @@
+module redisclient
+
+import rand
+import time
+import json
+
+pub struct RedisRpc {
+pub mut:
+	key   string // queue name as used by this rpc
+	redis &Redis
+}
+
+// return a rpc mechanism
+pub fn (mut r Redis) rpc_get(key string) RedisRpc {
+	return RedisRpc{
+		key:   key
+		redis: r
+	}
+}
+
+pub struct RPCArgs {
+pub:
+	cmd     string @[required]
+	data    string @[required]
+	timeout u64 = 60000 // 60 sec
+	wait    bool = true
+}
+
+pub struct Message {
+pub:
+	ret_queue string
+	now       i64
+	cmd       string
+	data      string
+}
+
+pub struct Response {
+pub:
+	result string
+	error  string
+}
+
+// send data to a queue and wait till return comes back
+// timeout in milliseconds
+// params
+//	cmd string @[required]
+//	data string @[required]
+//	timeout u64=60000 //60 sec
+//	wait bool=true
+pub fn (mut q RedisRpc) call(args RPCArgs) !string {
+	retqueue := rand.uuid_v4()
+	now := time.now().unix()
+	message := Message{
+		ret_queue: retqueue
+		now:       now
+		cmd:       args.cmd
+		data:      args.data
+	}
+	encoded := json.encode(message)
+	q.redis.lpush(q.key, encoded)!
+
+	if args.wait {
+		return q.result(args.timeout, retqueue)!
+	}
+	return ''
+}
+
+// get return once result processed
+pub fn (mut q RedisRpc) result(timeout u64, retqueue string) !string {
+	start := u64(time.now().unix_milli())
+	for {
+		r := q.redis.rpop(retqueue) or { '' }
+		if r != '' {
+			res := json.decode(Response, r)!
+			if res.error != '' {
+				return error(res.error)
+			}
+			return res.result
+		}
+		if u64(time.now().unix_milli()) > (start + timeout) {
+			break
+		}
+		time.sleep(time.millisecond)
+	}
+	return error('timeout on returnqueue: ${retqueue}')
+}
+
+@[params]
+pub struct ProcessParams {
+pub:
+	timeout u64
+}
+
+// to be used by processor, to get request and execute, this is the server side of a RPC mechanism
+// op is the function which executes the job: fn (cmd string, data string) !string
+pub fn (mut q RedisRpc) process(op fn (string, string) !string, params ProcessParams) !string {
+	start := u64(time.now().unix_milli())
+	for {
+		r := q.redis.rpop(q.key) or { '' }
+		if r != '' {
+			msg := json.decode(Message, r)!
+
+			returnqueue := msg.ret_queue
+			// msg.now (the send time as epoch) is not used for anything yet
+			cmd := msg.cmd
+			data := msg.data
+			datareturn := op(cmd, data) or {
+				response := Response{
+					result: ''
+					error:  err.str()
+				}
+				encoded := json.encode(response)
+				q.redis.lpush(returnqueue, encoded)!
+				return ''
+			}
+			response := Response{
+				result: datareturn
+				error:  ''
+			}
+			encoded := json.encode(response)
+			q.redis.lpush(returnqueue, encoded)!
+			return returnqueue
+		}
+		if params.timeout != 0 && u64(time.now().unix_milli()) > (start + params.timeout) {
+			break
+		}
+		time.sleep(time.millisecond)
+	}
+	return error('timeout for waiting for cmd on ${q.key}')
+}
+
+// delete the queue used by this rpc
+pub fn (mut q RedisRpc) delete() ! {
+	q.redis.del(q.key)!
+}
diff --git a/lib/clients/redisclient/redisclient_sadd_test.v b/lib/clients/redisclient/redisclient_sadd_test.v
new file mode 100644
index 00000000..6563ebec
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_sadd_test.v
@@ -0,0 +1,25 @@
+import freeflowuniverse.herolib.clients.redisclient
+
+fn setup() !&redisclient.Redis {
+	mut redis := redisclient.core_get()!
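+	// core_get with no arguments connects to 127.0.0.1:6379 (the RedisURL defaults)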
+	redis.selectdb(10) or { panic(err) }
+	return &redis
+}
+
+fn cleanup(mut redis redisclient.Redis) ! {
+	redis.flushall()!
+	// redis.disconnect()
+}
+
+fn test_sadd() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+
+	redis.sadd('mysadd', ['a', 'b', 'c']) or { panic(err) }
+	r := redis.smismember('mysadd', ['a', 'b', 'c']) or { panic(err) }
+	assert r == [1, 1, 1]
+	r2 := redis.smismember('mysadd', ['a', 'd', 'c']) or { panic(err) }
+	assert r2 == [1, 0, 1]
+}
diff --git a/lib/clients/redisclient/redisclient_script.v b/lib/clients/redisclient/redisclient_script.v
new file mode 100644
index 00000000..68664a8a
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_script.v
@@ -0,0 +1,6 @@
+module redisclient
+
+// load a script and return the hash
+pub fn (mut r Redis) script_load(script string) !string {
+	return r.send_expect_str(['SCRIPT', 'LOAD', script])!
+}
diff --git a/lib/clients/redisclient/redisclient_send.v b/lib/clients/redisclient/redisclient_send.v
new file mode 100644
index 00000000..b68660ed
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_send.v
@@ -0,0 +1,55 @@
+module redisclient
+
+import freeflowuniverse.herolib.data.resp
+import freeflowuniverse.herolib.ui.console
+
+// send list of strings, expect OK back
+pub fn (mut r Redis) send_expect_ok(items []string) ! {
+	r.write_cmds(items)!
+	res := r.get_string()!
+	if res != 'OK' {
+		console.print_debug("'${res}'")
+		return error('did not get ok back')
+	}
+}
+
+// send list of strings, expect int back
+pub fn (mut r Redis) send_expect_int(items []string) !int {
+	r.write_cmds(items)!
+	return r.get_int()
+}
+
+pub fn (mut r Redis) send_expect_bool(items []string) !bool {
+	r.write_cmds(items)!
+	return r.get_bool()
+}
+
+// send list of strings, expect string back
+pub fn (mut r Redis) send_expect_str(items []string) !string {
+	r.write_cmds(items)!
+	return r.get_string()
+}
+
+// send list of strings, expect string or nil back
+pub fn (mut r Redis) send_expect_strnil(items []string) !string {
+	r.write_cmds(items)!
+	d := r.get_string_nil()!
+	return d
+}
+
+// send list of strings, expect list of strings back
+pub fn (mut r Redis) send_expect_list_str(items []string) ![]string {
+	r.write_cmds(items)!
+	return r.get_list_str()
+}
+
+pub fn (mut r Redis) send_expect_list_int(items []string) ![]int {
+	r.write_cmds(items)!
+	return r.get_list_int()
+}
+
+pub fn (mut r Redis) send_expect_list(items []string) ![]resp.RValue {
+	r.write_cmds(items)!
+	res := r.get_response()!
+	return resp.get_redis_array(res)
+}
diff --git a/lib/clients/redisclient/redisclient_test.v b/lib/clients/redisclient/redisclient_test.v
new file mode 100644
index 00000000..f15ef8c5
--- /dev/null
+++ b/lib/clients/redisclient/redisclient_test.v
@@ -0,0 +1,864 @@
+import freeflowuniverse.herolib.clients.redisclient
+import time
+import freeflowuniverse.herolib.ui.console
+// original code see https://github.com/patrickpissurno/vredis/blob/master/vredis_test.v
+// credits see there as well (-:
+
+fn setup() !&redisclient.Redis {
+	mut redis := redisclient.core_get()!
+	// Select db 10 to be away from default one '0'
+	redis.selectdb(10) or { panic(err) }
+	return &redis
+}
+
+fn cleanup(mut redis redisclient.Redis) ! {
+	redis.flushall()!
+	// redis.disconnect()
+}
+
+fn test_set() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	// console.print_debug('start')
+	// for _ in 0 .. 10000 {
+	//	redis.set('test0', '123')!
+	// }
+	console.print_debug('stop')
+	redis.set('test0', '456')!
+	res := redis.get('test0')!
+	assert res == '456'
+
+	redis.hset('x', 'a', '222')!
+	redis.hset('x', 'b', '333')!
+	mut res3 := redis.hget('x', 'b')!
+	assert res3 == '333'
+	redis.hdel('x', 'b')!
+	res3 = redis.hget('x', 'b')!
+	assert res3 == ''
+	e := redis.hexists('x', 'a')!
+	assert e
+}
+
+fn test_large_value() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	rr := 'SSS' + 'a'.repeat(40000) + 'EEE'
+	mut rr2 := ''
+	for _ in 0 .. 50 {
+		redis.set('test_large_value0', rr)!
+		rr2 = redis.get('test_large_value0')!
+		assert rr.len == rr2.len
+		assert rr == rr2
+	}
+	for i3 in 0 .. 100 {
+		redis.set('test_large_value${i3}', rr)!
+	}
+	for i4 in 0 .. 100 {
+		rr4 := redis.get('test_large_value${i4}')!
+		assert rr.len == rr4.len
+		redis.del('test_large_value${i4}')!
+	}
+}
+
+fn test_queue() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	mut q := redis.queue_get('kds:q')
+	q.add('test1')!
+	q.add('test2')!
+	mut res := q.get(1)!
+	assert res == 'test1'
+	res = q.get(1)!
+	assert res == 'test2'
+	console.print_debug('start')
+	res = q.get(100) or { '' }
+	console.print_debug('stop')
+	assert res == ''
+	console.print_debug(res)
+}
+
+fn test_scan() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	console.print_debug('stop')
+	redis.set('test3', '12')!
+	redis.set('test4', '34')!
+	redis.set('test5', '56')!
+	redis.set('test6', '78')!
+	redis.set('test7', '9')!
+	cursor, data := redis.scan(0)!
+	console.print_debug(data)
+	assert cursor == '0'
+}
+
+// fn test_set_opts() {
+//	mut redis := setup()!
+//	defer {
+//		cleanup(mut redis) or { panic(err) }
+//	}
+//	assert redis.set_opts('test8', '123', redisclient.SetOpts{
+//		ex: 2
+//	}) or {false}== true
+//	assert redis.set_opts('test8', '456', redisclient.SetOpts{
+//		px: 2000
+//		xx: true
+//	}) or {false} == true
+//	assert redis.set_opts('test8', '789', redisclient.SetOpts{
+//		px: 1000
+//		nx: true
+//	}) or {false}== false
+//	// Works with redis version > 6
+//	assert redis.set_opts('test8', '012', redisclient.SetOpts{ keep_ttl: true }) or {false}== true
+// }
+
+fn test_setex() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.setex('test9', 2, '123')!
+	mut r := redis.get('test9')!
+	assert r == '123'
+
+	time.sleep(2100 * time.millisecond)
+	r = redis.get('test9')!
+
+	assert r == ''
+}
+
+fn test_psetex() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.psetex('test10', 200, '123')!
+	mut r := redis.get('test10') or {
+		assert false
+		return
+	}
+	assert r == '123'
+
+	time.sleep(220 * time.millisecond)
+	r = redis.get('test10')!
+	assert r == ''
+}
+
+fn test_setnx() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	mut r1 := redis.setnx('test11', '123')!
+	assert r1 == 1
+	r1 = redis.setnx('test11', '456')!
+	assert r1 == 0
+
+	val := redis.get('test11') or {
+		assert false
+		return
+	}
+	assert val == '123'
+}
+
+fn test_incrby() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+
+	redis.set('test12', '100')!
+	r1 := redis.incrby('test12', 4) or {
+		assert false
+		return
+	}
+	assert r1 == 104
+
+	r2 := redis.incrby('test13', 2) or {
+		assert false
+		return
+	}
+	assert r2 == 2
+
+	redis.set('test14', 'nan')!
+	redis.incrby('test14', 1) or {
+		assert true
+		return
+	}
+	assert false
+}
+
+fn test_incr() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test15', '100')!
+	r1 := redis.incr('test15') or {
+		assert false
+		return
+	}
+	assert r1 == 101
+
+	r2 := redis.incr('test16') or {
+		assert false
+		return
+	}
+	assert r2 == 1
+
+	redis.set('test17', 'nan')!
+	redis.incr('test17') or {
+		assert true
+		return
+	}
+	assert false
+}
+
+fn test_decr() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test18', '100')!
+	r1 := redis.decr('test18') or {
+		assert false
+		return
+	}
+	assert r1 == 99
+
+	r2 := redis.decr('test19') or {
+		assert false
+		return
+	}
+	assert r2 == -1
+
+	redis.set('test20', 'nan')!
+	redis.decr('test20') or {
+		assert true
+		return
+	}
+	assert false
+}
+
+fn test_decrby() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test21', '100')!
+	r1 := redis.decrby('test21', 4) or {
+		assert false
+		return
+	}
+	assert r1 == 96
+
+	r2 := redis.decrby('test22', 2) or {
+		assert false
+		return
+	}
+	assert r2 == -2
+
+	redis.set('test23', 'nan')!
+	redis.decrby('test23', 1) or {
+		assert true
+		return
+	}
+	assert false
+}
+
+fn test_incrbyfloat() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test24', '3.1415')!
+	r1 := redis.incrbyfloat('test24', 3.1415) or {
+		assert false
+		return
+	}
+	assert r1 == 6.283
+
+	r2 := redis.incrbyfloat('test25', 3.14) or {
+		assert false
+		return
+	}
+	assert r2 == 3.14
+
+	r3 := redis.incrbyfloat('test25', -3.14) or {
+		assert false
+		return
+	}
+	assert r3 == 0
+
+	redis.set('test26', 'nan')!
+	redis.incrbyfloat('test26', 1.5) or {
+		assert true
+		return
+	}
+	assert false
+}
+
+fn test_append() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test27', 'bac')!
+	r1 := redis.append('test27', 'on') or {
+		assert false
+		return
+	}
+	assert r1 == 5
+
+	r2 := redis.get('test27') or {
+		assert false
+		return
+	}
+	assert r2 == 'bacon'
+}
+
+fn test_lpush() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r := redis.lpush('test28', 'item 1') or {
+		assert false
+		return
+	}
+	assert r == 1
+}
+
+fn test_rpush() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r := redis.rpush('test29', 'item 1') or {
+		assert false
+		return
+	}
+	assert r == 1
+}
+
+fn test_setrange() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r1 := redis.setrange('test30', 0, 'bac') or {
+		assert false
+		return
+	}
+	assert r1 == 3
+
+	r2 := redis.setrange('test30', 3, 'on') or {
+		assert false
+		return
+	}
+	assert r2 == 5
+}
+
+fn test_expire() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r1 := redis.expire('test31', 2) or {
+		assert false
+		return
+	}
+	assert r1 == 0
+
+	redis.set('test31', '123')!
+	r2 := redis.expire('test31', 2) or {
+		assert false
+		return
+	}
+	assert r2 == 1
+}
+
+fn test_pexpire() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r1 := redis.pexpire('test32', 200) or {
+		assert false
+		return
+	}
+	assert r1 == 0
+
+	redis.set('test32', '123')!
+	r2 := redis.pexpire('test32', 200) or {
+		assert false
+		return
+	}
+	assert r2 == 1
+}
+
+fn test_expireat() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r1 := redis.expireat('test33', 1293840000) or {
+		assert false
+		return
+	}
+	assert r1 == 0
+
+	redis.set('test33', '123')!
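+	// 1293840000 is 2011-01-01 UTC, i.e. in the past: EXPIREAT still returns 1
+	// for an existing key and simply deletes it right away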
+	r2 := redis.expireat('test33', 1293840000) or {
+		assert false
+		return
+	}
+	assert r2 == 1
+}
+
+fn test_pexpireat() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r1 := redis.pexpireat('test34', 1555555555005) or {
+		assert false
+		return
+	}
+	assert r1 == 0
+
+	redis.set('test34', '123')!
+	r2 := redis.pexpireat('test34', 1555555555005) or {
+		assert false
+		return
+	}
+	assert r2 == 1
+}
+
+fn test_persist() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r1 := redis.persist('test35') or {
+		assert false
+		return
+	}
+	assert r1 == 0
+	redis.setex('test35', 2, '123')!
+	r2 := redis.persist('test35') or {
+		assert false
+		return
+	}
+	assert r2 == 1
+}
+
+fn test_get() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test36', '123')!
+	mut r := redis.get('test36')!
+	assert r == '123'
+	assert helper_get_key_not_found(mut redis, 'test37') == true
+}
+
+fn test_getset() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	mut r1 := redis.getset('test38', '10') or { '' }
+	assert r1 == ''
+
+	r2 := redis.getset('test38', '15') or {
+		assert false
+		return
+	}
+	assert r2 == '10'
+
+	r3 := redis.get('test38') or {
+		assert false
+		return
+	}
+	assert r3 == '15'
+}
+
+fn test_getrange() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test39', 'community')!
+	r1 := redis.getrange('test39', 4, -1) or {
+		assert false
+		return
+	}
+	assert r1 == 'unity'
+
+	r2 := redis.getrange('test40', 0, -1) or {
+		assert false
+		return
+	}
+	assert r2 == ''
+}
+
+fn test_randomkey() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	assert helper_randomkey_database_empty(mut redis) == true
+	redis.set('test41', '123')!
+	r2 := redis.randomkey() or {
+		assert false
+		return
+	}
+	assert r2 == 'test41'
+	assert helper_get_key_not_found(mut redis, 'test42') == true
+}
+
+fn test_strlen() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test43', 'bacon')!
+	r1 := redis.strlen('test43') or {
+		assert false
+		return
+	}
+	assert r1 == 5
+
+	r2 := redis.strlen('test44') or {
+		assert false
+		return
+	}
+	assert r2 == 0
+}
+
+fn test_lpop() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.lpush('test45', '123') or {
+		assert false
+		return
+	}
+	r1 := redis.lpop('test45') or {
+		assert false
+		return
+	}
+	assert r1 == '123'
+	assert helper_lpop_key_not_found(mut redis, 'test46') == true
+}
+
+fn test_rpop() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.lpush('test47', '123') or {
+		assert false
+		return
+	}
+	r1 := redis.rpop('test47') or {
+		assert false
+		return
+	}
+	assert r1 == '123'
+	assert helper_rpop_key_not_found(mut redis, 'test48') == true
+}
+
+fn test_brpop() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.lpush('test47', '123')!
+	redis.lpush('test48', 'balbal')!
+	r1 := redis.brpop(['test47', 'test48'], 1)!
+	assert r1[0] == 'test47'
+	assert r1[1] == '123'
+	r2 := redis.brpop(['test47', 'test48'], 1)!
+	assert r2[0] == 'test48'
+	assert r2[1] == 'balbal'
+	redis.brpop(['test47'], 1) or { return }
+	assert false, 'brpop should timeout'
+}
+
+fn test_lrpop() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.lpush('test47', '123')!
+	redis.lpush('test48', 'balbal')!
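+	// blpop scans the given keys in the order supplied, so test47 is served before test48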
+	r1 := redis.blpop(['test47', 'test48'], 1)!
+	assert r1[0] == 'test47'
+	assert r1[1] == '123'
+	r2 := redis.blpop(['test47', 'test48'], 1)!
+	assert r2[0] == 'test48'
+	assert r2[1] == 'balbal'
+	redis.blpop(['test47'], 1) or { return }
+	assert false, 'blpop should timeout'
+}
+
+fn test_llen() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r1 := redis.lpush('test49', '123') or {
+		assert false
+		return
+	}
+	r2 := redis.llen('test49') or {
+		assert false
+		return
+	}
+	assert r2 == r1
+
+	r3 := redis.llen('test50') or {
+		assert false
+		return
+	}
+	assert r3 == 0
+
+	redis.set('test51', 'not a list')!
+	redis.llen('test51') or {
+		assert true
+		return
+	}
+	assert false
+}
+
+fn test_ttl() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.setex('test52', 15, '123')!
+	r1 := redis.ttl('test52') or {
+		assert false
+		return
+	}
+	assert r1 == 15
+
+	redis.set('test53', '123')!
+	r2 := redis.ttl('test53') or {
+		assert false
+		return
+	}
+	assert r2 == -1
+
+	r3 := redis.ttl('test54') or {
+		assert false
+		return
+	}
+	assert r3 == -2
+}
+
+fn test_pttl() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.psetex('test55', 1500, '123')!
+	r1 := redis.pttl('test55') or {
+		assert false
+		return
+	}
+	assert r1 >= 1490 && r1 <= 1500
+
+	redis.set('test56', '123')!
+	r2 := redis.pttl('test56') or {
+		assert false
+		return
+	}
+	assert r2 == -1
+
+	r3 := redis.pttl('test57') or {
+		assert false
+		return
+	}
+	assert r3 == -2
+}
+
+fn test_exists() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	r1 := redis.exists('test58') or {
+		assert false
+		return
+	}
+	assert r1 == false
+
+	redis.set('test59', '123')!
+	r2 := redis.exists('test59') or {
+		assert false
+		return
+	}
+	assert r2 == true
+}
+
+fn test_type_of() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	_ := redis.type_of('test60') or {
+		assert true
+		return
+	}
+
+	redis.set('test61', '123')!
+	mut r := redis.type_of('test61') or {
+		assert false
+		return
+	}
+	assert r == 'string'
+
+	_ := redis.lpush('test62', '123')!
+	r = redis.type_of('test62') or {
+		assert false
+		return
+	}
+	assert r == 'list'
+}
+
+fn test_del() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test63', '123')!
+	c := redis.del('test63') or {
+		assert false
+		return
+	}
+	assert c == 1
+	assert helper_get_key_not_found(mut redis, 'test63') == true
+}
+
+fn test_rename() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.rename('test64', 'test65') or { console.print_debug('key not found') }
+	redis.set('test64', 'will be 65')!
+	redis.rename('test64', 'test65')!
+	r := redis.get('test65') or {
+		assert false
+		return
+	}
+	assert r == 'will be 65'
+}
+
+fn test_renamenx() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	assert helper_renamenx_err_helper(mut redis, 'test66', 'test67') == 'no such key'
+	redis.set('test68', '123')!
+	redis.set('test66', 'will be 67')!
+	r1 := redis.renamenx('test66', 'test67') or {
+		assert false
+		return
+	}
+	assert r1 == 1
+
+	r2 := redis.get('test67') or {
+		assert false
+		return
+	}
+	assert r2 == 'will be 67'
+
+	r3 := redis.renamenx('test67', 'test68') or {
+		assert false
+		return
+	}
+	assert r3 == 0
+}
+
+fn test_flushall() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test69', '123')!
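+	// flushall wipes every database on the server, not only the selected db 10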
+	redis.flushall()!
+	assert helper_get_key_not_found(mut redis, 'test69') == true
+}
+
+fn test_keys() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	redis.set('test70:1', '1')!
+	redis.set('test70:2', '2')!
+	r1 := redis.keys('test70:*') or {
+		assert false
+		return
+	}
+	assert r1.len == 2
+}
+
+fn helper_get_key_not_found(mut redis redisclient.Redis, key string) bool {
+	return redis.get(key) or {
+		if err.msg() == 'key not found' || err.msg() == '' {
+			return true
+		} else {
+			return false
+		}
+	} == ''
+}
+
+fn helper_randomkey_database_empty(mut redis redisclient.Redis) bool {
+	return redis.randomkey() or {
+		if err.msg() == 'database is empty' || err.msg() == '' {
+			return true
+		} else {
+			return false
+		}
+	} == ''
+}
+
+fn helper_renamenx_err_helper(mut redis redisclient.Redis, key string, newkey string) string {
+	redis.renamenx(key, newkey) or { return 'no such key' }
+	return ''
+}
+
+fn helper_lpop_key_not_found(mut redis redisclient.Redis, key string) bool {
+	return redis.lpop(key) or {
+		if err.msg() == 'key not found' || err.msg() == '' {
+			return true
+		} else {
+			return false
+		}
+	} == ''
+}
+
+fn helper_rpop_key_not_found(mut redis redisclient.Redis, key string) bool {
+	return redis.rpop(key) or {
+		if err.msg() == 'key not found' || err.msg() == '' {
+			return true
+		} else {
+			return false
+		}
+	} == ''
+}
diff --git a/lib/clients/redisclient/rpc_test.v b/lib/clients/redisclient/rpc_test.v
new file mode 100644
index 00000000..a2802aca
--- /dev/null
+++ b/lib/clients/redisclient/rpc_test.v
@@ -0,0 +1,33 @@
+import freeflowuniverse.herolib.clients.redisclient
+import freeflowuniverse.herolib.ui.console
+
+fn setup() !&redisclient.Redis {
+	mut redis := redisclient.core_get()!
+	// Select db 10 to be away from default one '0'
+	redis.selectdb(10) or { panic(err) }
+	return &redis
+}
+
+fn cleanup(mut redis redisclient.Redis) ! {
+	redis.flushall()!
+	// redis.disconnect()
+}
+
+fn process_test(cmd string, data string) !string {
+	return '${cmd}+++++${data}\n\n\n\n'
+}
+
+fn test_rpc() {
+	mut redis := setup()!
+	defer {
+		cleanup(mut redis) or { panic(err) }
+	}
+	mut r := redis.rpc_get('testrpc')
+
+	r.call(cmd: 'test.cmd', data: 'this is my data, normally json', wait: false)!
+	returnqueue := r.process(process_test, timeout: 10000)!
+	mut res := r.result(10000, returnqueue)!
+	console.print_debug(res)
+
+	assert res.str().trim_space() == 'test.cmd+++++this is my data, normally json'
+}
diff --git a/lib/clients/sendgrid/README.md b/lib/clients/sendgrid/README.md
new file mode 100644
index 00000000..30e779a3
--- /dev/null
+++ b/lib/clients/sendgrid/README.md
@@ -0,0 +1,53 @@
+# SendGrid Client
+
+The SendGrid module allows you to use SendGrid services.
+
+## About SendGrid
+
+SendGrid is a cloud-based email delivery and communication platform that empowers businesses and developers to send transactional and marketing emails to their customers or users. It offers tools and APIs to manage email campaigns, monitor delivery, and gather analytics on recipient engagement.
+
+## Requirements
+
+To utilize this module, you will need:
+
+- A SendGrid API key: Create a SendGrid account and acquire your API key [here](https://sendgrid.com/).
+
+## Usage
+
+To send an email using the SendGrid module, follow these steps:
+
+### 1. Set Up a new email
+
+In your V code, set up the email as shown below:
+
+```v
+email := sendgrid.new_email(
+	['target_email@example.com', 'target_email2@example.com'],
+	'source_email@example.com',
+	'Email Title', 'Email content; can include HTML')
+```
+
+### 2. Execute the program
+
+You can execute the program using the following command:
+
+```shell
+v run sendgrid/example/main.v -t "YOUR_API_TOKEN"
+```
+
+You can provide the API key using the `-t` command-line argument, or you can export the API key using the following command:
+
+```shell
+export SENDGRID_AUTH_TOKEN="YOUR_API_TOKEN"
+```
+
+Additionally, you can enable debug mode by passing the `-d` flag:
+
+```shell
+v run sendgrid/example/main.v -d -t "YOUR_API_TOKEN"
+```
+
+## Advanced
+
+We provide some useful structs and methods in [email](./email) and [personalization](./personalizations.v) that you can leverage to tailor the emails according to your specific requirements.
+You can check the SendGrid API reference [here](https://docs.sendgrid.com/api-reference/how-to-use-the-sendgrid-v3-api/)
diff --git a/lib/clients/sendgrid/client.v b/lib/clients/sendgrid/client.v
new file mode 100644
index 00000000..0bcf8940
--- /dev/null
+++ b/lib/clients/sendgrid/client.v
@@ -0,0 +1,41 @@
+module sendgrid
+
+import net.http
+import json
+
+pub struct Client {
+pub:
+	token string
+}
+
+const send_api_endpoint = 'https://api.sendgrid.com/v3/mail/send'
+
+pub fn new_client(token string) !Client {
+	if token.len == 0 {
+		return error('empty token')
+	}
+
+	return Client{
+		token: token
+	}
+}
+
+fn (c Client) get_headers() !http.Header {
+	headers_map := {
+		'Authorization': 'Bearer ${c.token}'
+		'Content-Type':  'application/json'
+	}
+	headers := http.new_custom_header_from_map(headers_map)!
+
+	return headers
+}
+
+pub fn (c Client) send(email Email) ! {
+	mut request := http.new_request(http.Method.post, send_api_endpoint, json.encode(email))
+	request.header = c.get_headers()!
+
+	res := request.do()!
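+	// SendGrid's v3 mail/send endpoint answers 202 Accepted on success,
+	// so anything else is surfaced as an error carrying the response body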
+	if res.status_code != int(http.Status.accepted) {
+		return error(res.body)
+	}
+}
diff --git a/lib/clients/sendgrid/email.v b/lib/clients/sendgrid/email.v
new file mode 100644
index 00000000..1d4c0e97
--- /dev/null
+++ b/lib/clients/sendgrid/email.v
@@ -0,0 +1,152 @@
+module sendgrid
+
+pub struct Content {
+	type_ string = 'text/html' @[json: 'type']
+	value string
+}
+
+struct Recipient {
+	email string @[required]
+	name  ?string
+}
+
+struct Attachment {
+	content     string @[required]
+	type_       ?string @[json: 'type']
+	filename    string @[required]
+	disposition ?string
+	content_id  ?string
+}
+
+struct UnsubscribeGroups {
+	group_id         i64 @[required]
+	group_to_display []i64
+}
+
+struct BypassListManagement {
+	enable ?bool
+}
+
+struct BypassBounceManagement {
+	enable ?bool
+}
+
+struct BypassUnsubscribeManagement {
+	enable ?bool
+}
+
+struct Footer {
+	enable ?bool
+	text   ?string
+	html   ?string
+}
+
+struct SandboxMode {
+	enable ?bool
+}
+
+struct MailSettings {
+	bypass_list_management        ?BypassListManagement
+	bypass_bounce_management      ?BypassBounceManagement
+	bypass_unsubscribe_management ?BypassUnsubscribeManagement
+	footer                        ?Footer
+	sandbox_mode                  ?SandboxMode
+}
+
+struct ClickTrackingSettings {
+	enable      ?bool
+	enable_text ?bool
+}
+
+struct OpenTrackingSettings {
+	enable           ?bool
+	substitution_tag ?string
+}
+
+struct SubscriptionTrackingSettings {
+	enable           ?bool
+	text             ?string
+	html             ?string
+	substitution_tag ?string
+}
+
+struct GoogleAnalyticsSettings {
+	enable       ?bool
+	utm_source   ?string
+	utm_medium   ?string
+	utm_term     ?string
+	utm_content  ?string
+	utm_campaign ?string
+}
+
+struct TrackingSettings {
+	click_tracking        ?ClickTrackingSettings
+	open_tracking         ?OpenTrackingSettings
+	subscription_tracking ?SubscriptionTrackingSettings
+	ganalytics            ?GoogleAnalyticsSettings
+}
+
+pub struct Email {
+pub mut:
+	personalizations  []Personalizations @[required]
+	from              Recipient @[required]
+	subject           string    @[required]
+	content           []Content @[required]
+	reply_to          ?Recipient
+	reply_to_list     ?[]Recipient
+	attachments       ?[]Attachment
+	template_id       ?string
+	headers           ?map[string]string
+	categories        ?[]string
+	custom_args       ?string
+	send_at           ?i64
+	batch_id          ?string
+	asm_              ?UnsubscribeGroups @[json: 'asm']
+	ip_pool_name      ?string
+	mail_settings     ?MailSettings
+	tracking_settings ?TrackingSettings
+}
+
+pub fn (mut e Email) add_personalization(personalizations []Personalizations) {
+	e.personalizations << personalizations
+}
+
+pub fn (mut e Email) add_content(content []Content) {
+	e.content << content
+}
+
+pub fn (mut e Email) add_headers(headers map[string]string) {
+	e.headers or {
+		e.headers = headers.clone()
+		return
+	}
+
+	for k, v in headers {
+		e.headers[k] = v
+	}
+}
+
+pub fn new_email(to []string, from string, subject string, content string) Email {
+	mut recipients := []Recipient{}
+
+	for email in to {
+		recipients << Recipient{
+			email: email
+		}
+	}
+
+	personalization := Personalizations{
+		to: recipients
+	}
+
+	return Email{
+		personalizations: [personalization]
+		from: Recipient{
+			email: from
+		}
+		subject: subject
+		content: [Content{
+			value: content
+		}]
+	}
+}
diff --git a/lib/clients/sendgrid/personalizations.v b/lib/clients/sendgrid/personalizations.v
new file mode 100644
index 00000000..908d5d9b
--- /dev/null
+++ b/lib/clients/sendgrid/personalizations.v
@@ -0,0 +1,102 @@
+module sendgrid
+
+@[params]
+pub struct Personalizations {
+pub mut:
+	to      []Recipient @[required]
+	from    ?Recipient
+	cc      ?[]Recipient
+	bcc     ?[]Recipient
+	subject ?string
+	headers ?map[string]string
+	substitutions         ?map[string]string
+	dynamic_template_data ?map[string]string
+	custom_args           ?map[string]string
+	send_at               ?i64
+}
+
+// add_to adds a list of recipients to which this email should be sent.
+fn (mut p Personalizations) add_to(r []Recipient) {
+	p.to << r
+}
+
+// set_from assigns the from field in the email.
+fn (mut p Personalizations) set_from(r Recipient) {
+	p.from = r
+}
+
+// add_cc adds an array of recipients who will receive a copy of your email.
+fn (mut p Personalizations) add_cc(r []Recipient) {
+	p.cc or {
+		p.cc = r
+		return
+	}
+
+	for item in r {
+		p.cc << item
+	}
+}
+
+// set_subject assigns the subject of the email.
+fn (mut p Personalizations) set_subject(s string) {
+	p.subject = s
+}
+
+// add_headers adds a map of key/value pairs to specify handling instructions for your email.
+// if some of the new headers already existed, their values are overwritten.
+fn (mut p Personalizations) add_headers(new_headers map[string]string) {
+	p.headers or {
+		p.headers = new_headers.clone()
+		return
+	}
+
+	for k, v in new_headers {
+		p.headers[k] = v
+	}
+}
+
+// add_substitution adds a map of key/value pairs to allow you to insert data without using Dynamic Transactional Templates.
+// if some of the keys already existed, their values are overwritten.
+fn (mut p Personalizations) add_substitution(new_subs map[string]string) {
+	p.substitutions or {
+		p.substitutions = new_subs.clone()
+		return
+	}
+
+	for k, v in new_subs {
+		p.substitutions[k] = v
+	}
+}
+
+// add_dynamic_template_data adds a map of key/value pairs to dynamic template data.
+// Dynamic template data is available using Handlebars syntax in Dynamic Transactional Templates.
+// if some of the keys already existed, their values are overwritten.
+fn (mut p Personalizations) add_dynamic_template_data(new_dynamic_template_data map[string]string) {
+	p.dynamic_template_data or {
+		p.dynamic_template_data = new_dynamic_template_data.clone()
+		return
+	}
+
+	for k, v in new_dynamic_template_data {
+		p.dynamic_template_data[k] = v
+	}
+}
+
+// add_custom_args adds a map of key/value pairs to custom_args.
+// custom args are values that are specific to this personalization that will be carried along with the email and its activity data.
+// if some of the keys already existed, their values are overwritten.
+fn (mut p Personalizations) add_custom_args(new_custom_args map[string]string) {
+	p.custom_args or {
+		p.custom_args = new_custom_args.clone()
+		return
+	}
+
+	for k, v in new_custom_args {
+		p.custom_args[k] = v
+	}
+}
+
+// set_send_at specifies when your email should be delivered. scheduling delivery more than 72 hours in advance is forbidden.
+fn (mut p Personalizations) set_send_at(send_at i64) {
+	p.send_at = send_at
+}
diff --git a/lib/clients/zdb/readme.md b/lib/clients/zdb/readme.md
new file mode 100644
index 00000000..52674ed5
--- /dev/null
+++ b/lib/clients/zdb/readme.md
@@ -0,0 +1,26 @@
+## Vlang ZDB Client
+
+to use:
+
+- build zero-db from source: https://github.com/threefoldtech/0-db
+- run zero-db from the root of the 0-db folder:
+  `./zdbd/zdb --help || true` for more info
+
+## to run the test
+
+```bash
+#must set unix domain with --socket argument when running zdb
+#run zdb as following:
+mkdir -p ~/.zdb
+zdb --socket ~/.zdb/socket --admin 1234
+redis-cli -s ~/.zdb/socket
+#or easier:
+redis-cli -s ~/.zdb/socket --raw nsinfo default
+```
+
+then inside redis-cli you can run, e.g.
+
+```
+nsinfo default
+```
+
diff --git a/lib/clients/zdb/zdb.v b/lib/clients/zdb/zdb.v
new file mode 100644
index 00000000..846f4b5a
--- /dev/null
+++ b/lib/clients/zdb/zdb.v
@@ -0,0 +1,229 @@
+module zdb
+
+import freeflowuniverse.herolib.clients.redisclient
+import freeflowuniverse.herolib.ui.console
+
+pub struct ZDB {
+pub mut:
+	redis redisclient.Redis
+}
+
+// https://redis.io/topics/protocol
+// examples:
+// localhost:6379
+// /tmp/redis-default.sock
+pub fn get(addr string, auth string, namespace string) !ZDB {
+	console.print_header(' ZDB get: addr:${addr} namespace:${namespace}')
+	mut redis := redisclient.get(addr)!
+	mut zdb := ZDB{
+		redis: redis
+	}
+
+	if auth != '' {
+		zdb.redis.send_expect_ok(['AUTH', auth])!
+	}
+
+	if namespace != '' {
+		mut namespaces := zdb.redis.send_expect_list_str(['NSLIST'])!
+		namespaces = namespaces.map(it.to_lower())
+
+		if namespace.to_lower() !in namespaces {
+			zdb.redis.send_expect_ok(['NSNEW', namespace])!
+		}
+	}
+
+	return zdb
+}
+
+pub fn (mut zdb ZDB) ping() !string {
+	return zdb.redis.send_expect_str(['PING'])!
+}
+
+// if key not specified will get incremental key
+pub fn (mut zdb ZDB) set(key string, val string) !string {
+	return zdb.redis.send_expect_str(['SET', key, val])!
+}
+
+pub fn (mut zdb ZDB) get(key string) !string {
+	return zdb.redis.send_expect_str(['GET', key])!
+}
+
+pub fn (mut zdb ZDB) mget(key string) !string {
+	return zdb.redis.send_expect_str(['GET', key])!
+}
+
+pub fn (mut zdb ZDB) del(key string) !string {
+	return zdb.redis.send_expect_str(['DEL', key])!
+}
+
+// used only for debugging, to check memory leaks
+pub fn (mut zdb ZDB) stop() !string {
+	return zdb.redis.send_expect_str(['STOP'])!
+}
+
+pub fn (mut zdb ZDB) exists(key string) !string {
+	return zdb.redis.send_expect_str(['EXISTS', key])!
+}
+
+pub fn (mut zdb ZDB) check(key string) !string {
+	return zdb.redis.send_expect_str(['CHECK', key])!
+}
+
+pub fn (mut zdb ZDB) keycur(key string) !string {
+	return zdb.redis.send_expect_str(['KEYCUR', key])!
+}
+
+pub fn (mut zdb ZDB) info() !string {
+	i := zdb.redis.send_expect_str(['INFO'])!
+	return i
+}
+
+pub fn (mut zdb ZDB) nsnew(namespace string) !string {
+	i := zdb.redis.send_expect_str(['NSNEW', namespace])!
+	return i
+}
+
+pub fn (mut zdb ZDB) nsdel(namespace string) !string {
+	i := zdb.redis.send_expect_str(['NSDEL', namespace])!
+	return i
+}
+
+pub fn (mut zdb ZDB) nsinfo(namespace string) !map[string]string {
+	i := zdb.redis.send_expect_str(['NSINFO', namespace])!
+	mut res := map[string]string{}
+
+	for line in i.split_into_lines() {
+		if line.starts_with('#') {
+			continue
+		}
+		if !(line.contains(':')) {
+			continue
+		}
+		splitted := line.split(':')
+		key := splitted[0]
+		val := splitted[1]
+		res[key.trim_space()] = val.trim_space()
+	}
+	return res
+}
+
+pub fn (mut zdb ZDB) nslist() ![]string {
+	i := zdb.redis.send_expect_list_str(['NSLIST'])!
+	return i
+}
+
+pub fn (mut zdb ZDB) nssset(ns string, prop string, val string) !string {
+	i := zdb.redis.send_expect_str(['NSSET', ns, prop, val])!
+	return i
+}
+
+struct SelectArgs {
+	namespace string
+	password  string
+}
+
+pub fn (mut zdb ZDB) select_ns(args SelectArgs) !string {
+	mut redis_args := ['SELECT', args.namespace]
+	if args.password != '' {
+		redis_args << 'SECURE'
+		redis_args << args.password
+	}
+	i := zdb.redis.send_expect_str(redis_args)!
+	return i
+}
+
+pub fn (mut zdb ZDB) dbsize() !string {
+	i := zdb.redis.send_expect_str(['DBSIZE'])!
+	return i
+}
+
+pub fn (mut zdb ZDB) time() !string {
+	i := zdb.redis.send_expect_str(['TIME'])!
+	return i
+}
+
+pub fn (mut zdb ZDB) auth(password string) !string {
+	i := zdb.redis.send_expect_str(['AUTH', password])!
+	return i
+}
+
+pub fn (mut zdb ZDB) auth_secure() !string {
+	i := zdb.redis.send_expect_str(['AUTH', 'SECURE'])!
+	return i
+}
+
+pub struct ScanArgs {
+	cursor string
+}
+
+pub fn (mut zdb ZDB) scan(args ScanArgs) !string {
+	mut redis_args := ['SCAN']
+	if args.cursor != '' {
+		redis_args << args.cursor
+	}
+	i := zdb.redis.send_expect_str(redis_args)!
+	return i
+}
+
+// this is just an alias for SCAN
+pub fn (mut zdb ZDB) scanx(args ScanArgs) !string {
+	mut redis_args := ['SCANX']
+	if args.cursor != '' {
+		redis_args << args.cursor
+	}
+	i := zdb.redis.send_expect_str(redis_args)!
+	return i
+}
+
+pub fn (mut zdb ZDB) rscan(args ScanArgs) !string {
+	mut redis_args := ['RSCAN']
+	if args.cursor != '' {
+		redis_args << args.cursor
+	}
+	i := zdb.redis.send_expect_str(redis_args)!
+	return i
+}
+
+struct WaitArgs {
+	cmd     string
+	timeout string = '5'
+}
+
+pub fn (mut zdb ZDB) wait(args WaitArgs) !string {
+	i := zdb.redis.send_expect_str(['WAIT', args.cmd, args.timeout])!
+	return i
+}
+
+struct HistoryArgs {
+	key      string
+	bin_data string
+}
+
+pub fn (mut zdb ZDB) history(args HistoryArgs) ![]string {
+	mut redis_args := ['HISTORY', args.key]
+	if args.bin_data != '' {
+		redis_args << args.bin_data
+	}
+	i := zdb.redis.send_expect_list_str(redis_args)!
+	return i
+}
+
+pub fn (mut zdb ZDB) flush() !string {
+	i := zdb.redis.send_expect_str(['FLUSH'])!
+	return i
+}
+
+pub fn (mut zdb ZDB) hooks() ![]string {
+	i := zdb.redis.send_expect_list_str(['HOOKS'])!
+	return i
+}
+
+pub fn (mut zdb ZDB) index_dirty() ![]string {
+	i := zdb.redis.send_expect_list_str(['INDEX DIRTY'])!
+	return i
+}
+
+pub fn (mut zdb ZDB) index_dirty_reset() !string {
+	i := zdb.redis.send_expect_str(['INDEX DIRTY RESET'])!
+	return i
+}
diff --git a/lib/clients/zdb/zdb_test.v b/lib/clients/zdb/zdb_test.v
new file mode 100644
index 00000000..90da09c7
--- /dev/null
+++ b/lib/clients/zdb/zdb_test.v
@@ -0,0 +1,19 @@
+module zdb
+
+// TODO: enable this test when we have running zdb in ci also implement missing tests
+fn test_get() {
+	// // must set unix domain with --socket argument when running zdb
+	// // run zdb as following:
+	// // mkdir -p ~/.zdb/ && zdb --socket ~/.zdb/socket --admin 1234
+	// mut zdb := get('~/.zdb/socket', '1234', 'test')!
+
+	// // check info returns info about zdb
+	// info := zdb.info()!
+	// assert info.contains('server_name: 0-db')
+
+	// nslist := zdb.nslist()!
+	// assert nslist == ['default', 'test']
+
+	// nsinfo := zdb.nsinfo('default')!
+	// assert 'name: default' in nsinfo
+}
diff --git a/lib/core/installers/redis.v b/lib/core/installers/redis.v
index cef3bfc7..86aa87a2 100644
--- a/lib/core/installers/redis.v
+++ b/lib/core/installers/redis.v
@@ -42,7 +42,7 @@ pub fn install(args_ InstallArgs) ! {
 	if osal.is_linux() {
 		osal.package_install('redis-server')!
 	} else {
-		osal.package_install('redis')!/Users/despiegk1/code/github/freeflowuniverse/crystallib/crystallib/installers/db/redis/template
+		osal.package_install('redis')!
 	}
 }
 osal.execute_silent('mkdir -p ${args.datadir}')!
diff --git a/lib/core/pathlib/factory.v b/lib/core/pathlib/factory.v
new file mode 100644
index 00000000..48e0093d
--- /dev/null
+++ b/lib/core/pathlib/factory.v
@@ -0,0 +1,136 @@
+module pathlib
+
+import os
+
+// gets Path object, will check if it exists and whether it is a dir, file or link
+pub fn get(path_ string) Path {
+	mut p2 := get_no_check(path_)
+	p2.check()
+	return p2
+}
+
+pub fn get_no_check(path_ string) Path {
+	mut path := path_
+	if path.contains('~') {
+		path = path.replace('~', os.home_dir())
+	}
+	if path.contains('file://') {
+		path = path.trim_string_left('file://')
+	}
+	mut p2 := Path{
+		path: path
+	}
+	if p2.path.contains('..') {
+		p2.path = p2.absolute()
+	}
+	return p2
+}
+
+@[params]
+pub struct GetArgs {
+pub mut:
+	path   string
+	create bool
+	check  bool = true // means will check the dir, link or file exists
+	empty  bool // will empty the dir or the file
+	delete bool
+}
+
+// get a directory, or needs to be created
+// if the dir doesn't exist and is not created, then there will be an error
+pub fn get_dir(args_ GetArgs) !Path {
+	mut args := args_
+	if args.empty {
+		args.create = true
+	}
+	if args.create {
+		args.check = true
+	}
+	mut p2 := get_no_check(args.path)
+	if args.check {
+		p2.check()
+		p2.absolute()
+		if p2.exist == .no {
+			if args.create {
+				os.mkdir_all(p2.absolute()) or { return error('cannot create path ${p2}, ${err}') } // Make sure that all the needed paths created
+				p2.check()
+			}
+			return p2
+		}
+		if !p2.is_dir() {
+			return error('Path ${args.path} is not a dir.')
+		}
+		if args.empty {
+			p2.empty()!
+		}
+		if args.delete {
+			p2.delete()!
+		}
+	}
+	return p2
+}
+
+pub fn get_file(args_ GetArgs) !Path {
+	mut args := args_
+	if args.empty {
+		args.create = true
+	}
+	if args.create {
+		args.check = true
+	}
+	mut p2 := get_no_check(args.path)
+	if args.check {
+		p2.check()
+		if args.create {
+			mut parent_ := p2.parent()!
+			parent_.check()
+			if parent_.exist == .no {
+				os.mkdir_all(parent_.path) or { return error('cannot create path:${args.path}') }
+			}
+			if p2.exist == .no || args.empty {
+				os.write_file(args.path, '') or {
+					return error('cannot create empty file:${args.path} ${err}')
+				}
+				p2.check()
+			}
+		}
+		if p2.exists() && !p2.is_file() {
+			return error('Path ${args.path} is not a file.')
+		}
+		if args.delete {
+			p2.delete()!
+		}
+	}
+	return p2
+}
+
+pub fn get_link(args_ GetArgs) !Path {
+	mut args := args_
+	if args.create {
+		return error("can't create link out of nothing")
+	}
+	mut p2 := get_no_check(args.path)
+	if args.check {
+		p2.check()
+		if !p2.exists() {
+			p2.cat = Category.linkfile
+			return p2
+		}
+		if !p2.is_link() {
+			return error('Path ${args.path} is not a link.')
+		}
+		if args.delete {
+			p2.delete()!
+		}
+		if args.empty {
+			mut p3 := p2.getlink()!
+			p3.empty()!
+		}
+	}
+	return p2
+}
+
+// gets working directory
+pub fn get_wd() Path {
+	return get_dir(path: os.getwd()) or { panic('This should never happen') }
+}
diff --git a/lib/core/pathlib/path.v b/lib/core/pathlib/path.v
new file mode 100644
index 00000000..7a6d0f56
--- /dev/null
+++ b/lib/core/pathlib/path.v
@@ -0,0 +1,133 @@
+module pathlib
+
+import freeflowuniverse.herolib.core.texttools
+import os
+
+@[heap]
+pub struct Path {
+pub mut:
+	path  string
+	cat   Category
+	exist UYN
+}
+
+pub enum Category {
+	unknown
+	file
+	dir
+	linkdir
+	linkfile
+}
+
+pub enum UYN {
+	unknown
+	yes
+	no
+}
+
+// return absolute path .
+// careful: symlinks will not be resolved
+pub fn (path Path) absolute() string {
+	mut p := path.path.replace('~', os.home_dir())
+	return os.abs_path(p)
+}
+
+// return absolute path .
+// careful: the symlinks will be followed !!!
+pub fn (path Path) realpath() string {
+	mut p := path.path.replace('~', os.home_dir())
+	mut p2 := os.real_path(p)
+	p2 = os.abs_path(p2)
+	return p2
+}
+
+pub fn (path Path) shortpath() string {
+	return path.realpath().replace(os.home_dir(), '~')
+}
+
+// check the inside of the path object, is like an init function
+pub fn (mut path Path) check() {
+	if os.exists(path.path) {
+		path.exist = .yes
+		if os.is_file(path.path) {
+			if os.is_link(path.path) {
+				path.cat = Category.linkfile
+			} else {
+				path.cat = Category.file
+			}
+		} else if os.is_dir(path.path) {
+			if os.is_link(path.path) {
+				path.cat = Category.linkdir
+			} else {
+				path.cat = Category.dir
+			}
+		} else {
+			panic('cannot define type: ${path.path}, this is a bug')
+		}
+	} else {
+		path.exist = .no
+	}
+}
+
+fn (mut path Path) check_exists() ! {
+	if !path.exists() {
+		return error('Path ${path} needs to exist, error')
+	}
+}
+
+// returns name with extension
+pub fn (path Path) name() string {
+	return os.base(path.path)
+}
+
+// return name with all lowercase_special chars done and also no extension
+pub fn (mut path Path) name_fix_no_underscore_no_ext() string {
+	return texttools.name_fix_no_underscore_no_ext(path.name_no_ext())
+}
+
+// return name with all lowercase_special chars done but keep extension
+pub fn (mut path Path) name_fix_keepext() string {
+	return texttools.name_fix_keepext(path.name())
+}
+
+pub fn (mut path Path) name_fix_no_ext() string {
+	return texttools.name_fix_no_ext(path.name())
+}
+
+// full path of dir
+pub fn (mut path Path) path_dir() string {
+	return os.dir(path.path)
+}
+
+// QUESTION: should this mutate path's name, probably not?
+pub fn (mut path Path) name_no_ext() string {
+	mut name := path.name()
+	if name.contains('.') {
+		name = name.all_before_last('.')
+	}
+	if name == '' {
+		return path.name()
+	}
+	return name
+}
+
+pub fn (mut path Path) path_no_ext() string {
+	return path.path_dir() + '/' + path.name_no_ext()
+}
+
+pub fn (mut path Path) name_ends_with_underscore() bool {
+	return path.name_no_ext().ends_with('_')
+}
+
+// return a path which has name ending with _
+pub fn (mut path Path) path_get_name_with_underscore() string {
+	if path.name_ends_with_underscore() {
+		return path.path
+	} else {
+		return path.path.all_before_last('.') + '_.' + path.extension()
+	}
+}
+
+// pub fn (mut p Path) str() string {
+//	return 'path: $p.path'
+// }
diff --git a/lib/core/pathlib/path_backup.v b/lib/core/pathlib/path_backup.v
new file mode 100644
index 00000000..e2deb564
--- /dev/null
+++ b/lib/core/pathlib/path_backup.v
@@ -0,0 +1,169 @@
+module pathlib
+
+import os
+import freeflowuniverse.herolib.ui.console
+// import time
+
+@[params]
+pub struct BackupArgs {
+pub mut:
+	root      string
+	dest      string
+	overwrite bool
+	restore   bool // if we want to find the latest one, if we can't find one then its error
+}
+
+// start from existing name and look for name.$nr.$ext, nr needs to be unique, ideal for backups
+// if dest is "" then the directory of the file itself + "/.backup" will be used
+// e.g. /code/myaccount/despiegk/somedir/test.v
+// would be backed up to /code/myaccount/despiegk/somedir/.backup/test.1.v
+// root is the start of the dir we process
+// e.g. /code/myaccount/despiegk/somedir/test.v
/code/myaccount/despiegk/somedir/test.v
+// if source = /code/myaccount/despiegk and dest = /backup then the file will be backed up to /backup/somedir/test.1.v
+//
+// struct BackupArgs{
+//	root string
+//	dest string
+//	overwrite bool
+//	restore bool //if we want to find the latest one, if we can't find one then it's an error
+// }
+// if overwrite this means we will overwrite the last one in the directory
+pub fn (mut path Path) backup_path(args BackupArgs) !Path {
+	if !path.exists() && args.restore == false {
+		return error('cannot find path, so cannot create backup for ${path}')
+	}
+	if args.dest != '' || args.root != '' {
+		panic('not implemented')
+	}
+	mut dest := ''
+	mut rel := ''
+
+	if args.dest == '' {
+		dest = path.path_dir() + '/.backup'
+	}
+	if !os.exists(dest) {
+		os.mkdir_all(dest)!
+	}
+
+	// if source != '' {
+	// 	path_abs := path.absolute()
+	// 	mut source_path := Path{
+	// 		path: source
+	// 	}.absolute()
+	// 	if path_abs.starts_with(source_path) {
+	// 		rel = os.dir(path_abs.substr(source_path.len + 1, path_abs.len)) + '/'
+	// 	}
+	// }
+	// os.mkdir_all('$dest/$rel')!
+
+	for i in 0 .. 1000 {
+		console.print_debug(i.str())
+		path_str := '${dest}/${rel}${path.name_no_ext()}.${path.extension()}.${i}'
+		path_str_next := '${dest}/${rel}${path.name_no_ext()}.${path.extension()}.${i + 1}'
+		mut path_found := Path{
+			path: path_str
+			cat: .file
+		}
+		mut path_found_next := Path{
+			path: path_str_next
+			cat: .file
+		}
+		if !path_found.exists() {
+			if args.restore {
+				return error('could not find a backup file in ${path_found.path} for restore')
+			}
+			path_found.check() // refresh the exist flag before handing it out
+			return path_found
+		}
+
+		size := path_found.size()!
+
+		if size > 0 {
+			// console.print_debug("size > 0 ")
+			// this makes sure we only continue if there is no next file, we only need to check size for latest one
+			if !path_found_next.exists() {
+				// means it is the last file
+				// console.print_debug("current: ${path_found}")
+				// console.print_debug("next: ${path_found_next}")
+				// console.print_debug(args)
+				if args.restore || args.overwrite {
+					// console.print_debug("RESTORE: $path_found")
+					return path_found
+				}
+				size2 := path.size()! // compare against the file we are trying to back up
+				if size2 == size {
+					// means the last backup has the same size as the one we are trying to backup
+					// console.print_debug("*** SIZE EQUAL EXISTS")
+					path_found.exist = .yes
+					return path_found
+				}
+				// console.print_debug("nothing")
+			}
+		}
+	}
+	return error('cannot find path for backup')
+}
+
+// create a backup, will maintain the extension
+pub fn (mut path Path) backup(args BackupArgs) !Path {
+	// console.print_debug(path.path)
+	mut pbackup := path.backup_path(args)!
+	if !pbackup.exists() {
+		os.cp(path.path, pbackup.path)!
+	}
+	return pbackup
+}
+
+pub fn (mut path Path) restore(args BackupArgs) ! {
+	// console.print_debug("restore")
+	// console.print_debug(path.path)
+	mut args2 := args
+	args2.restore = true
+	mut prestore := path.backup_path(args2)!
+	if args.overwrite || !path.exists() {
+		os.cp(prestore.path, path.path)!
+	} else {
+		return error('Cannot restore, because the file to be restored exists: ${path.path}\n${args}')
+	}
+}
+
+pub fn (mut path Path) backups_remove(args BackupArgs) ! {
+	mut pl := path.list(recursive: true)!
+	for mut p in pl.paths {
+		if p.is_dir() {
+			if p.name() == '.backup' {
+				p.delete()!
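+				// any dir literally named '.backup' found in the tree is removed here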
+ } + } + } + // TODO: is not good enough, can be other path +} + +// //represents one directory in which backup was done +// struct BackupDir{ +// pub mut: +// items []BackupItem +// path Path //path where the backed up items are in +// } + +// pub struct BackupItem{ +// pub: +// name string //only the base name of the file +// hash string +// time time.Time +// backupdir &BackupDir +// } + +// get the pathobject +// pub fn (bi BackupItem) path_get() Path { +// return get("${bi.backupdir.path.path}/${bi.name}") +// } + +// //save the metadata for the backups +// pub fn (mut backupdir BackupDir) metadate_save() ! { +// mut out :=[]string{} +// // for item in backupdir.items{ +// // out << item.metadata() +// // } +// } diff --git a/lib/core/pathlib/path_copy.v b/lib/core/pathlib/path_copy.v new file mode 100644 index 00000000..496a457b --- /dev/null +++ b/lib/core/pathlib/path_copy.v @@ -0,0 +1,64 @@ +module pathlib + +import os + +@[params] +pub struct CopyArgs { +pub mut: + dest string // path + delete bool // if true will remove files which are on dest which are not on source + rsync bool = true // we use rsync as default + ssh_target string // e.g. root@195.192.213.2:999 + ignore []string // arguments to ignore e.g. ['*.pyc','*.bak'] + ignore_default bool = true // if set will ignore a common set +} + +// copy file,dir is always recursive +// if ssh_target used then will copy over ssh e.g. . +// dest needs to be a directory or file . +// return Path of the destination file or dir . +pub fn (mut path Path) copy(args_ CopyArgs) ! { + mut args := args_ + if args.ignore.len > 0 || args.ssh_target.len > 0 { + args.rsync = true + } + path.check() + if !path.exists() { + return error("can't find path for copy operation on ${path.path}") + } + if args.rsync == true { + rsync( + source: path.path + dest: args.dest + delete: args.delete + ipaddr_dst: args.ssh_target + ignore: args.ignore + ignore_default: args.ignore_default + )! + } else { + mut dest := get(args.dest) + if dest.exists() { + if !(path.cat in [.file, .dir] && dest.cat in [.file, .dir]) { + return error('Source or Destination path is not file or directory.\n\n${path.path} cat:${path.cat}---${dest.path} cat:${dest.cat}') + } + if path.cat == .dir && dest.cat == .file { + return error("Can't copy directory to file") + } + } + if path.cat == .file && dest.cat == .dir { + // In case src is a file and dest is dir, we need to join the file name to the dest file + file_name := os.base(path.path) + dest.path = os.join_path(dest.path, file_name) + } + + if !os.exists(dest.path_dir()) { + os.mkdir_all(dest.path_dir())! + } + // $if debug { + // console.print_debug(' copy: ${path.path} ${dest.path}') + // } + os.cp_all(path.path, dest.path, true)! // Always overwite if needed + + dest.check() + } +} diff --git a/lib/core/pathlib/path_crypto.v b/lib/core/pathlib/path_crypto.v new file mode 100644 index 00000000..86420764 --- /dev/null +++ b/lib/core/pathlib/path_crypto.v @@ -0,0 +1,9 @@ +module pathlib + +import crypto.sha256 + +// return sha256 hash of a file +pub fn (mut path Path) sha256() !string { + c := path.read()! 
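+	// note: read()! loads the whole file into memory; for very large files the
+	// chunked hashing approach used in path_md5.v would be the safer pattern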
+ return sha256.hexhash(c) +} diff --git a/lib/core/pathlib/path_extend.v b/lib/core/pathlib/path_extend.v new file mode 100644 index 00000000..6bd8cd7e --- /dev/null +++ b/lib/core/pathlib/path_extend.v @@ -0,0 +1,95 @@ +module pathlib + +// join parts to a path and return path, returns a new path, create if needed +pub fn (mut p Path) extend_dir_create(parts ...string) !Path { + mut out := p.path + if !p.is_dir() { + return error('Cannot only extend a dir.') + } + if p.exists() == false { + return error("Cannot extend a dir if it doesn't exist") + } + for part in parts { + if part.contains('~') { + return error('cannot extend part ${part} if ~ in') + } + part2 := part.trim(' ') + out += '/' + part2.trim('/') + } + out = out.replace('//', '/') + mut p2 := get_dir(path: out, create: true)! + return p2 +} + +// only works for a dir +pub fn (mut p Path) extend_file(name string) !Path { + if !p.is_dir() { + return error('Cannot only extend a dir.') + } + if p.exists() == false { + return error("Cannot extend a dir if it doesn't exist") + } + + mut out := p.path + if name.contains('~') { + return error('cannot extend dir if ~ in name: ${name}') + } + out += '/' + name.trim('/') + out = out.replace('//', '/') + mut p2 := get_file(path: out)! + return p2 +} + +// extend the path, path stays same, no return +// if dir, needs to stay dir +// anything else fails +pub fn (mut path Path) extend(parts ...string) ! { + if !path.is_dir() { + return error('can only extend dir, ${path}') + } + for part in parts { + if part.contains('~') { + return error('cannot extend part to ${part} if ~ in') + } + part2 := part.trim(' ') + path.path += '/' + part2 + } + if path.exists() { + if !path.is_dir() { + return error('can only extend dir if is dir again.') + } + } + path.path = path.path.replace('//', '/') + path.check() +} + +// pub fn (path Path) extend_dir(relpath string) ! { + +// relpath2 = relpath2.replace("\\","/") + +// if path.cat != Category.dir{ +// return error("cannot only extend a dir, not a file or a link. $path") +// } +// return dir_new("$path/relpath2") +// } + +// pub fn (path Path) extend_file_exists(relpath string) !Path { + +// mut relpath2 := relpath.trim(" ") + +// relpath2 = relpath2.replace("\\","/") + +// if path.cat != Category.dir{ +// return error("cannot only extend a dir, not a file or a link. $path") +// } +// return file_new_exists("$path/relpath2") +// } + +// pub fn (path Path) extend_exists(relpath string) !Path { + +// p2 := path.extend(relpath)! +// if ! 
p2.exists(){ +// return error("cannot extend $path with $relpath, directory does not exist") +// } +// return p2 +// } diff --git a/lib/core/pathlib/path_is.v b/lib/core/pathlib/path_is.v new file mode 100644 index 00000000..13b12a09 --- /dev/null +++ b/lib/core/pathlib/path_is.v @@ -0,0 +1,59 @@ +module pathlib + +const image_exts = ['jpg', 'jpeg', 'png', 'gif', 'svg'] + +const image_exts_basic = ['jpg', 'jpeg', 'png'] + +pub fn (mut path Path) is_dir() bool { + if path.cat == Category.unknown { + // panic('did not check path yet, category unknown') + path.check() + } + return path.cat == Category.dir || path.cat == Category.linkdir +} + +// check is dir and a link +pub fn (mut path Path) is_dir_link() bool { + if path.cat == .unknown { + // panic('did not check path yet') + path.check() + } + return path.cat == Category.linkdir +} + +// is a file but no link +pub fn (mut path Path) is_file() bool { + if path.cat == .unknown { + // panic('did not check path yet') + path.check() + } + return path.cat == Category.file +} + +pub fn is_image(path string) bool { + if path.contains('.') { + ext := path.all_after_last('.').to_lower() + return image_exts.contains(ext) + } + return false +} + +pub fn (path Path) is_image() bool { + e := path.extension().to_lower() + // console.print_debug("is image: $e") + return image_exts.contains(e) +} + +pub fn (path Path) is_image_jpg_png() bool { + e := path.extension().to_lower() + // console.print_debug("is image: $e") + return image_exts_basic.contains(e) +} + +pub fn (path Path) is_link() bool { + if path.cat == .unknown { + // console.print_debug(path) + panic('did not check path yet.') + } + return path.cat == Category.linkfile || path.cat == Category.linkdir +} diff --git a/lib/core/pathlib/path_link.v b/lib/core/pathlib/path_link.v new file mode 100644 index 00000000..430cbbd8 --- /dev/null +++ b/lib/core/pathlib/path_link.v @@ -0,0 +1,117 @@ +module pathlib + +import os +// import freeflowuniverse.herolib.ui.console + +// path needs to be existing +// linkpath is where the link will be (the symlink who points to path) +pub fn (mut path Path) link(linkpath string, delete_exists bool) !Path { + if !path.exists() { + return error('cannot link because source ${path.path} does not exist') + } + if !(path.cat == .file || path.cat == .dir) { + return error('cannot link because source ${path.path} can only be dir or file') + } + + if path_equal(path.path, linkpath) { + return error('try to link to myself. Link dest & source same. ${linkpath}') + } + // TODO: add test to confirm existing faulty link also are removed + // os.exists for faulty links returns false so also checks if path is link + if os.exists(linkpath) || os.is_link(linkpath) { + if delete_exists { + mut linkpath_obj := get(linkpath) + linkpath_obj.delete()! + } else { + return error('cannot link ${path.path} to ${linkpath}, because dest exists.') + } + } + + mut origin_path := '' + dest_dir := os.dir(linkpath) + if !os.exists(dest_dir) { + os.mkdir_all(dest_dir)! + } + if path.cat == .dir { + origin_path = path_relative(dest_dir, path.path)! + } else { + origin_path = path_relative(dest_dir, path.path)! 
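+		// note: both branches above compute the same relative path today; the
+		// split is presumably kept for future dir-specific handling (assumption)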
+	}
+	// console.print_debug("${dest_dir} ::: ${origin_path} ::: ${linkpath}")
+
+	msg := 'link to origin (source): ${path.path} \nthe link:${linkpath} \nlink rel: ${origin_path}'
+	// TODO: figure out why os.symlink doesn't work for linking file into dir
+	os.symlink(origin_path, linkpath) or { return error("can't symlink ${msg}\n${err}") }
+	return get(linkpath)
+}
+
+// will make sure that the link goes from the file with the largest path to the smallest
+// good to make sure we always create links in the same way
+pub fn (mut path Path) relink() ! {
+	if !path.is_link() {
+		return
+	}
+
+	link_abs_path := path.absolute() // symlink not followed
+	link_real_path := path.realpath() // this is with the symlink resolved
+	if compare_strings(link_abs_path, link_real_path) >= 0 {
+		// means the shortest path is the target (or if same size its sorted and the first)
+		return
+	}
+	// need to switch link with the real content
+	path.unlink()! // make sure both are files now (the link is the file)
+	path.link(link_real_path, true)! // re-link
+	path.check()
+
+	// TODO: in test script
+}
+
+// resolve link to the real content
+// copy the target of the link to the link
+pub fn (mut path Path) unlink() ! {
+	if !path.is_link() {
+		// nothing to do because it is not a link, will not give an error
+		return
+	}
+	if path.is_dir() {
+		return error('Cannot unlink a directory: ${path.path}')
+	}
+	link_abs_path := path.absolute()
+	link_real_path := path.realpath() // this is with the symlink resolved
+	mut link_path := get(link_real_path)
+	// $if debug {
+	// 	console.print_header(" copy source file:'${link_real_path}' of link to link loc:'${link_abs_path}'")
+	// }
+	mut destpath := get(link_abs_path + '.temp') // lets first copy to the .temp location
+	link_path.copy(dest: destpath.path)! // copy to the temp location
+	path.delete()! // remove the file or dir which is link
+	destpath.rename(path.name())! // rename to the new path
+	path.path = destpath.path // put path back
+	path.check()
+	// TODO: in test script
+}
+
+// return the link target as a string
+pub fn (mut path Path) readlink() !string {
+	// console.print_stdout('path: $path')
+	if path.is_link() {
+		// console.print_stdout('path2: $path')
+		cmd := 'readlink ${path.path}'
+		res := os.execute(cmd)
+		if res.exit_code > 0 {
+			return error('cannot read link target of ${path} \n${res.output}')
+		}
+		return res.output.trim_space()
+	} else {
+		return error('can only read link info when the path is a filelink or dirlink. ${path}')
+	}
+}
+
+// return path object which is the result of the link (path the link points to)
+pub fn (mut path Path) getlink() !Path {
+	if path.is_link() {
+		return get(path.realpath())
+	} else {
+		return error('can only get link when the path is a filelink or dirlink. 
${path}') + } +} diff --git a/lib/core/pathlib/path_link_test.v b/lib/core/pathlib/path_link_test.v new file mode 100644 index 00000000..2467fb4f --- /dev/null +++ b/lib/core/pathlib/path_link_test.v @@ -0,0 +1,147 @@ +import freeflowuniverse.herolib.core.pathlib { Path } +import freeflowuniverse.herolib.ui.console +import os + +const testpath = os.dir(@FILE) + '/examples/test_path' + +fn testsuite_begin() { + console.print_debug('create files for link test') + os.rmdir_all(os.dir(@FILE) + '/examples') or {} + assert !os.is_dir(testpath) + os.mkdir_all(testpath) or { panic(err) } + os.mkdir_all('${testpath}/test_parent') or { panic(err) } + os.create('${testpath}/testfile1.md') or { panic(err) } + os.create('${testpath}/test_parent/testfile2.md') or { panic(err) } + os.create('${testpath}/test_parent/testfile3.md') or { panic(err) } +} + +fn testsuite_end() { + os.rmdir_all(os.dir(@FILE) + '/examples') or {} +} + +fn test_link() { + testsuite_begin() + console.print_stdout('************ TEST_link ************') + mut source1 := pathlib.get('${testpath}/test_parent/testfile2.md') + mut source2 := pathlib.get('${testpath}/test_parent/testfile3.md') + mut source3 := pathlib.get('${testpath}/testfile1.md') + + assert source1.exists() + assert source2.exists() + assert source3.exists() + + // link to a parent + mut link11 := source3.link('${testpath}/test_parent/uplink', true) or { + panic('no uplink: ${err}') + } + mut link11_link := pathlib.get('${testpath}/test_parent/uplink') + path11 := link11_link.readlink() or { panic(err) } + assert path11 == '../testfile1.md' + + // test delete exists with nonexistent dest + mut dest := pathlib.get('${testpath}/test_link.md') + assert !dest.exists() + mut link1 := source1.link(dest.path, true) or { panic('no link: ${err}') } + assert link1.path == '${testpath}/test_link.md' + dest = pathlib.get('${testpath}/test_link.md') + assert dest.exists() + + // test delete exists with existing dest + assert dest.realpath() == source1.path + mut link2 := source2.link(dest.path, true) or { panic('no link ${err}') } + assert link2.path == '${testpath}/test_link.md' + assert link2.realpath() != source1.path + assert link2.realpath() == source2.path + + // test delete_exists false with existing dest + dest = pathlib.get('${testpath}/test_link.md') + assert dest.realpath() == source2.path + mut link3 := source1.link(dest.path, false) or { Path{} } + assert link3.path == '' // link should error so check empty path obj + dest = pathlib.get('${testpath}/test_link.md') + assert dest.realpath() == source2.path // dest reamins unchanged + + dest.delete() or {} + console.print_stdout('Link function working correctly') +} + +fn test_readlink() { + testsuite_begin() + console.print_stdout('************ TEST_readlink ************') + // test with none link path + mut source := pathlib.get('${testpath}/test_parent/testfile2.md') + mut dest_ := '${testpath}/test_readlink.md' + path := source.readlink() or { '' } + assert path == '' // is not a link so cannot read + + // test with filelink path + mut link := source.link(dest_, true) or { panic('error: ${err}') } + mut dest := pathlib.get(dest_) + + assert dest.cat == .linkfile + assert dest.path == dest_ + + link_source := dest.readlink() or { panic(err) } + assert link_source == 'test_parent/testfile2.md' + + dest.delete() or {} + console.print_stdout('Readlink function working correctly') +} + +// fn test_unlink() { +// console.print_stdout('************ TEST_unlink ************') +// // test with filelink path + +// mut source 
:= pathlib.get('${testpath}/test_parent/testfile3.md') +// mut dest_ := '${testpath}/test_unlink.md' + +// mut link := source.link(dest_, true) or { panic('error: ${err}') } +// mut dest := pathlib.get(dest_) + +// // TODO: check if content is from source + +// assert dest.cat == .linkfile +// dest.unlink() or { panic('Failed to unlink: ${err}') } +// assert dest.exists() +// assert dest.cat == .file + +// dest.delete()! + +// // TODO: maybe more edge cases? +// console.print_stdout('Unlink function working correctly') +// } + +fn test_relink() { + testsuite_begin() + console.print_stdout('************ TEST_relink ************') + + mut source := pathlib.get('${testpath}/test_parent/testfile2.md') + mut dest_ := '${testpath}/test_relink.md' + mut link := source.link(dest_, true) or { panic('error: ${err}') } + mut dest := pathlib.get(dest_) + + // linked correctly so doesn't change + assert source.cat == .file + assert dest.cat == .linkfile + dest.relink() or { panic('Failed to relink: ${err}') } + source_new := pathlib.get(source.path) + assert source_new.cat == .file + assert dest.cat == .linkfile + + // switching source and destination + mut source2 := pathlib.get(dest_) + source2.unlink() or { panic('Failed to unlink: ${err}') } + mut dest2_ := source.path + + // linked incorrectly so should relink + mut link2 := source2.link(dest2_, true) or { panic('error: ${err}') } + mut dest2 := pathlib.get(dest2_) + assert source2.cat == .file + assert dest2.cat == .linkfile + dest2.relink() or { panic('Failed to relink: ${err}') } + source2_new := pathlib.get(source2.path) + assert source2_new.cat == .linkfile + assert dest2.cat == .file + + dest.delete()! +} diff --git a/lib/core/pathlib/path_list.v b/lib/core/pathlib/path_list.v new file mode 100644 index 00000000..34048873 --- /dev/null +++ b/lib/core/pathlib/path_list.v @@ -0,0 +1,174 @@ +module pathlib + +import os +import regex +// import freeflowuniverse.herolib.core.smartid +import freeflowuniverse.herolib.ui.console + +@[params] +pub struct ListArgs { +pub mut: + regex []string + recursive bool = true + ignoredefault bool = true // ignore files starting with . and _ + include_links bool // wether to include links in list + dirs_only bool + files_only bool +} + +// the result of pathlist +pub struct PathList { +pub mut: + // is the root under which all paths are, think about it like a changeroot environment + root string + paths []Path +} + +// list all files & dirs, follow symlinks . +// will sort all items . +// return as list of Paths . +// . +// params: . +// ``` +// regex []string +// recursive bool // std off, means we recursive not over dirs by default +// ignoredefault bool = true // ignore files starting with . and _ +// dirs_only bool +// +// example see https://github.com/freeflowuniverse/herolib/blob/development/examples/core/pathlib/examples/list/path_list.v +// +// e.g. p.list(regex:[r'.*\.v$'])! 
//notice the r in front of string, this is regex for all files ending with .v +// +// ``` +// please note links are ignored for walking over dirstructure (for files and dirs) +pub fn (mut path Path) list(args_ ListArgs) !PathList { + // $if debug { + // console.print_header(' list: ${args_}') + // } + mut r := []regex.RE{} + for regexstr in args_.regex { + mut re := regex.regex_opt(regexstr) or { + return error("cannot create regex for:'${regexstr}'") + } + // console.print_debug(re.get_query()) + r << re + } + mut args := ListArgsInternal{ + regex: r + recursive: args_.recursive + ignoredefault: args_.ignoredefault + dirs_only: args_.dirs_only + files_only: args_.files_only + include_links: args_.include_links + } + paths := path.list_internal(args)! + mut pl := PathList{ + root: path.path + paths: paths + } + return pl +} + +@[params] +pub struct ListArgsInternal { +mut: + regex []regex.RE // only put files in which follow one of the regexes + recursive bool = true + ignoredefault bool = true // ignore files starting with . and _ + dirs_only bool + files_only bool + include_links bool +} + +fn (mut path Path) list_internal(args ListArgsInternal) ![]Path { + debug := false + path.check() + + if !path.is_dir() && (!path.is_dir_link() || !args.include_links) { + // return error('Path must be directory or link to directory') + return []Path{} + } + if debug { + console.print_header(' ${path.path}') + } + mut ls_result := os.ls(path.path) or { []string{} } + ls_result.sort() + mut all_list := []Path{} + for item in ls_result { + if debug { + console.print_stdout(' - ${item}') + } + p := os.join_path(path.path, item) + mut new_path := get(p) + // Check for dir and linkdir + if !new_path.exists() { + // to deal with broken link + continue + } + if new_path.is_link() && !args.include_links { + continue + } + if args.ignoredefault { + if item.starts_with('_') || item.starts_with('.') { + continue + } + } + if new_path.is_dir() || (new_path.is_dir_link() && args.include_links) { + // If recusrive + if args.recursive { + mut rec_list := new_path.list_internal(args)! + all_list << rec_list + } else { + if !args.files_only { + all_list << new_path + } + continue + } + } + + mut addthefile := true + for r in args.regex { + if !(r.matches_string(item)) { + addthefile = false + } + } + if addthefile && !args.dirs_only { + if !args.files_only || new_path.is_file() { + all_list << new_path + } + } + } + return all_list +} + +// copy all +pub fn (mut pathlist PathList) copy(dest string) ! { + for mut path in pathlist.paths { + path.copy(dest: dest)! + } +} + +// delete all +pub fn (mut pathlist PathList) delete() ! { + for mut path in pathlist.paths { + path.delete()! + } +} + +// sids_acknowledge . +// pub fn (mut pathlist PathList) sids_acknowledge(cid smartid.CID) ! { +// for mut path in pathlist.paths { +// path.sids_acknowledge(cid)! +// } +// } + +// // sids_replace . +// // find parts of text in form sid:*** till sid:****** . +// // replace all occurrences with new sid's which are unique . +// // cid = is the circle id for which we find the id's . +// // sids will be replaced in the files if they are different +// pub fn (mut pathlist PathList) sids_replace(cid smartid.CID) ! { +// for mut path in pathlist.paths { +// path.sids_replace(cid)! 
+// } +// } diff --git a/lib/core/pathlib/path_list_test.v b/lib/core/pathlib/path_list_test.v new file mode 100644 index 00000000..6b368922 --- /dev/null +++ b/lib/core/pathlib/path_list_test.v @@ -0,0 +1,51 @@ +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.ui.console +import os + +const testpath = os.dir(@FILE) + '/testdata' + +fn testsuite_begin() { + os.rmdir_all(testpath) or {} + assert !os.is_dir(testpath) + os.mkdir_all(testpath) or { panic(err) } + os.mkdir_all('${testpath}/test_parent') or { panic(err) } + + // create some files for testing + os.create('${testpath}/testfile.txt')! + os.create('${testpath}/test_parent/subfile.txt')! + os.mkdir('${testpath}/test_parent/test_child')! + os.create('${testpath}/test_parent/test_child/subsubfile.txt')! +} + +fn testsuite_end() { + os.rmdir_all(testpath) or {} +} + +fn test_list() { + console.print_stdout('************ TEST_list ************') + mut test_path_dir := pathlib.get('${testpath}') + result := test_path_dir.list(recursive: true) or { panic(err) } + console.print_debug('${result}') +} + +fn test_list_dirs() { + console.print_stdout('************ TEST_list_dir ************') + mut test_path_dir := pathlib.get('${testpath}') + result := test_path_dir.list(recursive: true) or { panic(err) } + console.print_debug('${result}') +} + +fn test_list_files() { + console.print_stdout('************ TEST_list_files ************') + mut test_path_dir := pathlib.get('${testpath}') + mut fl := test_path_dir.list() or { panic(err) } + result := fl.paths + assert result.len == 5 +} + +fn test_list_links() { + console.print_stdout('************ TEST_list_link ************') + mut test_path_dir := pathlib.get('${testpath}') + result := test_path_dir.list(pathlib.ListArgs{}) or { panic(err) } + console.print_debug('${result}') +} diff --git a/lib/core/pathlib/path_md5.v b/lib/core/pathlib/path_md5.v new file mode 100644 index 00000000..38dbaeec --- /dev/null +++ b/lib/core/pathlib/path_md5.v @@ -0,0 +1,56 @@ +module pathlib + +import crypto.md5 +import os +import io +import encoding.hex + +// return in hex format +pub fn (mut path Path) md5hex() !string { + mut r := path.md5()! + return hex.encode(r) +} + +// calculate md5 in reproducable way for directory as well as large file +pub fn (mut path Path) md5() ![]u8 { + path.check_exists()! + // console.print_header(' md5: $path.path") + if path.cat == .file { + mut d := md5.new() + mut ff := os.open(path.path)! + defer { + ff.close() + } + mut buffered_reader := io.new_buffered_reader(reader: ff) + chunk_size := 128 * 1024 // 128KB chunks, adjust as needed + mut buffer := []u8{len: chunk_size} + for { + bytes_read := buffered_reader.read(mut buffer) or { + if err.type_name() == 'io.Eof' { + break + } else { + return err + } + } + d.write(buffer[0..bytes_read])! + } + md5bytes := d.sum([]u8{}) + return md5bytes + } else { + mut pl := path.list(recursive: true)! + mut out := []string{} + for mut p in pl.paths { + md5bytes := p.md5()! + out << hex.encode(md5bytes) + } + // now we need to sort out, to make sure we always aggregate in same way + out.sort() + mut d := md5.new() + for o in out { + md5bytes2 := hex.decode(o)! + d.write(md5bytes2)! 
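+			// re-hashing the sorted child hashes makes the directory hash
+			// independent of filesystem listing order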
+ } + md5bytes2 := d.sum([]u8{}) + return md5bytes2 + } +} diff --git a/lib/core/pathlib/path_recursive_txt.v b/lib/core/pathlib/path_recursive_txt.v new file mode 100644 index 00000000..b8c13260 --- /dev/null +++ b/lib/core/pathlib/path_recursive_txt.v @@ -0,0 +1,18 @@ +module pathlib + +// get all text for path and underneith (works for dir & file) +pub fn (mut path Path) recursive_text() ![]string { + mut res := []string{} + // path.check_exists()! + // console.print_debug("path recursive text: $path.path") + if path.cat == .file { + c := path.read()! + res << c.split_into_lines() + } else { + mut pl := path.list(recursive: true)! + for mut p in pl.paths { + res << p.recursive_text()! + } + } + return res +} diff --git a/lib/core/pathlib/path_rsync.v b/lib/core/pathlib/path_rsync.v new file mode 100644 index 00000000..8c38b456 --- /dev/null +++ b/lib/core/pathlib/path_rsync.v @@ -0,0 +1,151 @@ +module pathlib + +import os +import freeflowuniverse.herolib.ui.console + +@[params] +pub struct RsyncArgs { +pub mut: + source string + dest string + ipaddr_src string // e.g. root@192.168.5.5:33 (can be without root@ or :port) + ipaddr_dst string + delete bool // do we want to delete the destination + ignore []string // arguments to ignore e.g. ['*.pyc','*.bak'] + ignore_default bool = true // if set will ignore a common set + debug bool = true + fast_rsync bool + sshkey string +} + +// flexible tool to sync files from to, does even support ssh . +// args: . +// ``` +// source string +// dest string +// delete bool //do we want to delete the destination +// ipaddr_src string //e.g. root@192.168.5.5:33 (can be without root@ or :port) +// ipaddr_dst string //can only use src or dst, not both +// ignore []string //arguments to ignore +// ignore_default bool = true //if set will ignore a common set +// stdout bool = true +// ``` +// . +pub fn rsync(args_ RsyncArgs) ! { + mut args := args_ + if args.ipaddr_src.len == 0 { + get(args.source) + } + cmdoptions := rsync_cmd_options(args)! + $if debug { + console.print_debug(' rsync command:\nrsync ${cmdoptions}') + } + r := os.execute('which rsync') + if r.exit_code > 0 { + return error('Could not find the rsync command, please install.') + } + + cmd := 'rsync ${cmdoptions}' + res := os.execute(cmd) + if res.exit_code > 0 { + return error('could not execute rsync:\n${cmd}') + } + // cmdoptions2:=cmdoptions.replace(" "," ").split(" ").filter(it.trim_space()!="") + // os.execvp(rsyncpath, cmdoptions2)! +} + +// return the cmd with all rsync arguments . 
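+// e.g. a sketch with hypothetical paths:
+// rsync_cmd_options(source: '/tmp/a', dest: '/tmp/b', delete: true)!
+// returns roughly: -rvz --no-perms --delete --exclude='*.pyc' --exclude='*.bak' --exclude='*dSYM' /tmp/a/ /tmp/b/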
+// see rsync for usage of args +pub fn rsync_cmd_options(args_ RsyncArgs) !string { + mut args := args_ + mut cmd := '' + + // normalize + args.source = os.norm_path(args.source) + args.dest = os.norm_path(args.dest) + + mut delete := '' + if args.delete { + delete = '--delete' + } + mut options := '-rvz --no-perms' + if args.fast_rsync { + options += ' --size-only' + } + mut sshpart := '' + mut addrpart := '' + + mut exclude := '' + if args.ignore_default { + defaultset := ['*.pyc', '*.bak', '*dSYM'] + for item in defaultset { + if item !in args.ignore { + args.ignore << item + } + } + } + for excl in args.ignore { + exclude += " --exclude='${excl}'" + } + + args.source = args.source.trim_right('/ ') + args.dest = args.dest.trim_right('/ ') + + // if file is being copied to file dest, trailing slash shouldn't be there + mut src_path := get(args.source) + if !src_path.is_file() { + args.source = args.source + '/' + } + + if !src_path.is_file() { + args.dest = args.dest + '/' + } + + if args.ipaddr_src.len > 0 && args.ipaddr_dst.len == 0 { + sshpart, addrpart = rsync_ipaddr_format(ipaddr: args.ipaddr_src, sshkey: args.sshkey)! + cmd = '${options} ${delete} ${exclude} ${sshpart} ${addrpart}:${args.source} ${args.dest}' + } else if args.ipaddr_dst.len > 0 && args.ipaddr_src.len == 0 { + sshpart, addrpart = rsync_ipaddr_format(ipaddr: args.ipaddr_dst, sshkey: args.sshkey)! + cmd = '${options} ${delete} ${exclude} ${sshpart} ${args.source} ${addrpart}:${args.dest}' + } else if args.ipaddr_dst.len > 0 && args.ipaddr_src.len > 0 { + return error('cannot have source and dest as ssh') + } else { + cmd = '${options} ${delete} ${exclude} ${args.source} ${args.dest}' + } + return cmd +} + +@[params] +struct RsyncFormatArgs { +mut: + ipaddr string + user string = 'root' + port int = 22 + sshkey string +} + +fn rsync_ipaddr_format(args_ RsyncFormatArgs) !(string, string) { + mut args := args_ + if args.ipaddr.contains('@') { + args.user, args.ipaddr = args.ipaddr.split_once('@') or { panic('bug') } + } + if args.ipaddr.contains(':') { + mut port := '' + args.ipaddr, port = args.ipaddr.rsplit_once(':') or { panic('bug') } + args.port = port.int() + } + args.user = args.user.trim_space() + args.ipaddr = args.ipaddr.trim_space() + if args.ipaddr.len == 0 { + panic('ip addr cannot be empty') + } + // console.print_debug("- rsync cmd: ${args.user}@${args.ipaddr}:${args.port}") + mut sshkey := '' + if args.sshkey.len > 0 { + if !os.exists(args.sshkey) { + return error("can't find sshkey on path: ${args.sshkey}") + } + sshkey = '-i ${args.sshkey}' + } + return '-e \'ssh -o StrictHostKeyChecking=no ${sshkey} -p ${args.port}\'', '${args.user}@${args.ipaddr}' +} diff --git a/lib/core/pathlib/path_scanner.v b/lib/core/pathlib/path_scanner.v new file mode 100644 index 00000000..3dd444af --- /dev/null +++ b/lib/core/pathlib/path_scanner.v @@ -0,0 +1,79 @@ +module pathlib + +import freeflowuniverse.herolib.data.paramsparser + +type Filter0 = fn (mut Path, mut paramsparser.Params) !bool + +type Executor0 = fn (mut Path, mut paramsparser.Params) !paramsparser.Params + +// the filters are function which needs to return true if to process with alle executors . +// see https://github.com/freeflowuniverse/herolib/blob/development/examples/core/pathlib/examples/scanner/path_scanner.v . +// if any of the filters returns false then we don't continue . +// if we return True then it means the dir or file is processed . +// . 
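+// a minimal filter sketch (the function name is hypothetical): skip any path
+// whose name starts with an underscore, let everything else through .
+// fn skip_underscore(mut p Path, mut params paramsparser.Params) !bool {
+// 	return !p.name().starts_with('_')
+// }
+// .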
+// type Filter0 = fn (mut Path, mut paramsparser.Params) !bool
+// type Executor0 = fn (mut Path, mut paramsparser.Params) !paramsparser.Params
+//
+pub fn (mut path Path) scan(mut parameters paramsparser.Params, filters []Filter0, executors []Executor0) !paramsparser.Params {
+	if !path.is_dir() {
+		return error('can only scan on dir.\n${path}')
+	}
+	return scan_recursive(mut path, mut parameters, filters, executors)
+}
+
+fn scan_recursive(mut path Path, mut parameters paramsparser.Params, filters []Filter0, executors []Executor0) !paramsparser.Params {
+	// console.print_debug("recursive: $path")
+	// walk over the filters; if any of them returns false, stop and don't process
+	for f in filters {
+		needs_to_be_true := f(mut path, mut parameters) or {
+			msg := 'Cannot filter for ${path.path}\n${err}'
+			// console.print_debug(msg)
+			return error(msg)
+		}
+		if !needs_to_be_true {
+			return parameters
+		}
+	}
+	if path.is_dir() {
+		for e in executors {
+			parameters = e(mut path, mut parameters) or {
+				msg := 'Cannot process execution on dir ${path.path}\n${err}'
+				// console.print_debug(msg)
+				return error(msg)
+			}
+		}
+		mut pl := path.list(recursive: false) or {
+			return error('cannot list: ${path.path} \n${err}')
+		}
+		// llist.sort()
+		// first process the files and links
+		for mut p_in in pl.paths {
+			if !p_in.is_dir() {
+				scan_recursive(mut p_in, mut parameters, filters, executors) or {
+					msg := 'Cannot process recursive on ${p_in.path}\n${err}'
+					// console.print_debug(msg)
+					return error(msg)
+				}
+			}
+		}
+		// now process the dirs
+		for mut p_in in pl.paths {
+			if p_in.is_dir() {
+				scan_recursive(mut p_in, mut parameters, filters, executors) or {
+					msg := 'Cannot process recursive on ${p_in.path}\n${err}'
+					// console.print_debug(msg)
+					return error(msg)
+				}
+			}
+		}
+	} else {
+		for e in executors {
+			parameters = e(mut path, mut parameters) or {
+				msg := 'Cannot process execution on file ${path.path}\n${err}'
+				// console.print_debug(msg)
+				return error(msg)
+			}
+		}
+	}
+	return parameters
+} diff --git a/lib/core/pathlib/path_sid.v b/lib/core/pathlib/path_sid.v new file mode 100644 index 00000000..49660db0 --- /dev/null +++ b/lib/core/pathlib/path_sid.v @@ -0,0 +1,24 @@ +module pathlib
+
+// import freeflowuniverse.herolib.core.smartid
+
+// // sids_acknowledge .
+// // means our redis server knows about the sid's found, so we know which ones to generate new
+// pub fn (mut path Path) sids_acknowledge(cid smartid.CID) ! {
+// 	t := path.read()!
+// 	cid.sids_acknowledge(t)!
+// }
+
+// // sids_replace .
+// // find parts of text in form sid:*** till sid:****** .
+// // replace all occurrences with new sid's which are unique .
+// // cid = is the circle id for which we find the id's .
+// // sids will be replaced in the files if they are different
+// pub fn (mut path Path) sids_replace(cid smartid.CID) ! {
+// 	t := path.read()!
+// 	t2 := cid.sids_replace(t)!
+// 	if t2 != t {
+// 		// means we have a change and need to write it
+// 		path.write(t2)!
+// 	}
+// } diff --git a/lib/core/pathlib/path_size.v b/lib/core/pathlib/path_size.v new file mode 100644 index 00000000..3d00645b --- /dev/null +++ b/lib/core/pathlib/path_size.v @@ -0,0 +1,23 @@ +module pathlib
+
+import os
+
+pub fn (mut path Path) size_kb() !int {
+	s := path.size()!
+	return int(s / 1000)
+}
+
+pub fn (mut path Path) size() !f64 {
+	path.check_exists()!
+	// console.print_header(" filesize: ${path.path}")
+	if path.cat == .file {
+		return os.file_size(path.path)
+	} else {
+		mut pl := path.list(recursive: true)!
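+		// caution: the recursive list can also contain subdirs, whose size()! recurses
+		// again, so nested files may be counted more than once (worth verifying)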
+ mut totsize := 0.0 + for mut p in pl.paths { + totsize += p.size()! + } + return totsize + } +} diff --git a/lib/core/pathlib/path_subgetters.v b/lib/core/pathlib/path_subgetters.v new file mode 100644 index 00000000..45794a25 --- /dev/null +++ b/lib/core/pathlib/path_subgetters.v @@ -0,0 +1,314 @@ +module pathlib + +import freeflowuniverse.herolib.core.texttools +import os + +@[params] +pub struct SubGetParams { +pub mut: + name string + name_fix_find bool // means we will also find if name is same as the name_fix + name_fix bool // if file found and name fix was different than file on filesystem, will rename + dir_ensure bool // if dir_ensure on will fail if its not a dir + file_ensure bool // if file_ensure on will fail if its not a dir +} + +// An internal struct for representing failed jobs. +pub struct SubGetError { + Error +pub mut: + msg string + path string + error_type JobErrorType +} + +pub enum JobErrorType { + error + nodir + notfound + wrongtype // asked for dir or file, but found other type + islink +} + +pub fn (err SubGetError) msg() string { + mut msg := '' + if err.error_type == .nodir { + msg = 'could not get sub of path, because was no dir' + } + if err.error_type == .notfound { + msg = 'could not find' + } + if err.error_type == .wrongtype { + msg = 'asked for a dir or a file, but this did not correspond on filesystem.' + } + if err.error_type == .islink { + msg = 'we found a link, this is not supported for now.' + } + return "Dir Get Error for path:'${err.path}' -- (${err.code()}) failed with error: ${msg}" +} + +pub fn (err SubGetError) code() int { + return int(err.error_type) +} + +// will get dir or file underneith a dir . +// e.g. mypath.sub_get(name:"mysub_file.md",name_fix_find:true,name_fix:true)! . +// this will find Mysubfile.md as well as mysub_File.md and rename to mysub_file.md and open . +// params: . +// - name . +// - name_fix_find bool :means we will also find if name is same as the name_fix. +// - name_fix bool :if file found and name fix was different than file on filesystem, will rename . +// - dir_ensure bool :if dir_ensure on will fail if its not a dir . +// - file_ensure bool :if file_ensure on will fail if its not a dir . +// . +// will return SubGetError if error . +// +// returns a path +pub fn (mut path Path) sub_get(args_ SubGetParams) !Path { + mut args := args_ + if path.cat != Category.dir { + return SubGetError{ + error_type: .nodir + path: path.path + } + } + if args.name == '' { + return error('name cannot be empty') + } + if args.name_fix { + args.name_fix_find = true + } + if args.name_fix_find { + args.name = texttools.name_fix(args.name) + } + items := os.ls(path.path) or { []string{} } + for item in items { + mut itemfix := item + if args.name_fix_find { + itemfix = texttools.name_fix(item) + } + if itemfix == args.name { + // we found what we were looking for + mut p := get(os.join_path(path.path, item)) // get the path + if args.dir_ensure { + if !p.is_dir() { + return SubGetError{ + error_type: .wrongtype + path: path.path + } + } + } + if args.file_ensure { + if !p.is_file() { + return SubGetError{ + error_type: .wrongtype + path: path.path + } + } + } + if args.name_fix { + p.path_normalize() or { + return SubGetError{ + msg: 'could not normalize path: ${err}' + path: path.path + } + } + } + return p + } + } + return SubGetError{ + error_type: .notfound + path: path.path + } +} + +// will check if dir exists +// params: . +// - name +// - name_fix_find bool :means we will also find if name is same as the name_fix . 
+// - name_fix bool :if file found and name fix was different than file on filesystem, will rename .
+// - dir_ensure bool :if dir_ensure on will fail if it is not a dir .
+// - file_ensure bool :if file_ensure on will fail if it is not a file .
+//
+pub fn (mut path Path) sub_exists(args_ SubGetParams) !bool {
+	_ := path.sub_get(args_) or {
+		if err.code() == 2 {
+			return false // means did not exist
+		}
+		return err
+	}
+	return true
+	// TODO: need to write test for sub_get and sub_exists
+}
+
+//////////////FILE
+
+// find file underneath dir path, if exists return True
+pub fn (path Path) file_exists(tofind string) bool {
+	if path.cat != Category.dir {
+		return false
+	}
+	if os.exists('${path.path}/${tofind}') {
+		if os.is_file('${path.path}/${tofind}') {
+			return true
+		}
+	}
+	return false
+}
+
+// is case insensitive
+pub fn (mut path Path) file_exists_ignorecase(tofind string) bool {
+	return path.file_name_get_ignorecase(tofind) != ''
+}
+
+fn (mut path Path) file_name_get_ignorecase(tofind string) string {
+	if path.cat != Category.dir {
+		return ''
+	}
+	files := os.ls(path.path) or { []string{} }
+	for item in files {
+		if tofind.to_lower() == item.to_lower() {
+			file_path := os.join_path(path.path, item)
+			if os.is_file(file_path) {
+				return item
+			}
+		}
+	}
+	return ''
+}
+
+// find file underneath path, if exists return as Path, otherwise error .
+pub fn (mut path Path) file_get(tofind string) !Path {
+	if path.cat != Category.dir || !(path.exists()) {
+		return error('File get for ${tofind} in ${path.path}: is not a dir or dir does not exist.')
+	}
+	if path.file_exists(tofind) {
+		file_path := os.join_path(path.path, tofind)
+		return Path{
+			path: file_path
+			cat: Category.file
+			exist: .yes
+		}
+	}
+	return error("Could not find file '${tofind}' in ${path.path}.")
+}
+
+pub fn (mut path Path) file_get_ignorecase(tofind string) !Path {
+	if path.cat != Category.dir || !(path.exists()) {
+		return error('File get ignore case for ${tofind} in ${path.path}: is not a dir or dir does not exist.')
+	}
+	filename := path.file_name_get_ignorecase(tofind)
+	if filename == '' {
+		return error("Could not find file (ignore case) '${tofind}' in ${path.path}.")
+	}
+	file_path := os.join_path(path.path, filename)
+	return Path{
+		path: file_path
+		cat: Category.file
+		exist: .yes
+	}
+}
+
+// get file, if not exist make new one
+pub fn (mut path Path) file_get_new(tofind string) !Path {
+	if path.cat != Category.dir || !(path.exists()) {
+		return error('File get new for ${tofind} in ${path.path}: is not a dir or dir does not exist.')
+	}
+	mut p := path.file_get(tofind) or {
+		return get_file(path: '${path.path}/${tofind}', create: true)!
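+		// fallback: the file did not exist, so an empty one is created instead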
+ } + return p +} + +//////////////LINK + +// find link underneith path, if exists return True +// is case insensitive +pub fn (mut path Path) link_exists(tofind string) bool { + if path.cat != Category.dir { + return false + } + // TODO: need to check, if this is correct, make test + if os.exists('${path.path}/${tofind}') { + if os.is_link('${path.path}/${tofind}') { + return true + } + } + return false +} + +// find link underneith path, if exists return True +// is case insensitive +pub fn (mut path Path) link_exists_ignorecase(tofind string) bool { + if path.cat != Category.dir { + return false + } + files := os.ls(path.path) or { []string{} } + if tofind.to_lower() in files.map(it.to_lower()) { + file_path := os.join_path(path.path.to_lower(), tofind.to_lower()) + if os.is_link(file_path) { + return true + } + } + return false +} + +// find link underneith path, return as Path, can only be one +// tofind is part of link name +pub fn (mut path Path) link_get(tofind string) !Path { + if path.cat != Category.dir || !(path.exists()) { + return error('Link get for ${tofind} in ${path.path}: is not a dir or dir does not exist.') + } + if path.link_exists(tofind) { + file_path := os.join_path(path.path, tofind) + return Path{ + path: file_path + cat: Category.linkfile + exist: .yes + } + } + return error("Could not find link '${tofind}' in ${path.path}.") +} + +///////// DIR + +// find dir underneith path, if exists return True +pub fn (mut path Path) dir_exists(tofind string) bool { + if path.cat != Category.dir { + return false + } + if os.exists('${path.path}/${tofind}') { + if os.is_dir('${path.path}/${tofind}') { + return true + } + } + return false +} + +// find dir underneith path, return as Path +pub fn (mut path Path) dir_get(tofind string) !Path { + if path.cat != Category.dir || !(path.exists()) { + return error('is not a dir or dir does not exist: ${path.path}') + } + if path.dir_exists(tofind) { + dir_path := os.join_path(path.path, tofind) + return Path{ + path: dir_path + cat: Category.dir + exist: .yes + } + } + return error('${tofind} is not in ${path.path}') +} + +// get file, if not exist make new one +pub fn (mut path Path) dir_get_new(tofind string) !Path { + if path.cat != Category.dir || !(path.exists()) { + return error('is not a dir or dir does not exist: ${path.path}') + } + mut p := path.dir_get(tofind) or { + return get_dir(path: '${path.path}/${tofind}', create: true)! + } + return p +} diff --git a/lib/core/pathlib/path_tools.v b/lib/core/pathlib/path_tools.v new file mode 100644 index 00000000..072b4f70 --- /dev/null +++ b/lib/core/pathlib/path_tools.v @@ -0,0 +1,549 @@ +module pathlib + +import os +import freeflowuniverse.herolib.core.texttools +import time +import crypto.md5 +import rand +import freeflowuniverse.herolib.ui.console + +// check path exists +pub fn (mut path Path) exists() bool { + // if path.cat == .unknown || path.exist == .unknown { + // path.check() + // } + path.check() + return path.exist == .yes +} + +// case insentive check on paths +pub fn path_equal(a_ string, b_ string) bool { + a := os.abs_path(a_.replace('~', os.home_dir())).to_lower() + b := os.abs_path(b_.replace('~', os.home_dir())).to_lower() + return a == b +} + +// rename the file or directory +pub fn (mut path Path) rename(name string) ! 
{ + if name.contains('/') { + return error("should only be a name no dir inside: '${name}'") + } + mut dest := '' + if path.path.contains('/') { + before := path.path.all_before_last('/') + dest = before + '/' + name + } else { + dest = name + } + os.mv(path.path, dest)! + path.path = dest + path.check() +} + +// TODO: make part of pathlib of Path + +// uncompress to specified directory . +// if copy then will keep the original +pub fn (mut path Path) expand(dest string) !Path { + $if debug { + console.print_header('expand ${path.path}') + } + if dest.len < 4 { + return error("Path dest needs to be mentioned and +4 char. Now '${dest}'") + } + filext := os.file_ext(path.name()).to_lower() + + // the ones who return a filepath + if filext == '.xz' { + cmd := 'xz --decompress ${path.path} --stdout > ${dest}' + if os.is_file(dest) { + os.rm(dest)! + } + os.mkdir_all(dest)! + os.rmdir(dest)! + + res := os.execute(cmd) + // console.print_debug(res) + if res.exit_code > 0 { + // console.print_debug(cmd) + return error('Could not expand xz.\n${res}') + } + return get_file(path: dest, create: false)! + } + + mut desto := get_dir(path: dest, create: true)! + desto.empty()! + + if path.name().to_lower().ends_with('.tar.gz') || path.name().to_lower().ends_with('.tgz') { + cmd := 'tar -xzvf ${path.path} -C ${desto.path}' + console.print_debug(cmd) + res := os.execute(cmd) + if res.exit_code > 0 { + return error('Could not expand.\n${res}') + } + } else if path.name().to_lower().ends_with('.zip') { + cmd := 'unzip ${path.path} -d ${dest}' + // console.print_debug(cmd) + res := os.execute(cmd) + // console.print_debug(res) + if res.exit_code > 0 { + return error('Could not expand zip.\n${res}') + } + } else if path.name().to_lower().ends_with('.bz2') { + cmd := ' + bunzip2 -f -k ${path.path} + ' // console.print_debug(cmd) + + res := os.execute(cmd) + if res.exit_code > 0 { + return error('Could not expand bz2.\n${res.output}') + } + dest_tmp := path.path.all_before_last('.bz2') + desto.delete()! + mut desto2 := get_file(path: dest, create: false)! + os.mv(dest_tmp, desto2.path)! + return desto2 + } else { + panic('expand not implemented yet for : ${path.path}') + } + return desto +} + +// chown changes the owner and group attributes of path to owner and group. +pub fn (mut path Path) chown(owner int, group int) ! { + os.chown(path.path, owner, group)! +} + +// chmod change file access attributes of path to mode. +// Octals like 0o600 can be used. +pub fn (mut path Path) chmod(mode int) ! { + os.chmod(path.path, mode)! +} + +// get relative path in relation to destpath . +// will not resolve symlinks +pub fn (path Path) path_relative(destpath string) !string { + // console.print_header(' path relative: '$path.path' '$destpath'") + return path_relative(destpath, path.path) +} + +// recursively finds the least common ancestor of array of paths . +// will always return the absolute path (relative gets changed to absolute). +pub fn find_common_ancestor(paths_ []string) string { + for p in paths_ { + if p.trim_space() == '' { + panic('cannot find commone ancestors if any of items in paths is empty.\n${paths_}') + } + } + paths := paths_.map(os.abs_path(os.real_path(it))) // get the real path (symlinks... resolved) + console.print_debug(paths.str()) + parts := paths[0].split('/') + mut totest_prev := '/' + for i in 1 .. 
parts.len { + totest := parts[0..i + 1].join('/') + if paths.any(!it.starts_with(totest)) { + return totest_prev + } + totest_prev = totest + } + return totest_prev +} + +// same as above but will treat symlinks as if normal links +// allowing finding relative paths between links as well +// QUESTION: should we merge with above? +pub fn find_simple_common_ancestor(paths_ []string) string { + for p in paths_ { + if p.trim_space() == '' { + panic('cannot find commone ancestors if any of items in paths is empty.\n${paths_}') + } + } + paths := paths_.map(os.abs_path(it)) + parts := paths[0].split('/') + mut totest_prev := '/' + for i in 1 .. parts.len { + totest := parts[0..i + 1].join('/') + if paths.any(!it.starts_with(totest)) { + return totest_prev + } + totest_prev = totest + } + return totest_prev +} + +// find parent of path +pub fn (path Path) parent() !Path { + mut p := path.absolute() + parent := os.dir(p) // get parent directory + if parent == '.' || parent == '/' { + return error('no parent for path ${path.path}') + } else if parent == '' { + return Path{ + path: '/' + cat: Category.dir + exist: .unknown + } + } + return Path{ + path: parent + cat: Category.dir + exist: .unknown + } +} + +pub struct MoveArgs { +pub mut: + dest string // path + delete bool // if true will remove files which are on dest which are not on source + chmod_execute bool +} + +// move to other location +// ``` +// dest string // path +// delete bool // if true will remove files which are on dest which are not on source +// ``` +pub fn (mut path Path) move(args MoveArgs) ! { + mut d := get(args.dest) + if d.exists() { + if args.delete { + d.delete()! + } else { + return error("Found dest dir in move and can't delete. \n${args}") + } + } + os.mv(path.path, d.path)! + if args.chmod_execute { + d.chmod(0o770)! + } +} + +// the path will move itself up 1 level . +// e.g. path is /tmp/rclone and there is /tmp/rclone/rclone-v1.64.2-linux-amd64 . +// that last dir needs to move 1 up +pub fn (mut path Path) moveup_single_subdir() ! { + mut plist := path.list(recursive: false, ignoredefault: true, dirs_only: true)! + console.print_debug(plist.str()) + if plist.paths.len != 1 { + return error('could not find one subdir in ${path.path} , so cannot move up') + } + mut pdest := plist.paths[0] + pdest.moveup()! +} + +// the path will move itself up 1 level . +// the e.g. /tmp/rclone/rclone-v1.64.2-linux-amd64/ -> /tmp/rclone +pub fn (mut path Path) moveup() ! { + console.print_stdout('move up: ${path}') + pdest := path.parent()! + tmpdir := '${os.temp_dir()}/${rand.u16()}' + path.move(dest: tmpdir, delete: true)! + mut tmpdirpath := get_dir(path: tmpdir)! + tmpdirpath.move(dest: pdest.path, delete: true)! + path.path = pdest.path + path.check() +} + +// returns extension without . +pub fn (path Path) extension() string { + return os.file_ext(path.path).trim('.') +} + +// returns extension without and all lower case +pub fn (path Path) extension_lower() string { + return path.extension().to_lower() +} + +// will rewrite the path to lower_case if not the case yet +// will also remove weird chars +// if changed will return true +// the file will be moved to the new location +pub fn (mut path Path) path_normalize() !bool { + path_original := path.path + '' // make sure is copy, needed? 
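+	// note: V strings are immutable, so plain assignment already behaves like a
+	// copy; the + '' above should not be needed (kept as harmless)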
+ + // if path.cat == .file || path.cat == .dir || !path.exists() { + // return error('path $path does not exist, cannot namefix (only support file and dir)') + // } + + if path.extension().to_lower() == 'jpeg' { + path.path = path.path_no_ext() + '.jpg' + } + + namenew := texttools.name_fix_keepext(path.name()) + if namenew != path.name() { + path.path = os.join_path(os.dir(path.path), namenew) + } + + if path.path != path_original { + os.mv(path_original, path.path)! + path.check() + return true + } + return false +} + +// walk upwards starting from path untill dir or file tofind is found +// works recursive +pub fn (path Path) parent_find(tofind string) !Path { + if os.exists(os.join_path(path.path, tofind)) { + return path + } + path2 := path.parent()! + return path2.parent_find(tofind) +} + +// delete +pub fn (mut path Path) rm() ! { + return path.delete() +} + +// delete +pub fn (mut path Path) delete() ! { + if path.exists() { + // console.print_debug("exists: $path") + match path.cat { + .file, .linkfile, .linkdir { + os.rm(path.path.replace('//', '/'))! + } + .dir { + os.rmdir_all(path.path)! + } + .unknown { + return error('Path cannot be unknown type') + } + } + path.exist = .no + } + if os.is_link(path.path) { + os.rm(path.path.replace('//', '/'))! + } +} + +// remove all content but if dir let the dir exist +pub fn (mut path Path) empty() ! { + if path.cat == .dir { + os.mkdir_all(path.path)! + path.exist = .yes + mut list := path.list()! + for mut subpath in list.paths { + subpath.delete()! + } + } else if path.cat == Category.linkfile { + mut p2 := path.getlink()! + p2.empty()! + } else { + path.write('')! + } +} + +// write content to the file, check is file +// if the path is a link to a file then will change the content of the file represented by the link +pub fn (mut path Path) write(content string) ! { + if !os.exists(path.path_dir()) { + os.mkdir_all(path.path_dir())! + } + if path.exists() && path.cat == Category.linkfile { + mut pathlinked := path.getlink()! + pathlinked.write(content)! + } + if path.exists() && path.cat != Category.file && path.cat != Category.linkfile { + return error('Path must be a file for ${path}') + } + os.write_file(path.path, content)! +} + +// write bytes to file +pub fn (mut path Path) writeb(content []u8) ! { + if !os.exists(path.path_dir()) { + os.mkdir_all(path.path_dir())! + } + if path.exists() && path.cat == Category.linkfile { + mut pathlinked := path.getlink()! + pathlinked.writeb(content)! + } + + if path.exists() && path.cat != Category.file && path.cat != Category.linkfile { + return error('Path must be a file for ${path}') + } + + os.write_file_array(path.path, content)! +} + +// read content from file +pub fn (mut path Path) read() !string { + path.check() + match path.cat { + .file, .linkfile { + p := path.absolute() + if !os.exists(p) { + return error('File is not exist, ${p} is a wrong path') + } + return os.read_file(p) + } + else { + return error('Path is not a file when reading. ${path.path}') + } + } +} + +// read bytes from file +pub fn (mut path Path) readb() ![]u8 { + path.check() + match path.cat { + .file, .linkfile { + p := path.absolute() + if !os.exists(p) { + return error('File does not exist, ${p} is a wrong path') + } + return os.read_bytes(p) + } + else { + return error('Path is not a file when reading. ${path.path}') + } + } +} + +// recalc path between target & source . +// we only support if source_ is an existing dir, links will not be supported . 
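+// the result climbs with one '../' per level that source sits below the common
+// ancestor, then appends the remainder of linkpath .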
+// a0 := pathlib.path_relative('$testpath/a/b/c', '$testpath/a/d.txt') or { panic(err) } .
+// assert a0 == '../../d.txt' .
+// a2 := pathlib.path_relative('$testpath/a/b/c', '$testpath/d.txt') or { panic(err) } .
+// assert a2 == '../../../d.txt' .
+// a8 := pathlib.path_relative('$testpath/a/b/c', '$testpath/a/b/c/d/e/e.txt') or { panic(err) } .
+// assert a8 == 'd/e/e.txt' .
+// symlinks will not be resolved, as it leads to unexpected behaviour
+pub fn path_relative(source_ string, linkpath_ string) !string {
+	mut source := os.abs_path(source_)
+	mut linkpath := os.abs_path(linkpath_)
+	// now both start with /
+
+	mut p := get(source_)
+
+	// converts file source to dir source
+	if source.all_after_last('/').contains('.') {
+		source = source.all_before_last('/')
+		p = p.parent() or { return error("Parent of source ${source_} doesn't exist") }
+	}
+	p.check()
+
+	if p.cat != .dir && p.cat != .linkdir {
+		return error('Cannot do path_relative()! if source is not a dir. Now:${source_} is ${p.cat}')
+	} else if !p.exists() {
+		return error("Cannot do path_relative()! if source doesn't exist. Now:${source_}")
+	}
+
+	common := find_simple_common_ancestor([source, linkpath])
+
+	// if source is common, returns source
+	if source.len <= common.len + 1 {
+		// TODO: this should be safer
+		path := linkpath_.trim_string_left(source)
+		if path.starts_with('/') {
+			return path[1..]
+		} else {
+			return path
+		}
+	}
+
+	mut source_short := source[(common.len)..]
+	mut linkpath_short := linkpath[(common.len)..]
+
+	source_short = source_short.trim_string_left('/')
+	linkpath_short = linkpath_short.trim_string_left('/')
+
+	// console.print_stdout('source: ${source_short}')
+	// console.print_stdout('link: ${linkpath_short}')
+
+	source_count := source_short.count('/')
+	// link_count := linkpath_short.count('/')
+	// console.print_debug(" + source_short:$source_short ($source_count)")
+	// console.print_debug(" + linkpath_short:$linkpath_short ($link_count)")
+	mut dest := ''
+
+	if source_short == '' { // source folder is common ancestor
+		dest = linkpath_short
+	} else {
+		go_up := ['../'].repeat(source_count + 1).join('')
+		dest = '${go_up}${linkpath_short}'
+	}
+
+	dest = dest.replace('//', '/')
+	return dest
+}
+
+@[params]
+pub struct TMPWriteArgs {
+pub mut:
+	name   string // optional name to remember it more easily
+	tmpdir string
+	text   string // text to put in file
+	path   string // to overrule the path where script will be stored
+	ext    string = 'sh'
+}
+
+// write temp file and return path
+pub fn temp_write(args_ TMPWriteArgs) !string {
+	mut args := args_
+
+	if args.path.len == 0 {
+		if args.tmpdir.len == 0 {
+			if 'TMPDIR' in os.environ() {
+				args.tmpdir = os.environ()['TMPDIR'] or { '/tmp' }
+			} else {
+				args.tmpdir = '/tmp'
+			}
+		}
+		mut t := time.now().format_ss_milli().replace(' ', '-').replace('.', ':')
+		texthash := md5.hexhash(args.text)
+		t += '_${texthash}'
+		mut tmppath := '${args.tmpdir}/execscripts/${t}.${args.ext}'
+		if args.name.len > 0 {
+			tmppath = '${args.tmpdir}/execscripts/${args.name}_${t}.${args.ext}'
+		}
+
+		if !os.exists('${args.tmpdir}/execscripts/') {
+			os.mkdir('${args.tmpdir}/execscripts') or {
+				return error('Cannot create ${args.tmpdir}/execscripts,${err}')
+			}
+		}
+		if os.exists(tmppath) {
+			for i in 1 .. 200 {
+				// console.print_debug(i)
+				tmppath = '${args.tmpdir}/execscripts/${t}_${i}.${args.ext}'
+				if !os.exists(tmppath) {
+					break
+				}
+				// TODO: would be better to remove older files, e.g. if older than 1 day, remove
+				if i > 99 {
+					// os.rmdir_all('$tmpdir/execscripts')!
+					// return temp_write(text)
+					panic("should not get here, can't find temp file to write for process job.")
+				}
+			}
+		}
+		args.path = tmppath
+	}
+	os.write_file(args.path, args.text)!
+	os.chmod(args.path, 0o777)!
+	return args.path
+}
+
+// pub fn path_relative(source_ string, dest_ string) !string {
+// 	mut source := source_.trim_right('/')
+// 	mut dest := dest_.replace('//', '/').trim_right('/')
+// 	// console.print_debug("path relative: '$source' '$dest' ")
+// 	if source !="" {
+// 		if source.starts_with('/') && !dest.starts_with('/') {
+// 			return error('if source starts with / then dest needs to start with / as well.\n - $source\n - $dest')
+// 		}
+// 		if !source.starts_with('/') && dest.starts_with('/') {
+// 			return error('if source starts with / then dest needs to start with / as well\n - $source\n - $dest')
+// 		}
+// 	}
+// 	if dest.starts_with(source) {
+// 		return dest[source.len..]
+// 	} else {
+// 		msg := "Destination path is not in source directory: $source_ $dest_"
+// 		return error(msg)
+// 	}
+// }
diff --git a/lib/core/pathlib/path_tools_test.v b/lib/core/pathlib/path_tools_test.v
new file mode 100644
index 00000000..9fbdcf2f
--- /dev/null
+++ b/lib/core/pathlib/path_tools_test.v
@@ -0,0 +1,223 @@
+import freeflowuniverse.herolib.core.pathlib
+import os
+import freeflowuniverse.herolib.ui.console
+
+const testpath = os.dir(@FILE) + '/examples/test_path'
+
+fn testsuite_begin() {
+	os.rmdir_all(testpath) or {}
+	assert !os.is_dir(testpath)
+	os.mkdir_all(testpath) or { panic(err) }
+	os.mkdir_all('${testpath}/test_parent') or { panic(err) }
+	os.mkdir_all('${testpath}/a/b/c') or { panic(err) }
+	os.create('${testpath}/testfile1') or { panic(err) }
+	os.create('${testpath}/test_parent/testfile2') or { panic(err) }
+	os.create('${testpath}/test_parent/testfile3') or { panic(err) }
+}
+
+fn testsuite_end() {
+	os.rmdir_all(testpath) or {}
+}
+
+fn test_get() {
+	console.print_stdout('************ TEST_Get ************')
+	console.print_debug(testpath)
+	fp := pathlib.get('${testpath}/testfile1')
+	assert fp.cat == pathlib.Category.file
+	console.print_stdout('File Result: ${fp}')
+	dp := pathlib.get('${testpath}')
+	assert dp.cat == pathlib.Category.dir
+	console.print_stdout('Dir Result: ${dp}')
+}
+
+fn test_exists() {
+	console.print_stdout('************ TEST_exists ************')
+	mut p1 := pathlib.get_file(path: '${testpath}/testfile1') or { panic('${err}') }
+	assert p1.exists()
+	console.print_stdout('File found')
+	mut p2 := pathlib.get_file(path: '${testpath}/NotARealFile') or { panic('${err}') }
+	assert !p2.exists()
+	console.print_stdout('File not found')
+	mut p3 := pathlib.get_file(path: '${testpath}/NotARealFile2', create: true) or {
+		panic('${err}')
+	}
+	assert p3.exists()
+	console.print_stdout('File found')
+	p3.delete() or { panic('${err}') }
+	assert !p3.exists()
+}
+
+fn test_parent() {
+	console.print_stdout('************ TEST_test_parent ************')
+	mut test_path_dir := pathlib.get('${testpath}')
+	mut p := pathlib.get('${testpath}/testfile1')
+	parent_dir := p.parent() or { panic(err) }
+	assert parent_dir.path == test_path_dir.path
+	console.print_stdout('Parent Function working correctly')
+}
+
+fn test_parent_find() {
+	console.print_stdout('************ TEST_test_parent_find ************')
+	// - testfile1 is located in test_path
+	// - will start search from test_parent that is inside test_path
+	// - Result must be test_path
+	mut test_path_dir := pathlib.get('${testpath}')
+	mut p := pathlib.get('${testpath}/test_parent')
+	parent_dir := p.parent_find('testfile1') or { panic(err) }
+	assert parent_dir.path == test_path_dir.path
+	console.print_stdout('Find Parent Function working correctly')
+}
+
+fn test_dir_exists() {
+	console.print_stdout('************ TEST_dir_exists ************')
+	mut test_path_dir := pathlib.get('${testpath}')
+	assert test_path_dir.dir_exists('test_parent')
+	console.print_stdout('test_parent found in ${test_path_dir.path}')
+	assert !test_path_dir.dir_exists('test_parent_2')
+	console.print_stdout('test_parent_2 not found in ${test_path_dir.path}')
+}
+
+fn test_dir_find() {
+	console.print_stdout('************ TEST_dir_find ************')
+	mut test_path_dir := pathlib.get('${testpath}')
+	mut test_parent_dir := test_path_dir.dir_get('test_parent') or { panic(err) }
+	console.print_stdout('Dir found: ${test_parent_dir}')
+	mut test_parent_dir2 := test_path_dir.dir_get('test_parent_2') or { return }
+	panic('should not get here')
+}
+
+fn testfile1_exists() {
+	console.print_stdout('************ testfile1_exists ************')
+	mut test_path_dir := pathlib.get('${testpath}')
+	assert test_path_dir.file_exists('testfile1')
+	console.print_stdout('testfile1 found in ${test_path_dir.path}')
+
+	assert !test_path_dir.file_exists('newfile2')
+	console.print_stdout('newfile2 not found in ${test_path_dir.path}')
+}
+
+fn testfile1_find() {
+	console.print_stdout('************ testfile1_find ************')
+	mut test_path_dir := pathlib.get('${testpath}')
+	mut file := test_path_dir.file_get('testfile1') or { panic(err) }
+	console.print_stdout('file ${file} found')
+	test_path_dir.file_get('newfile2') or { return }
+	panic('should not get here')
+}
+
+fn test_real_path() {
+	console.print_stdout('************ TEST_real_path ************')
+	mut source := pathlib.get('${testpath}/test_parent/testfile2')
+	mut dest_ := '${testpath}/link_remove_rp.md'
+	mut link := source.link(dest_, true) or { panic('error: ${err}') }
+	mut dest := pathlib.get(dest_)
+	link_real := dest.realpath()
+	assert link_real == '${testpath}/test_parent/testfile2'
+	// dest.delete() or {panic(err)}
+	console.print_stdout('Real path function working correctly')
+}
+
+fn test_real_path2() {
+	console.print_stdout('************ TEST_real_path2 ************')
+	mut source := pathlib.get('${testpath}/testfile1')
+	mut dest_ := '${testpath}/test_parent/link_remove_rp2.md'
+	mut link := source.link(dest_, true) or { panic('error: ${err}') }
+	mut dest := pathlib.get(dest_)
+	link_real := dest.realpath()
+	assert link_real == '${testpath}/testfile1'
+	dest.delete() or { panic(err) }
+	console.print_stdout('Real path2 function working correctly')
+}
+
+fn test_link_path_relative() {
+	os.mkdir_all('${testpath}/a/b/c') or { panic(err) }
+	console.print_stdout('************ TEST_link_path_relative()! ************')
+	a0 := pathlib.path_relative('${testpath}/a/b/c', '${testpath}/a/d.txt') or { panic(err) }
+	assert a0 == '../../d.txt'
+	a2 := pathlib.path_relative('${testpath}/a/b/c', '${testpath}/d.txt') or { panic(err) }
+	assert a2 == '../../../d.txt'
+	a3 := pathlib.path_relative('${testpath}/a/b/c', '${testpath}/a/b/c/e.txt') or { panic(err) }
+	assert a3 == 'e.txt' // ? is this the correct path?
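+	// yes: the source dir a/b/c is itself the dir of the target, so the relative path is just the file name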
+	a4 := pathlib.path_relative('${testpath}/a/b/c/', '${testpath}/a/b/d/e.txt') or { panic(err) }
+	assert a4 == '../d/e.txt'
+	a5 := pathlib.path_relative('${testpath}/a/b/c', '${testpath}/a/b/c/d/e/e.txt') or {
+		panic(err)
+	}
+	assert a5 == 'd/e/e.txt'
+	a6 := pathlib.path_relative('${testpath}/a/b/c', '${testpath}/a/b/c/e.txt') or { panic(err) }
+	assert a6 == 'e.txt'
+	a7 := pathlib.path_relative('${testpath}/a/b/c', '${testpath}/a/b/c/e.txt') or { panic(err) }
+	assert a7 == 'e.txt'
+	a8 := pathlib.path_relative('${testpath}/a/b/c', '${testpath}/a/b/c/d/e/e.txt') or {
+		panic(err)
+	}
+	assert a8 == 'd/e/e.txt'
+
+	// TODO: make this work in the test setup
+	// c := pathlib.path_relative('/Users/despiegk1/code4/books/content/mytwin/intro','/Users/despiegk1/code4/books/content/mytwin/funny_comparison.md') or {panic(err)}
+	// assert c=="../funny_comparison.md"
+	// d := pathlib.path_relative('/Users/despiegk1/code4/books/content/mytwin/intro/','/Users/despiegk1/code4/books/content/mytwin/funny_comparison.md') or {panic(err)}
+	// assert d=="../funny_comparison.md"
+
+	console.print_stdout('Link path relative function working correctly')
+}
+
+// TODO need to enable all tests
+// TODO have more than 1 test file, make more modular, now it's one file which is too big
+
+fn test_write_and_read() {
+	console.print_stdout('************ TEST_write_and_read ************')
+	mut fp := pathlib.get('${testpath}/testfile1')
+	fp.write('Test Write Function') or { panic(err) }
+	fcontent := fp.read() or { panic(err) }
+	assert fcontent == 'Test Write Function'
+	console.print_stdout('Write and read working correctly')
+
+	// mut test_path_dir := pathlib.get("$testpath")
+}
+
+// fn test_copy() {
+// 	console.print_stdout('************ TEST_copy ************')
+// 	//- Copy /test_path/testfile1 to /test_path/test_parent
+// 	mut dest_dir := pathlib.get('${testpath}/test_parent')
+// 	mut src_f := pathlib.get('${testpath}/testfile1')
+// 	src_f.copy(dest: '${dest_dir.path}/testfile2') or { panic(err) }
+// 	mut dest_file := pathlib.get('${testpath}/test_parent/testfile2')
+// 	dest_file.delete()!
+// 	console.print_stdout('Copy function works correctly')
+// }
+
+// TODO need other test
+// fn test_link(){
+// 	console.print_stdout('************ TEST_link ************')
+// 	mut dest_p:= path.path{path:"$testpath/linkdir1", cat:pathlib.Category.linkdir, exists:path.false}
+// 	mut lp := path.path{path:"/workspace/herolib/path", cat:pathlib.Category.dir, exists:path.true}
+// 	lp.link(mut dest_p) or {panic(err)}
+// 	mut get_link := pathlib.get("$testpath/linkdir1")
+// 	assert get_link.exists()
+// 	console.print_debug("Link path: $get_link.path")
+// 	real:= get_link.absolute()
+// 	console.print_debug("Real path: $real")
+// }
+
+fn test_find_common_ancestor() {
+	console.print_stdout('************ TEST_find_common_ancestor ************')
+	res := pathlib.find_common_ancestor(['/test/a/b/c/d', '/test/a/'])
+	assert res == '/test/a'
+
+	b1 := pathlib.find_common_ancestor(['/a/b/c/d.txt', '/a/d.txt'])
+	assert b1 == '/a'
+
+	b2 := pathlib.find_common_ancestor(['/a/b/c/d.txt', '/c/d.txt'])
+	assert b2 == '/'
+
+	b3 := pathlib.find_common_ancestor(['/a/b/c/d.txt', '/a/b/c/e.txt'])
+	assert b3 == '/a/b/c'
+
+	b4 := pathlib.find_common_ancestor(['/a/b/c/d.txt', '/a/b/c/d.txt'])
+	assert b4 == '/a/b/c/d.txt'
+
+	b7 := pathlib.find_common_ancestor(['/', '/a/b/c/d.txt'])
+	assert b7 == '/'
+	console.print_stdout('Find common ancestor function works correctly')
+}
diff --git a/lib/core/pathlib/readme.md b/lib/core/pathlib/readme.md
new file mode 100644
index 00000000..c8f8522f
--- /dev/null
+++ b/lib/core/pathlib/readme.md
@@ -0,0 +1,81 @@
+# Pathlib Module
+
+The pathlib module provides a robust way to handle file system operations. Here's a comprehensive overview of how to use it:
+
+## 1. Basic Path Creation
+
+```v
+import freeflowuniverse.herolib.core.pathlib
+
+// Get a basic path object
+mut path := pathlib.get('/some/path')
+
+// Create a directory (with parent dirs)
+mut dir := pathlib.get_dir(
+	path:   '/some/dir'
+	create: true
+)!
+
+// Create/get a file
+mut file := pathlib.get_file(
+	path:   '/some/file.txt'
+	create: true
+)!
+```
+
+## 2. Path Properties and Operations
+
+```v
+// Get various path forms
+abs_path := path.absolute() // Full absolute path
+real_path := path.realpath() // Resolves symlinks
+short_path := path.shortpath() // Uses ~ for home dir
+
+// Get path components
+name := path.name() // Filename with extension
+name_no_ext := path.name_no_ext() // Filename without extension
+dir_path := path.path_dir() // Directory containing the path
+
+// Check path properties
+if path.exists() { /* exists */ }
+if path.is_file() { /* is file */ }
+if path.is_dir() { /* is directory */ }
+if path.is_link() { /* is symlink */ }
+```
+
+## 3. Common File Operations
+
+```v
+// Empty a directory
+mut dir := pathlib.get_dir(
+	path:  '/some/dir'
+	empty: true
+)!
+
+// Delete a path
+mut path := pathlib.get_dir(
+	path:   '/path/to/delete'
+	delete: true
+)!
+
+// Get working directory
+mut wd := pathlib.get_wd()
+```
+
+## Features
+
+The module handles common edge cases:
+- Automatically expands ~ to home directory
+- Creates parent directories as needed
+- Provides proper error handling with V's result type
+- Checks path existence and type
+- Handles both absolute and relative paths
+
+## Path Object Structure
+
+Each Path object contains:
+- `path`: The actual path string
+- `cat`: Category (file/dir/link)
+- `exist`: Existence status
+
+This provides a safe and convenient API for all file system operations in V.
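+
+## 4. Relative Paths and Temp Files
+
+A minimal sketch of the relative-path helpers and `temp_write` from this module; the paths used here are hypothetical and `path_relative` requires the source dir to actually exist:
+
+```v
+import freeflowuniverse.herolib.core.pathlib
+
+// relative path that leads from a source dir to a target file
+rel := pathlib.path_relative('/tmp/a/b/c', '/tmp/a/d.txt')! // '../../d.txt'
+
+// deepest common ancestor of a set of paths
+anc := pathlib.find_common_ancestor(['/tmp/a/b/c/d', '/tmp/a/']) // '/tmp/a'
+
+// write a script to a temp location and get its path back
+script_path := pathlib.temp_write(name: 'demo', text: 'echo hello')!
+```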
diff --git a/lib/core/pathlib/template.v b/lib/core/pathlib/template.v
new file mode 100644
index 00000000..b382a42b
--- /dev/null
+++ b/lib/core/pathlib/template.v
@@ -0,0 +1,21 @@
+module pathlib
+
+import freeflowuniverse.herolib.core.texttools
+import os
+import freeflowuniverse.herolib.ui.console
+
+// template is the text coming from template engine.
+pub fn template_write(template_ string, dest string, overwrite bool) ! {
+	mut template := texttools.template_replace(template_)
+	if overwrite || !(os.exists(dest)) {
+		mut p := get_file(path: dest, create: true)!
+		$if debug {
+			console.print_header(" write template to '${dest}'")
+		}
+		p.write(template)!
+	}
+}
+
+pub fn (mut path Path) template_write(template_ string, overwrite bool) ! {
+	template_write(template_, path.path, overwrite)!
+}
diff --git a/lib/core/playbook/readme.md b/lib/core/playbook/readme.md
index ca399547..c6b6a4e8 100644
--- a/lib/core/playbook/readme.md
+++ b/lib/core/playbook/readme.md
@@ -20,7 +20,7 @@ import freeflowuniverse.herolib.core.playcmds
 // session ?&base.Session is optional
 mut plbook := playbook.new(path: "....")!
 
-//now we run all the commands as they are pre-defined in crystallib (herolib)
+//now we run all the commands as they are pre-defined in herolib
 playcmds.run(mut plbook)!
 
diff --git a/lib/core/texttools/array.v b/lib/core/texttools/array.v
new file mode 100644
index 00000000..cc03e00d
--- /dev/null
+++ b/lib/core/texttools/array.v
@@ -0,0 +1,77 @@
+module texttools
+
+// a comma or \n separated list gets converted to a list of strings .
+// items quoted as '..' get their quotes removed
+// check also split_smart which is more intelligent
+pub fn to_array(r string) []string {
+	mut res := []string{}
+	mut r2 := dedent(r)
+	r2 = r2.replace(',', '\n')
+
+	for mut line in r2.split_into_lines() {
+		line = line.trim_space()
+		if line.trim('\'"') == '' {
+			continue
+		}
+		res << line.trim("'")
+	}
+	return res
+}
+
+pub fn to_array_int(r string) []int {
+	mut r2 := to_array(r).map(it.int())
+	return r2
+}
+
+// intelligent way how to map a line to a map
+//```
+// r:=texttools.to_map("name,-,-,-,-,pid,-,-,-,-,path",
+// 	"root 304 0.0 0.0 408185328 1360 ?? S 16Dec23 0:34.06 /usr/sbin/distnoted\n \n", '')
+// assert {'name': 'root', 'pid': '1360', 'path': '/usr/sbin/distnoted'} == r
+
+// r2:=texttools.to_map("name,-,-,-,-,pid,-,-,-,-,path",
+// 	"root 304 0.0 0.0 408185328 1360 ?? S 16Dec23 0:34.06 /usr/sbin/distnoted anotherone anotherone\n \n", '')
+// assert {'name': 'root', 'pid': '1360', 'path': '/usr/sbin/distnoted'} == r2
+
+// r3:=texttools.to_map("name,-,-,-,-,pid,-,-,-,-,path",
+// 	"root 304 0.0 0.0 408185328 1360 ?? S 16Dec23 0:34.06 \n \n", '')
+// assert {'name': 'root', 'pid': '1360', 'path': ''} == r3
+//```
+pub fn to_map(mapstring string, line string, delimiter_ string) map[string]string {
+	mapstring_array := split_smart(mapstring, '')
+	mut line_array := split_smart(line, '')
+	mut result := map[string]string{}
+	for x in 0 .. mapstring_array.len {
+		mapstring_item := mapstring_array[x] or { '' }
+		if mapstring_item != '-' {
+			result[mapstring_item] = line_array[x] or { '' }
+		}
+	}
+	return result
+}
+
+// smart way how to get useful info out of a text block
+// ```
+// t:='
+// _cmiodalassistants 304 0.0 0.0 408185328 1360 ?? S 16Dec23 0:34.06 /usr/sbin/distnoted agent
+// _locationd 281 0.0 0.0 408185328 1344 ?? S 16Dec23 0:35.80 /usr/sbin/distnoted agent
+
+// root 275 0.0 0.0 408311904 7296 ?? Ss 16Dec23 2:00.56 /usr/libexec/storagekitd
+// _coreaudiod 268 0.0 0.0 408185328 1344 ?? S 16Dec23 0:35.49 /usr/sbin/distnoted agent
+// '
+
+// r4:=texttools.to_list_map("name,-,-,-,-,pid,-,-,-,-,path",t, '')
+// assert [{'name': '_cmiodalassistants', 'pid': '1360', 'path': '/usr/sbin/distnoted'},
+// 	{'name': '_locationd', 'pid': '1344', 'path': '/usr/sbin/distnoted'},
+// 	{'name': 'root', 'pid': '7296', 'path': '/usr/libexec/storagekitd'},
+// 	{'name': '_coreaudiod', 'pid': '1344', 'path': '/usr/sbin/distnoted'}] == r4
+// ```
+pub fn to_list_map(mapstring string, txt_ string, delimiter_ string) []map[string]string {
+	mut result := []map[string]string{}
+	mut txt := remove_empty_lines(txt_)
+	txt = dedent(txt)
+	for line in txt.split_into_lines() {
+		result << to_map(mapstring, line, delimiter_)
+	}
+	return result
+}
diff --git a/lib/core/texttools/clean.v b/lib/core/texttools/clean.v
new file mode 100644
index 00000000..484b6a3b
--- /dev/null
+++ b/lib/core/texttools/clean.v
@@ -0,0 +1,103 @@
+// make sure that the names are always normalized so it's easy to find them back
+module texttools
+
+const ignore_for_name = '\\/[]()?!@#$%^&*<>:;{}|~'
+
+const keep_ascii = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()_-+={}[]"\':;?/>.<,|\\~` '
+
+pub fn name_clean(r string) string {
+	mut res := []string{}
+	for ch in r {
+		mut c := ch.ascii_str()
+		if ignore_for_name.contains(c) {
+			continue
+		}
+		res << c
+	}
+	return res.join('')
+}
+
+// remove all chars which are not ascii
+pub fn ascii_clean(r string) string {
+	mut res := []string{}
+	for ch in r {
+		mut c := ch.ascii_str()
+		if keep_ascii.contains(c) {
+			res << c
+		}
+	}
+	return res.join('')
+}
+
+// https://en.wikipedia.org/wiki/Unicode#Standardized_subsets
+
+pub fn remove_empty_lines(text string) string {
+	mut out := []string{}
+	for l in text.split_into_lines() {
+		if l.trim_space() == '' {
+			continue
+		}
+		out << l
+	}
+	return out.join('\n')
+}
+
+pub fn remove_double_lines(text string) string {
+	mut out := []string{}
+	mut prev := true
+	for l in text.split_into_lines() {
+		if l.trim_space() == '' {
+			if prev {
+				continue
+			}
+			out << ''
+			prev = true
+			continue
+		}
+		prev = false
+		out << l
+	}
+	if out.len > 0 && out.last() == '' {
+		out.pop()
+	}
+	return out.join('\n')
+}
+
+// remove ```?? ``` , can be over multiple lines .
+// also removes double lines
+pub fn remove_empty_js_blocks(text string) string {
+	mut out := []string{}
+	mut block_capture_pre := ''
+	mut block_capture_inside := []string{}
+	mut foundblock := false
+	for l in text.split_into_lines() {
+		lt := l.trim_space()
+		if lt.starts_with('```') || lt.starts_with("'''") || lt.starts_with('"""') {
+			if foundblock {
+				if block_capture_inside.filter(it.trim_space() != '').len > 0 {
+					// now we know the block inside is not empty
+					out << block_capture_pre
+					out << block_capture_inside
+					out << l // the last line
+				}
+				foundblock = false
+				block_capture_pre = ''
+				block_capture_inside = []string{}
+				continue
+			} else {
+				foundblock = true
+				block_capture_pre = l
+				continue
+			}
+		}
+		if foundblock {
+			block_capture_inside << l
+		} else {
+			out << l
+		}
+	}
+	if out.len > 0 && out.last() == '' {
+		out.pop()
+	}
+	return remove_double_lines(out.join('\n'))
+}
diff --git a/lib/core/texttools/clean_test.v b/lib/core/texttools/clean_test.v
new file mode 100644
index 00000000..799660d2
--- /dev/null
+++ b/lib/core/texttools/clean_test.v
@@ -0,0 +1,49 @@
+module texttools
+
+fn test_clean1() {
+	mut text := "
+	'''js
+
+	'''
+	something
+	yes
+
+	else
+
+	```js
+
+	```
+
+	'''js
+
+	inside
+	'''
+
+
+	"
+
+	mut result := "
+	something
+	yes
+
+	else
+
+	'''js
+
+	inside
+	'''
+	"
+
+	text = dedent(text)
+	result = dedent(result)
+
+	text2 := remove_double_lines(remove_empty_js_blocks(text))
+
+	print('---')
+	print(text2)
+	print('---')
+	print(result)
+	print('---')
+
+	assert text2.trim_space() == result.trim_space()
+}
diff --git a/lib/core/texttools/cmdline_parser.v b/lib/core/texttools/cmdline_parser.v
new file mode 100644
index 00000000..64116023
--- /dev/null
+++ b/lib/core/texttools/cmdline_parser.v
@@ -0,0 +1,106 @@
+module texttools
+
+enum TextArgsStatus {
+	start
+	quote // quote found means value in between ''
+}
+
+// remove all '..' and "..." from a text, so everything in between the quotes
+pub fn text_remove_quotes(text string) string {
+	mut out := ''
+	mut inquote := false
+	mut ch := ''
+	mut char_previous := ''
+	for i in 0 .. text.len {
+		ch = text[i..i + 1]
+		if ch in ['"', "'"] {
+			if char_previous != '\\' {
+				inquote = !inquote
+				char_previous = ch
+				continue
+			}
+		}
+		if !inquote {
+			// only add chars when we are not inside a quote
+			out += ch
+		}
+		char_previous = ch
+	}
+	return out
+}
+
+// test if an element of the array exists in the text but ignore quotes
+pub fn check_exists_outside_quotes(text string, items []string) bool {
+	text2 := text_remove_quotes(text)
+	for i in items {
+		if text2.contains(i) {
+			return true
+		}
+	}
+	return false
+}
+
+// convert text string to arguments
+// \n supported but will be \\n and only supported within '' or ""
+// \' not modified, same for \"
+pub fn cmd_line_args_parser(text string) ![]string {
+	mut res := []string{}
+	mut quote := ''
+	mut char_previous := ''
+	mut arg := ''
+	mut ch := ''
+
+	if check_exists_outside_quotes(text, ['<', '>', '|']) {
+		if !(text.contains(' ')) {
+			return error("cannot convert text '${text}' to args because no space to split")
+		}
+		splitted := text.split_nth(' ', 2)
+		return [splitted[0], splitted[1]]
+	}
+	for i in 0 .. text.len {
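+		// walk over the text one char at a time, tracking whether we are inside quotes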
+		ch = text[i..i + 1]
+		// skip spaces which are not escaped
+		if ch == ' ' && arg == '' {
+			continue
+		}
+
+		if ch in ['"', "'"] {
+			if char_previous != '\\' {
+				if quote == '' {
+					// beginning of quote need to close off previous arg
+					if arg != '' {
+						res << arg.trim(' ')
+						arg = ''
+					}
+					quote = ch
+					char_previous = ch
+					continue
+				} else {
+					// end of quote
+					quote = ''
+					res << arg.trim(' ')
+					arg = ''
+					char_previous = ch
+					continue
+				}
+			}
+		}
+
+		if quote != '' {
+			// unmodified add, because we are in quote
+			arg += ch
+		} else {
+			if ch == ' ' && arg != '' {
+				res << arg.trim(' ')
+				arg = ''
+			} else {
+				arg += ch
+			}
+		}
+		char_previous = ch
+	}
+	if arg != '' {
+		res << arg.trim(' ')
+	}
+	return res
+}
diff --git a/lib/core/texttools/cmdline_parser_test.v b/lib/core/texttools/cmdline_parser_test.v
new file mode 100644
index 00000000..0837cf1d
--- /dev/null
+++ b/lib/core/texttools/cmdline_parser_test.v
@@ -0,0 +1,38 @@
+module texttools
+
+// how to process command lines
+fn test_cmdline_args() {
+	mut r := []string{}
+	r = cmd_line_args_parser("'aa bb' ' cc dd' one -two") or { panic(err) }
+	assert r == ['aa bb', 'cc dd', 'one', '-two']
+	r = cmd_line_args_parser("'\taa bb' ' cc dd' one -two") or { panic(err) }
+	assert r == ['\taa bb', 'cc dd', 'one', '-two']
+	// now spaces
+	r = cmd_line_args_parser(" '\taa bb' ' cc dd' one -two ") or { panic(err) }
+	assert r == ['\taa bb', 'cc dd', 'one', '-two']
+	// now other quote
+	r = cmd_line_args_parser('"aa bb" " cc dd" one -two') or { panic(err) }
+	assert r == ['aa bb', 'cc dd', 'one', '-two']
+	r = cmd_line_args_parser('"aa bb" \' cc dd\' one -two') or { panic(err) }
+	assert r == ['aa bb', 'cc dd', 'one', '-two']
+
+	r = cmd_line_args_parser('find . /tmp') or { panic(err) }
+	assert r == ['find', '.', '/tmp']
+
+	r = cmd_line_args_parser("bash -c 'find /'") or { panic(err) }
+	assert r == ['bash', '-c', 'find /']
+
+	mut r2 := string('')
+	r2 = text_remove_quotes('echo "hi >" > /tmp/a.txt')
+	assert r2 == 'echo  > /tmp/a.txt'
+	r2 = text_remove_quotes("echo 'hi >' > /tmp/a.txt")
+	assert r2 == 'echo  > /tmp/a.txt'
+	r2 = text_remove_quotes("echo 'hi >' /tmp/a.txt")
+	assert r2 == 'echo  /tmp/a.txt'
+	assert check_exists_outside_quotes("echo 'hi >' > /tmp/a.txt", ['<', '>', '|'])
+	assert check_exists_outside_quotes("echo 'hi ' /tmp/a.txt |", ['<', '>', '|'])
+	assert !check_exists_outside_quotes("echo 'hi >' /tmp/a.txt", ['<', '>', '|'])
+
+	r = cmd_line_args_parser('echo "hi" > /tmp/a.txt') or { panic(err) }
+	assert r == ['echo', '"hi" > /tmp/a.txt']
+}
diff --git a/lib/core/texttools/expand.v b/lib/core/texttools/expand.v
new file mode 100644
index 00000000..a80b5115
--- /dev/null
+++ b/lib/core/texttools/expand.v
@@ -0,0 +1,13 @@
+module texttools
+
+// texttools.expand('|', 20, ' ')
+pub fn expand(txt_ string, l int, expand_with string) string {
+	mut txt := txt_
+	for _ in 0 .. l {
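+		// append the filler l times; the result is truncated back to length l below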
+		txt += expand_with
+	}
+	if txt.len > l {
+		txt = txt[0..l]
+	}
+	return txt
+}
diff --git a/lib/core/texttools/indent.v b/lib/core/texttools/indent.v
new file mode 100644
index 00000000..13355b88
--- /dev/null
+++ b/lib/core/texttools/indent.v
@@ -0,0 +1,46 @@
+module texttools
+
+pub fn indent(text string, prefix string) string {
+	mut res := []string{}
+	for line in text.split_into_lines() {
+		res << prefix + line
+	}
+	mut t := res.join_lines()
+	if !t.ends_with('\n') {
+		t += '\n'
+	}
+	return t
+}
+
+// remove all leading spaces at same level
+pub fn dedent(text string) string {
+	mut pre := 999
+	mut pre_current := 0
+	mut res := []string{}
+	text_lines := text.split_into_lines()
+
+	for line2 in text_lines {
+		if line2.trim_space() == '' {
+			continue
+		}
+		line2_expanded_tab := line2.replace('\t', '    ')
+		line2_expanded_tab_trimmed := line2_expanded_tab.trim_left(' ')
+		pre_current = line2_expanded_tab.len - line2_expanded_tab_trimmed.len
+		if pre > pre_current {
+			pre = pre_current
+		}
+	}
+	// now remove the prefix length
+	for line2 in text_lines {
+		line2_expanded_tab := line2.replace('\t', '    ') // important to deal with tabs
+		line2_expanded_tab_trimmed := line2.trim_space()
+
+		if line2_expanded_tab_trimmed == '' {
+			res << ''
+		} else {
+			res << line2_expanded_tab[pre..]
+		}
+	}
+	final_result := res.join_lines()
+	return final_result
+}
diff --git a/lib/core/texttools/indent_test.v b/lib/core/texttools/indent_test.v
new file mode 100644
index 00000000..ca21bb42
--- /dev/null
+++ b/lib/core/texttools/indent_test.v
@@ -0,0 +1,15 @@
+module texttools
+
+fn test_dedent() {
+	mut text := '
+	a
+	b
+
+	c
+	d
+
+
+	'
+	text = dedent(text)
+	assert text.len == 20
+}
diff --git a/lib/core/texttools/is_tools.v b/lib/core/texttools/is_tools.v
new file mode 100644
index 00000000..e53203ca
--- /dev/null
+++ b/lib/core/texttools/is_tools.v
@@ -0,0 +1,31 @@
+module texttools
+
+pub fn is_int(text string) bool {
+	for cha in text {
+		if cha < 48 || cha > 57 {
+			return false
+		}
+	}
+	return true
+}
+
+pub fn is_upper_text(text string) bool {
+	for cha in text {
+		if cha < 65 || cha > 90 {
+			return false
+		}
+	}
+	return true
+}
+
+// fn sid_check(sid string) bool {
+// 	if sid.len > 6 || sid.len < 2 {
+// 		return false
+// 	}
+// 	for cha in sid {
+// 		if (cha < 48 || cha > 57) && (cha < 97 || cha > 122) {
+// 			return false
+// 		}
+// 	}
+// 	return true
+// }
diff --git a/lib/core/texttools/is_tools_test.v b/lib/core/texttools/is_tools_test.v
new file mode 100644
index 00000000..8729bac1
--- /dev/null
+++ b/lib/core/texttools/is_tools_test.v
@@ -0,0 +1,18 @@
+module texttools
+
+fn test_istest1() {
+	assert is_int('0000')
+	assert is_int('999')
+	assert is_int('0')
+	assert is_int('9')
+	assert is_int('00 00') == false
+	assert is_int('00a00') == false
+
+	assert is_upper_text('A')
+	assert is_upper_text('Z')
+	assert is_upper_text('AAZZZZAAA')
+	assert is_upper_text('z') == false
+	assert is_upper_text('AAZZZZaAA') == false
+	assert is_upper_text('AAZZZZ?AA') == false
+	assert is_upper_text("AAZZZZ'AA") == false
+}
diff --git a/lib/core/texttools/multiline.v b/lib/core/texttools/multiline.v
new file mode 100644
index 00000000..34eb1f78
--- /dev/null
+++ b/lib/core/texttools/multiline.v
@@ -0,0 +1,163 @@
+module texttools
+
+pub enum MultiLineStatus {
+	start
+	multiline
+	comment
+}
+
+// converts a multiline to a single line, keeping all relevant information
+// empty lines removed (unless inside a parameter)
+// commented lines removed as well (starts with // and #)
+// multiline to 'line1\\nline2\\n'
+// dedent also done before putting in '...'
+// tabs also replaced to 4x space
+pub fn multiline_to_single(text string) !string {
+	mut multiline_first := ''
+	mut multiline := ''
+	// mut comment_first:=""
+	mut comment := []string{}
+	mut line2 := ''
+	mut res := []string{}
+	mut state := MultiLineStatus.start
+	for line in text.split_into_lines() {
+		line2 = line
+		line2 = line2.replace('\t', '    ')
+		mut line2_trimmed := line2.trim_space()
+		if state == .multiline {
+			if multiline_end_check(line2_trimmed) {
+				// means we are out of multiline
+				res << multiline_end(multiline_first, multiline)
+				multiline_first = ''
+				multiline = ''
+				state = .start
+			} else {
+				multiline += '${line2}\n'
+			}
+			continue
+		}
+		if state == .comment {
+			if comment_end_check(line2_trimmed) {
+				// means we are out of the comment
+				res << comment_end(comment)
+				comment = []string{}
+				state = .start
+			} else {
+				comment << line2_trimmed
+				continue
+			}
+		}
+		if state == .start {
+			if line2_trimmed == '' {
+				continue
+			}
+			// deal with comments
+			mut commentpart := ''
+			line2_trimmed, commentpart = comment_start_check(mut res, line2_trimmed)
+			if commentpart.len > 0 {
+				state = .comment
+				comment = []string{}
+				comment << commentpart
+				continue
+			}
+			if multiline_start_check(line2_trimmed) {
+				// means is multiline
+				state = .multiline
+				multiline_first = line2_trimmed
+				continue
+			}
+			res << line2_trimmed.trim('\n ')
+		}
+	}
+	// last one
+	if state == .multiline {
+		res << multiline_end(multiline_first, multiline)
+	}
+	if state == .comment {
+		res << comment_end(comment)
+	}
+	return res.join(' ')
+}
+
+fn multiline_end(multiline_first string, multiline string) string {
+	mut multiline2 := multiline
+
+	multiline2 = dedent(multiline2)
+	multiline2 = multiline2.replace('\n', '\\\\n')
+	multiline2 = multiline2.replace("'", '"')
+
+	firstline_content := multiline_first.all_after_first(':').trim_left('" \'')
+	name := multiline_first.all_before(':').trim_space()
+
+	if firstline_content.trim_space() != '' {
+		multiline2 = "${name}:'${multiline_first}\\n${multiline2}'"
+	} else {
+		multiline2 = "${name}:'${multiline2}'"
+	}
+	return multiline2
+}
+
+// check that there is multiline start
+fn multiline_start_check(text_ string) bool {
+	if text_ == '' {
+		return false
+	}
+	text := text_.replace(':   ', ':').replace(':  ', ':').replace(': ', ':')
+	for tocheck in [":'", ':"', ':"""', ":'''"] {
+		if text.ends_with(tocheck) {
+			return true
+		}
+	}
+	return false
+}
+
+fn multiline_end_check(text string) bool {
+	if text == "'" || text == '"' || text == '"""' || text == "'''" {
+		return true
+	}
+	return false
+}
+
+// return all before comment and if comment
+// return trimmedtext,commentpart
+fn comment_start_check(mut res []string, text_ string) (string, string) {
+	mut text := text_
+	if text.starts_with('//') {
+		// the whole line is a comment, hand it back so the caller can collect it
+		return '', text
+	}
+	if text.contains('//') {
+		// inline comment: push it to the result immediately, return the text before it
+		res << '//${text.all_after('//')}-/'
+		text = text.all_before('//').trim_space()
+	}
+	return text, ''
+}
+
+fn comment_end_check(text string) bool {
+	if text == '' {
+		return true
+	}
+	if !text.starts_with('//') {
+		return true
+	}
+	return false
+}
+
+fn comment_end(comment []string) string {
+	mut out := []string{}
+	for line in comment {
+		out << line.trim(' <->/\n')
+	}
+	mut outstr := out.join('\\\\n')
+	return '//${outstr}-/'
+}
diff --git a/lib/core/texttools/mutliline_test.v b/lib/core/texttools/mutliline_test.v
new file mode 100644
index 00000000..bad6953b
--- /dev/null
+++ b/lib/core/texttools/mutliline_test.v
@@ -0,0 +1,205 @@
+module texttools
+
+fn check_result(tocheck_ string, output string) {
+	mut tocheck := tocheck_
+	tocheck = tocheck.replace('\\n', '\\\\n')
+	// tocheck=tocheck.replace("\'","\\'")
+	tocheck = tocheck.trim_space()
+	if tocheck == output.trim_space() {
+		return
+	}
+
+	panic('required result not correct.')
+}
+
+fn test_multiline1() {
+	mut text := "
+	id:a1
+	name:'need to do something 1'
+	description:'
+		## markdown works in it
+
+		description can be multiline
+		lets see what happens
+
+		'yes, this needs to work too'
+
+		- a
+		- something else
+		- 'something
+
+		### subtitle
+
+		```python
+		#even code block in the other block, crazy parsing for sure
+		def test():
+
+		```
+	'
+	"
+	text = multiline_to_single(text) or { panic(err) }
+
+	required_result := 'id:a1 name:\'need to do something 1\' description:\'## markdown works in it\\n\\ndescription can be multiline\\nlets see what happens\\n\\n"yes, this needs to work too"\\n\\n- a\\n- something else\\n- "something\\n\\n### subtitle\\n\\n```python\\n#even code block in the other block, crazy parsing for sure\\ndef test():\\n\\n```\''

+	check_result(required_result, text)
+}
+
+fn test_multiline2() {
+	mut text := '
+	id:a1
+	name:\'need to do something 1\'
+	description:"
+		## markdown works in it
+
+		description can be multiline
+		lets see what happens
+	\'
+	'
+	text = multiline_to_single(text) or { panic(err) }
+
+	required_result := "id:a1 name:'need to do something 1' description:'## markdown works in it\\n\\ndescription can be multiline\\nlets see what happens'"
+
+	check_result(required_result, text)
+}
+
+fn test_multiline3() {
+	mut text := '
+	id:a1
+	name:\'need to do something 1\'
+	description: """
+		## markdown works in it
+
+		description can be multiline
+		lets see what happens
+	\'
+	'
+	text = multiline_to_single(text) or { panic(err) }
+
+	required_result := "id:a1 name:'need to do something 1' description:'## markdown works in it\\n\\ndescription can be multiline\\nlets see what happens'"

+	check_result(required_result, text)
+}
+
+fn test_multiline4() {
+	mut text := '
+	id:a1
+	name:\'need to do something 1\'
+	description: """
+		## markdown works in it
+
+		description can be multiline
+		lets see what happens
+	"""
+	'
+	text = multiline_to_single(text) or { panic(err) }
+
+	required_result := "id:a1 name:'need to do something 1' description:'## markdown works in it\\n\\ndescription can be multiline\\nlets see what happens'"

+	check_result(required_result, text)
+}
+
+fn test_multiline5() {
+	mut text := "
+	id:a1 //comment1
+	// a comment
+	name:'need to do something 1'
+	description: '
+		## markdown works in it
+
+		description can be multiline
+		lets see what happens
+	'
+	//another comment
+	"
+	text = multiline_to_single(text) or { panic(err) }
+
+	required_result := "//comment1-/ id:a1 //a comment-/ name:'need to do something 1' description:'## markdown works in it\\n\\ndescription can be multiline\\nlets see what happens' //another comment-/"

+	check_result(required_result, text)
+}
+
+fn test_multiline6() {
+	mut text := "
+	id:a1 //comment1
+
+	// comment m 1
+	// comment m 2
+	//
+	// comment m 3
+	//
+
+	name:'need to do something 1'
+	description: '
+		## markdown works in it
+
+		description can be multiline
+		lets see what happens
+	'
+
+	"
+	text = multiline_to_single(text) or { panic(err) }
+
+	required_result := "//comment1-/ id:a1 //comment m 1\\ncomment m 2\\n\\ncomment m 3\\n-/ name:'need to do something 1' description:'## markdown works in it\\n\\ndescription can be multiline\\nlets see what happens' //another comment-/"

+	check_result(required_result, text)
+}
+
+// @[assert_continues]
+// fn test_comment_start_check() {
+// 	// TEST: `hello // world, this is mario'`, `hello //world //this is mario`
+// 	mut res := []string{}
+// 	mut str := "hello // world, this is mario'"
+// 	mut text, mut comment := comment_start_check(mut res, str)

+// 	assert text == 'hello'
+// 	assert res == ["// world, this is mario'-/"]
+// 	assert comment == ''

+// 	res = []string{}
+// 	str = 'hello //world //this is mario'
+// 	text, comment = comment_start_check(mut res, str)

+// 	assert text == 'hello'
+// 	assert res == ['//world //this is mario-/']
+// 	assert comment == ''
+// }

+// @[assert_continues]
+// fn test_multiline_start_check() {
+// 	// TEST: `hello '''world:'''`, `hello ' world:'`, `hello " world:"`, `hello """ world: """`
+// 	mut text := ["hello '''world:'''", "hello ' world:'", 'hello " world:"', 'hello """ world: """',
+// 		'hello world: """\n"""']
+// 	expected := [false, false, false, false, true]
+// 	for idx, input in text {
+// 		got := multiline_start_check(input)
+// 		assert got == expected[idx]
+// 	}
+// }

+// TODO: not supported yet, requires a Comment Struct, which knows its

+// name:'need to do something 1'
+// description: '
+// ## markdown works in it

+// description can be multiline
+// lets see what happens
+// '
+//
+// "
+// text = multiline_to_single(text) or { panic(err) }

+// required_result:="//comment1-/ id:a1 //comment m 1\\ncomment m 2\\n\\ncomment m 3\\n-/ name:'need to do something 1' description:'## markdown works in it\\n\\ndescription can be multiline\\nlets see what happens' //another comment-/"

+// check_result(required_result,text)

+// }
diff --git a/lib/core/texttools/namefix.v b/lib/core/texttools/namefix.v
new file mode 100644
index 00000000..793f7e14
--- /dev/null
+++ b/lib/core/texttools/namefix.v
@@ -0,0 +1,178 @@
+// make sure that the names are always normalized so it's easy to find them back
+module texttools
+
+import os
+
+pub fn email_fix(name string) !string {
+	mut name2 := name.to_lower().trim_space()
+	if name2.contains('<') {
+		name2 = name2.split('<')[1].split('>')[0]
+	}
+	if !name2.is_ascii() {
+		return error('email needs to be ascii, was ${name}')
+	}
+	if name2.contains(' ') {
+		return error('email cannot have spaces, was ${name}')
+	}
+	return name2
+}
+
+// like name_fix but _ becomes space
+pub fn name_fix_keepspace(name string) !string {
+	mut name2 := name_fix(name)
+	name2 = name2.replace('_', ' ')
+	return name2
+}
+
+// fix a string which represents a tel nr
+pub fn tel_fix(name_ string) !string {
+	mut name := name_.to_lower().trim_space()
+	for x in ['[', ']', '{', '}', '(', ')', '*', '-', '.', ' '] {
+		name = name.replace(x, '')
+	}
+	if !name.is_ascii() {
+		return error('tel nr needs to be ascii, was ${name}')
+	}
+	return name
+}
+
+pub fn wiki_fix(content_ string) string {
+	mut content := content_
+	for _ in 0 .. 5 {
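+		// repeatedly collapse triple newlines into double ones (5 passes is enough in practice)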
+		content = content.replace('\n\n\n', '\n\n')
+	}
+	content = content.replace('\n\n-', '\n-')
+	return content
+}
+
+pub fn action_multiline_fix(content string) string {
+	if content.trim_space().contains('\n') {
+		splitted := content.split('\n')
+		mut out := '\n'
+		for item in splitted {
+			out += ' ${item}\n'
+		}
+		return out
+	}
+	return content.trim_space()
+}
+
+pub fn name_fix(name string) string {
+	name2 := name_fix_keepext(name)
+	return name2
+}
+
+pub fn name_fix_list(name string) []string {
+	name2 := name_fix_keepext(name)
+	return name2.split(',').map(it.trim_space()).map(name_fix(it))
+}
+
+// get name back, keep extension and underscores, but when the name ends on .md then remove the extension
+pub fn name_fix_no_md(name string) string {
+	name2 := name_fix_keepext(name)
+	if name2.ends_with('.md') {
+		name3 := name2[0..name2.len - 3]
+		return name3
+	}
+	return name2
+}
+
+pub fn name_fix_no_underscore(name string) string {
+	mut name2 := name_fix_keepext(name)
+	x := name2.replace('_', '')

+	return x
+}
+
+pub fn name_fix_snake_to_pascal(name string) string {
+	x := name.replace('_', ' ')
+	p := x.title().replace(' ', '')
+	return p
+}
+
+pub fn name_fix_dot_notation_to_pascal(name string) string {
+	x := name.replace('.', ' ')
+	p := x.title().replace(' ', '')
+	return p
+}
+
+pub fn name_fix_pascal(name string) string {
+	name_ := name_fix_snake_to_pascal(name)
+	return name_fix_dot_notation_to_pascal(name_)
+}
+
+pub fn name_fix_pascal_to_snake(name string) string {
+	mut fixed := ''
+	for i, c in name {
+		if c.is_capital() && i != 0 {
+			fixed += '_'
+		}
+		fixed += c.ascii_str()
+	}
+	return fixed.to_lower()
+}
+
+pub fn name_fix_dot_notation_to_snake_case(name string) string {
+	return name.replace('.', '_')
+}
+
+// remove underscores and extension
+pub fn name_fix_no_underscore_no_ext(name_ string) string {
+	return name_fix_keepext(name_).all_before_last('.').replace('_', '')
+}
+
+// remove extension and a trailing underscore
+pub fn name_fix_no_ext(name_ string) string {
+	return name_fix_keepext(name_).all_before_last('.').trim_right('_')
+}
+
+pub fn name_fix_keepext(name_ string) string {
+	mut name := name_.to_lower().trim_space()
+	if name.contains('#') {
+		old_name := name
+		name = old_name.split('#')[0]
+	}
+
+	// need to replace . to _ but not the last one (because is ext)
+	fext := os.file_ext(name)
+	extension := fext.trim('.')
+	if extension != '' {
+		name = name[..(name.len - extension.len - 1)]
+	}
+
+	to_replace_ := '-;:. '
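+	// each of these chars becomes an underscore (skipped when it would directly follow one)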
+	mut to_replace := []u8{}
+	for i in to_replace_ {
+		to_replace << i
+	}
+
+	mut out := []u8{}
+	mut prev := u8(0)
+	for u in name {
+		if u == 95 { // underscore
+			if prev != 95 {
+				// only when previous is not _
+				out << u
+			}
+		} else if u > 47 && u < 58 { // see https://www.charset.org/utf-8
+			out << u
+		} else if u > 96 && u < 123 {
+			out << u
+		} else if u in to_replace {
+			if prev != 95 {
+				out << u8(95)
+			}
+		} else {
+			// means previous one should not be used
+			continue
+		}
+		prev = u
+	}
+	name = out.bytestr()
+
+	// name = name.trim(' _') //DONT DO final _ is ok to keep
+	if extension.len > 0 {
+		name += '.${extension}'
+	}
+	return name
+}
diff --git a/lib/core/texttools/namefix_test.v b/lib/core/texttools/namefix_test.v
new file mode 100644
index 00000000..5034865f
--- /dev/null
+++ b/lib/core/texttools/namefix_test.v
@@ -0,0 +1,8 @@
+module texttools
+
+fn test_main() {
+	assert name_fix_keepext('\$sds__ 4F') == 'sds_4f'
+	assert name_fix_keepext('\$sds_?__ 4F') == 'sds_4f'
+	assert name_fix_keepext('\$sds_?_!"`{_ 4F') == 'sds_4f'
+	assert name_fix_keepext('\$sds_?_!"`{_ 4F.jpg') == 'sds_4f.jpg'
+}
diff --git a/lib/core/texttools/namesplit.v b/lib/core/texttools/namesplit.v
new file mode 100644
index 00000000..b54d1ad7
--- /dev/null
+++ b/lib/core/texttools/namesplit.v
@@ -0,0 +1,56 @@
+module texttools
+
+import os
+
+// return (sitename,pagename)
+// sitename will be empty string if not specified with site:... or site__...
+pub fn name_split(name string) !(string, string) {
+	mut objname := name.trim(' ')
+	objname = objname.trim_left('.')
+
+	if name.contains('__') {
+		parts := name.split('__')
+		if parts.len != 2 {
+			return error('filename not well formatted. Needs to have 2 parts around "__". Now ${name}.')
+		}
+		objname = '${parts[0].trim(' ')}:${parts[1].trim(' ')}'
+	}
+
+	// to deal with things like "img/tf_world.jpg ':size=300x160'"
+	splitted0 := objname.split(' ')
+	if splitted0.len > 0 {
+		objname = splitted0[0]
+	}
+	objname = name_fix(objname)
+	mut sitename := ''
+	splitted := objname.split(':')
+	if splitted.len == 1 {
+		objname = splitted[0]
+	} else if splitted.len == 2 {
+		sitename = splitted[0]
+		objname = splitted[1]
+	} else {
+		return error("name needs to be in format 'sitename:filename' or 'filename', now '${objname}'")
+	}
+	objname = objname.trim_left('.')
+	if objname.contains('/') {
+		objname = os.base(objname)
+		if objname.trim(' ') == '' {
+			return error('objname empty for os.base')
+		}
+	}
+	// make sure we don't have the e.g. img/ in
+	if objname.trim('/ ') == '' {
+		return error('objname empty: ${name}')
+	}
+	if objname.ends_with('/') {
+		return error("objname cannot end with /: now '${name}'")
+	}
+	if objname.trim(' ') == '' {
+		return error('objname empty: ${name}')
+	}
+
+	// eprintln(" >> namesplit: '$sitename' '$objname'")
+
+	return sitename, objname
+}
diff --git a/lib/core/texttools/readme.md b/lib/core/texttools/readme.md
new file mode 100644
index 00000000..6d8751dc
--- /dev/null
+++ b/lib/core/texttools/readme.md
@@ -0,0 +1,146 @@
+# TextTools Module
+
+The TextTools module provides a comprehensive set of utilities for text manipulation and processing in V. It includes functions for cleaning, parsing, formatting, and transforming text in various ways.
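+
+For example (a minimal sketch using two of the helpers documented below):
+
+```v
+import freeflowuniverse.herolib.core.texttools
+
+assert texttools.name_fix('Hello World.MD') == 'hello_world.md'
+assert texttools.dedent('    a\n    b') == 'a\nb'
+```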
+
+## Features
+
+### Array Operations
+- `to_array(r string) []string` - Converts a comma or newline separated list to an array of strings
+- `to_array_int(r string) []int` - Converts a text list to an array of integers
+- `to_map(mapstring string, line string, delimiter_ string) map[string]string` - Intelligent mapping of a line to a map based on a template
+
+### Text Cleaning
+- `name_clean(r string) string` - Normalizes names by removing special characters
+- `ascii_clean(r string) string` - Removes all non-ASCII characters
+- `remove_empty_lines(text string) string` - Removes empty lines from text
+- `remove_double_lines(text string) string` - Removes consecutive empty lines
+- `remove_empty_js_blocks(text string) string` - Removes empty code blocks (```...```)
+
+### Command Line Parsing
+- `cmd_line_args_parser(text string) ![]string` - Parses command line arguments with support for quotes and escaping
+- `text_remove_quotes(text string) string` - Removes quoted sections from text
+- `check_exists_outside_quotes(text string, items []string) bool` - Checks if items exist in text outside of quotes
+
+### Text Expansion
+- `expand(txt_ string, l int, expand_with string) string` - Expands text to a specified length with a given character
+
+### Indentation
+- `indent(text string, prefix string) string` - Adds indentation prefix to each line
+- `dedent(text string) string` - Removes common leading whitespace from every line
+
+### String Validation
+- `is_int(text string) bool` - Checks if text contains only digits
+- `is_upper_text(text string) bool` - Checks if text contains only uppercase letters
+
+### Multiline Processing
+- `multiline_to_single(text string) !string` - Converts multiline text to a single line with proper escaping
+- Handles comments, code blocks, and preserves formatting
+
+### Name/Path Processing
+- `name_fix(name string) string` - Normalizes filenames and paths
+- `name_fix_keepspace(name string) !string` - Like name_fix but preserves spaces
+- `name_fix_no_ext(name_ string) string` - Removes file extension
+- `name_fix_snake_to_pascal(name string) string` - Converts snake_case to PascalCase
+- `name_fix_pascal_to_snake(name string) string` - Converts PascalCase to snake_case
+- `name_split(name string) !(string, string)` - Splits name into site and page components
+
+### Text Splitting
+- `split_smart(t string, delimiter_ string) []string` - Intelligent string splitting that respects quotes
+
+### Tokenization
+- `tokenize(text_ string) TokenizerResult` - Tokenizes text into meaningful parts
+- `text_token_replace(text string, tofind string, replacewith string) !string` - Replaces tokens in text
+
+### Version Parsing
+- `version(text_ string) int` - Converts version strings to comparable integers
+  - Example: "v0.4.36" becomes 4036
+  - Example: "v1.4.36" becomes 1004036
+
+## Usage Examples
+
+### Array Operations
+```v
+// Convert comma-separated list to array
+text := "item1,item2,item3"
+array := texttools.to_array(text)
+// Result: ['item1', 'item2', 'item3']
+
+// Smart mapping
+r := texttools.to_map("name,-,-,-,-,pid,-,-,-,-,path",
+    "root 304 0.0 0.0 408185328 1360 ?? S 16Dec23 0:34.06 /usr/sbin/distnoted", '')
+// Result: {'name': 'root', 'pid': '1360', 'path': '/usr/sbin/distnoted'}
+```
+
+### Text Cleaning
+```v
+// Clean name
+name := texttools.name_clean("Hello@World!")
+// Result: "HelloWorld"
+
+// Remove empty lines
+text := texttools.remove_empty_lines("line1\n\nline2\n\n\nline3")
+// Result: "line1\nline2\nline3"
+```
+
+### Command Line Parsing
+```v
+// Parse command line with quotes
+args := texttools.cmd_line_args_parser("'arg with spaces' --flag=value")
+// Result: ['arg with spaces', '--flag=value']
+```
+
+### Indentation
+```v
+// Add indentation
+text := texttools.indent("line1\nline2", " ")
+// Result: " line1\n line2\n"
+
+// Remove common indentation
+text := texttools.dedent(" line1\n line2")
+// Result: "line1\nline2"
+```
+
+### Name Processing
+```v
+// Convert to snake case
+name := texttools.name_fix_pascal_to_snake("HelloWorld")
+// Result: "hello_world"
+
+// Convert to pascal case
+name := texttools.name_fix_snake_to_pascal("hello_world")
+// Result: "HelloWorld"
+```
+
+### Version Parsing
+```v
+// Parse version string
+ver := texttools.version("v0.4.36")
+// Result: 4036
+
+ver := texttools.version("v1.4.36")
+// Result: 1004036
+```
+
+## Error Handling
+
+Many functions in the module return a Result type (indicated by `!` in the function signature). These functions can return errors that should be handled appropriately:
+
+```v
+// Example of error handling
+name := texttools.name_fix_keepspace("some@name") or {
+    println("Error: ${err}")
+    return
+}
+```
+
+## Best Practices
+
+1. Always use appropriate error handling for functions that return Results
+2. Consider using `dedent()` before processing multiline text to ensure consistent formatting
+3. When working with filenames, use the appropriate name_fix variant based on your needs
+4. For command line parsing, be aware of quote handling and escaping rules
+5. When using tokenization, consider the context and whether smart splitting is needed
+
+## Contributing
+
+The TextTools module is part of the herolib project. Contributions are welcome through pull requests.
diff --git a/lib/core/texttools/regext/readme.md b/lib/core/texttools/regext/readme.md
new file mode 100644
index 00000000..fb6acffd
--- /dev/null
+++ b/lib/core/texttools/regext/readme.md
@@ -0,0 +1,46 @@
+# regex
+
+## basic regex utilities
+
+- .
+
+## regex replacer
+
+Tool to flexibly replace elements in file(s) or text.
+
+The next example shows how it works:
+
+```v
+import freeflowuniverse.herolib.core.texttools.regext
+text := '
+
+this is test_1 SomeTest
+this is Test 1 SomeTest
+
+need to replace TF to ThreeFold
+need to replace ThreeFold0 to ThreeFold
+need to replace ThreeFold1 to ThreeFold
+
+'
+
+text_out := '
+
+this is TTT SomeTest
+this is TTT SomeTest
+
+need to replace ThreeFold to ThreeFold
+need to replace ThreeFold to ThreeFold
+need to replace ThreeFold to ThreeFold
+
+'
+
+mut ri := regext.regex_instructions_new()
+ri.add(['TF:ThreeFold0:ThreeFold1:ThreeFold']) or { panic(err) }
+ri.add_item('test_1', 'TTT') or { panic(err) }
+ri.add_item('^Stest 1', 'TTT') or { panic(err) } //will be case insensitive search
+
+mut text_out2 := ri.replace(text: text, dedent: true) or { panic(err) }
+```
+
diff --git a/lib/core/texttools/regext/regexgroups.v b/lib/core/texttools/regext/regexgroups.v
new file mode 100644
index 00000000..64f12888
--- /dev/null
+++ b/lib/core/texttools/regext/regexgroups.v
@@ -0,0 +1,41 @@
+module regext
+
+import regex
+
+// find parts of text which are in form {NAME}
+// .
+// NAME is as follows: .
+// Lowercase letters: a-z .
+// Digits: 0-9 .
+// Underscore: _ .
+// .
+// will return list of the found NAMEs
+pub fn find_simple_vars(txt string) []string {
+	pattern := r'\{(\w+)\}'
+	mut re := regex.regex_opt(pattern) or { panic(err) }
+
+	mut words := re.find_all_str(txt)
+
+	words = words.map(it.trim('{} '))
+	return words
+}
+
+fn remove_sid(c string) string {
+	if c.starts_with('sid:') {
+		return c[4..].trim_space()
+	}
+	return c
+}
+
+// find parts of text in form sid:abc up to sid:abcde (can be a...z 0...9) .
+// return list of the found elements .
+// to make all e.g. lowercase do e.g. words = words.map(it.to_lower()) after it
+pub fn find_sid(txt string) []string {
+	pattern := r'sid:[a-zA-Z0-9]{3,5}[\s$]'
+	mut re := regex.regex_opt(pattern) or { panic(err) }
+
+	mut words := re.find_all_str(txt)
+	// words = words.map(it.to_lower())
+	words = words.map(remove_sid(it))
+	return words
+}
diff --git a/lib/core/texttools/regext/regexgroups_test.v b/lib/core/texttools/regext/regexgroups_test.v
new file mode 100644
index 00000000..1e5ab24b
--- /dev/null
+++ b/lib/core/texttools/regext/regexgroups_test.v
@@ -0,0 +1,47 @@
+module regext
+
+fn test_stdtext() {
+	// this is a test without much fanciness, just text replace, no regex, all case sensitive
+
+	text := '
+
+!!action.something sid:aa733
+
+sid:aa733
+
+...sid:aa733 ss
+
+...sid:rrrrrr ss
+sid:997
+
+ sid:s d
+sid:s_d
+
+'
+
+	r := find_sid(text)
+
+	assert r == ['aa733', 'aa733', 'aa733', '997']
+}
+
+fn test_find_simple_vars() {
+	text := '
+
+!!action.something {sid}
+
+sid:aa733
+
+{a}
+
+...sid:rrrrrr ss {a_sdsdsdsd_e__f_g}
+sid:997
+
+ sid:s d
+sid:s_d
+
+'
+
+	r := find_simple_vars(text)
+
+	assert r == ['sid', 'a', 'a_sdsdsdsd_e__f_g']
+}
diff --git a/lib/core/texttools/regext/regexreplacer.v b/lib/core/texttools/regext/regexreplacer.v
new file mode 100644
index 00000000..b6c6d4bb
--- /dev/null
+++ b/lib/core/texttools/regext/regexreplacer.v
@@ -0,0 +1,272 @@
+module regext
+
+import freeflowuniverse.herolib.core.texttools
+import regex
+import freeflowuniverse.herolib.ui.console
+import os
+
+pub struct ReplaceInstructions {
+pub mut:
+	instructions []ReplaceInstruction
+}
+
+pub struct ReplaceInstruction {
+pub:
+	regex_str    string
+	find_str     string
+	replace_with string
+pub mut:
+	regex regex.RE
+}
+
+fn (mut self ReplaceInstructions) get_regex_queries() []string {
+	mut res := []string{}
+	for i in self.instructions {
+		res << i.regex.get_query()
+	}
+	return res
+}
+
+// rewrite a filter string to a regex .
+// each char will be checked for in lower case as well as upper case (will match both) .
+// will only look at ascii .
+//'_- ' will be replaced to match one or more spaces .
+// the returned result is a regex string
+pub fn regex_rewrite(r string) !string {
+	r2 := r.to_lower()
+	mut res := []string{}
+	for ch in r2 {
+		mut c := ch.ascii_str()
+		if 'abcdefghijklmnopqrstuvwxyz'.contains(c) {
+			char_upper := c.to_upper()
+			res << '[' + c + char_upper + ']'
+		} else if '0123456789'.contains(c) {
+			res << c
+		} else if '_- '.contains(c) {
+			// res << r"\[\\s _\\-\]*"
+			res << r' *'
+		} else if '\'"'.contains(c) {
+			continue
+		} else if '^&![]'.contains(c) {
+			return error('cannot rewrite regex: ${r}, found illegal char ^&![]')
+		}
+	}
+	return res.join('')
+	//+r"[\\n \:\!\.\?;,\\(\\)\\[\\]]"
+}
+
+// regex string see https://github.com/vlang/v/blob/master/vlib/regex/README.md .
+// find_str is a normal search (text) .
+// replace_with is the string we want to replace the match with
+fn (mut self ReplaceInstructions) add_item(regex_find_str string, replace_with string) ! {
+	mut item := regex_find_str
+	if item.starts_with('^R') {
+		item = item[2..] // remove ^R
+		r := regex.regex_opt(item) or { return error('regex_opt failed for: ${item} (${err})') }
+		self.instructions << ReplaceInstruction{
+			regex_str: item
+			regex: r
+			replace_with: replace_with
+		}
+	} else if item.starts_with('^S') {
+		item = item[2..] // remove ^S
+		item2 := regex_rewrite(item)!
+		r := regex.regex_opt(item2) or { return error('regex_opt failed for: ${item2} (${err})') }
+		self.instructions << ReplaceInstruction{
+			regex_str: item
+			regex: r
+			replace_with: replace_with
+		}
+	} else {
+		self.instructions << ReplaceInstruction{
+			replace_with: replace_with
+			find_str: item
+		}
+	}
+}
+
+// each element of the list can have more search statements .
+// a search statement can have 3 forms:
+// - regex, starts with ^R see https://github.com/vlang/v/blob/master/vlib/regex/README.md .
+// - case insensitive string find, starts with ^S (will internally be converted to regex) .
+// - just a string, this is a literal find (case sensitive) .
+// input is ["^Rregex:replacewith",...] .
+// input is ["^Rregex:^Rregex2:replacewith"] .
+// input is ["findstr:findstr:replacewith"] .
+// input is ["findstr:^Rregex2:replacewith"] .
pub fn (mut ri ReplaceInstructions) add(replacelist []string) ! {
+	for i in replacelist {
+		splitted := i.split(':')
+		replace_with := splitted[splitted.len - 1]
+		// the last part is the replacement, it is not a search term
+		if splitted.len < 2 {
+			return error("Cannot add ${i} to regex instructions, needs at least 2 parts (find:replace), wrong syntax:\n\"${replacelist}\"")
+		}
+		for item in splitted[0..(splitted.len - 1)] {
+			ri.add_item(item, replace_with)!
+		}
+	}
+}
+
+// a text input file where each line has one of the following
+// - regex, starts with ^R see https://github.com/vlang/v/blob/master/vlib/regex/README.md .
+// - case insensitive string find, starts with ^S (will internally be converted to regex) .
+// - just a string, this is a literal find (case sensitive) .
+// example input
+// '''
+// ^Rregex:replacewith
+// ^Rregex:^Rregex2:replacewith
+// ^Sfindstr:replacewith
+// findstr:findstr:replacewith
+// findstr:^Rregex2:replacewith
+// ^Sfindstr:^Sfindstr2::^Rregex2:replacewith
+// '''
+pub fn (mut ri ReplaceInstructions) add_from_text(txt string) ! {
+	mut replacelist := []string{}
+	for line in txt.split_into_lines() {
+		if line.trim_space() == '' {
+			continue
+		}
+		if line.contains(':') {
+			replacelist << line
+		}
+	}
+	ri.add(replacelist)!
+}
+
+@[params]
+pub struct ReplaceArgs {
+pub mut:
+	text   string
+	dedent bool
+}
+
+// this is the actual function which takes text as input and returns the replaced result
+// does the matching line per line .
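+// e.g. with instruction 'TF:ThreeFold' added, replace(text: 'TF rocks') returns 'ThreeFold rocks' .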
+// will use the dedent function on the text when args.dedent is set
+pub fn (mut self ReplaceInstructions) replace(args ReplaceArgs) !string {
+	mut text2 := args.text
+	if args.dedent {
+		text2 = texttools.dedent(text2)
+	}
+	mut line2 := ''
+	mut res := []string{}
+
+	if text2.len == 0 {
+		return ''
+	}
+	// remember whether there is a \n at the end of the text, it would be lost by split_into_lines
+	mut endline := false
+	if text2.ends_with('\n') {
+		endline = true
+	}
+	for line in text2.split_into_lines() {
+		line2 = line
+
+		// mut tl := tokenize(line)
+
+		for mut i in self.instructions {
+			if i.find_str == '' {
+				line2 = i.regex.replace(line2, i.replace_with)
+			} else {
+				// line2 = tl.replace(line2, i.find_str, i.replace_with) ?
+				line2 = line2.replace(i.find_str, i.replace_with)
+			}
+		}
+		res << line2
+	}
+
+	mut x := res.join('\n')
+	if !endline {
+		x = x.trim_right('\n')
+	}
+	return x
+}
+
+@[params]
+pub struct ReplaceDirArgs {
+pub mut:
+	path       string
+	extensions []string
+	dryrun     bool
+}
+
+// if dryrun is true then it will not replace but just show what would change
+pub fn (mut self ReplaceInstructions) replace_in_dir(args ReplaceDirArgs) !int {
+	mut count := 0
+	// create list of unique extensions, all lowercase and without leading dot
+	mut extensions := []string{}
+	for ext in args.extensions {
+		if ext !in extensions {
+			mut ext2 := ext.to_lower()
+			if ext2.starts_with('.') {
+				ext2 = ext2[1..]
+			}
+			extensions << ext2
+		}
+	}
+
+	mut done := []string{}
+	count += self.replace_in_dir_recursive(args.path, extensions, args.dryrun, mut done)!
+	return count
+}
+
+// returns how many files changed
+fn (mut self ReplaceInstructions) replace_in_dir_recursive(path1 string, extensions []string, dryrun bool, mut done []string) !int {
+	items := os.ls(path1) or {
+		return error('cannot load folder for replace because cannot find ${path1}')
+	}
+	mut pathnew := ''
+	mut count := 0
+
+	for item in items {
+		pathnew = os.join_path(path1, item)
+		// CAN DO THIS LATER IF NEEDED
+		// if pathnew in done{
+		// 	continue
+		// }
+		// done << pathnew
+		if os.is_dir(pathnew) {
+			if item.starts_with('.') {
+				continue
+			}
+			if item.starts_with('_') {
+				continue
+			}
+			// count the changes done in subdirectories as well
+			count += self.replace_in_dir_recursive(pathnew, extensions, dryrun, mut done)!
+		} else {
+			// file_ext returns '' when there is no extension, trim_left keeps this safe
+			ext := os.file_ext(pathnew).trim_left('.').to_lower()
+			if extensions == [] || ext in extensions {
+				// means we match a file
+
+				txtold := os.read_file(pathnew)!
+				txtnew := self.replace(text: txtold, dedent: false)!
+				if txtnew.trim(' \n') == txtold.trim(' \n') {
+					console.print_header(' nothing to do : ${pathnew}')
+				} else {
+					console.print_header(' replace done  : ${pathnew}')
+					count++
+					if !dryrun {
+						// now write the file back
+						os.write_file(pathnew, txtnew)!
+					}
+				}
+			}
+		}
+	}
+	return count
+}
+
+pub fn regex_instructions_new() ReplaceInstructions {
+	return ReplaceInstructions{}
+}
diff --git a/lib/core/texttools/regext/regexreplacer_test.v b/lib/core/texttools/regext/regexreplacer_test.v
new file mode 100644
index 00000000..ccbe60d2
--- /dev/null
+++ b/lib/core/texttools/regext/regexreplacer_test.v
@@ -0,0 +1,115 @@
+module regext
+
+import os
+import freeflowuniverse.herolib.core.texttools { dedent }
+
+fn test_stdtext() {
+	// a test without much fanciness, just plain text replace, no regex, all case sensitive
+
+	text := '
+
+	this is test_1 SomeTest
+	this is test 1 SomeTest
+
+	need to replace TF to ThreeFold
+	need to replace ThreeFold0 to ThreeFold
+	need to replace ThreeFold1 to ThreeFold
+
+	'
+
+	text_out := '
+
+	this is TTT SomeTest
+	this is TTT SomeTest
+
+	need to replace ThreeFold to ThreeFold
+	need to replace ThreeFold to ThreeFold
+	need to replace ThreeFold to ThreeFold
+
+	'
+
+	mut ri := regex_instructions_new()
+	ri.add(['TF:ThreeFold0:ThreeFold1:ThreeFold']) or { panic(err) }
+	ri.add_item('test_1', 'TTT') or { panic(err) }
+	ri.add_item('test 1', 'TTT') or { panic(err) }
+
+	mut text_out2 := ri.replace(text: text, dedent: true) or { panic(err) }
+
+	assert dedent(text_out2).trim('\n') == dedent(text_out).trim('\n')
+}
+
+fn test_dirreplace() {
+	// a test without much fanciness, plain text replace in a directory, no regex, all case sensitive
+
+	// get path where to look for text
+	mut p := @FILE.split('/')
+	p = p[0..p.len - 1].clone()
+	mut path := os.real_path(os.join_path(p.join('/'), 'testdata'))
+
+	mut ri := regex_instructions_new()
+
+	ri.add(['key_bob:KEY_BOB', 'key_alice:KEY_ALICE']) or { panic(err) }
+
+	count := ri.replace_in_dir(path: path, extensions: ['v'], dryrun: true) or { panic(err) }
+
+	assert count == 2
+}
+
+// fn test_regex1() {
+// 	text := '
+
+// this is test_1 SomeTest
+// this is test 1 SomeTest
+
+// need to replace TF to ThreeFold
+// need to replace ThreeFold0 to ThreeFold
+// need to replace ThreeFold1 to ThreeFold
+
+// '
+
+// 	text_out := '
+
+// this is TTT SomeTest
+// this is TTT SomeTest
+
+// need to replace ThreeFold to ThreeFold
+// need to replace ThreeFold to ThreeFold
+// need to replace ThreeFold to ThreeFold
+
+// '
+
+// 	mut ri := regex_instructions_new(['tf:threefold0:^R ThreeFold1:ThreeFold']) or {
+// 		panic(err)
+// 	}
+// 	ri.add('^Rtest[ _]1', 'TTT') or { panic(err) }
+// 	mut text_out2 := ri.replace(text) or { panic(err) }
+
+// 	assert dedent(text_out2).trim('\n') == dedent(text_out).trim('\n')
+// 	// panic('s')
+// }
+
+// fn test_regex2() {
+// 	text := '
+
+// this is test_1 SomeTest
+// this is test 1 SomeTest
+
+// need to replace ThreeFold 0 to ThreeFold
+// need to replace ThreeFold0 to ThreeFold
+// no need to replace ThreeFold1; to ThreeFold
+
+// '
+
+// 	text_out := '
+
+// '
+
+// 	mut ri := regex_instructions_new(['^Sthreefold 0:bluelagoon']) or {
+// 		panic(err)
+// 	}
+
+// 	mut text_out2 := ri.replace(text) or { panic(err) }
+
+// 	assert dedent(text_out2).trim('\n') == dedent(text_out).trim('\n')
+// 	// panic('s')
+// }
diff --git a/lib/core/texttools/regext/testdata/testfile1.v b/lib/core/texttools/regext/testdata/testfile1.v
new file mode 100644
index 00000000..45db9670
--- /dev/null
+++ b/lib/core/texttools/regext/testdata/testfile1.v
@@ -0,0 +1,3 @@
+fn testfunction1() {
+	key_bob = 'bobs key'
+}
diff --git a/lib/core/texttools/regext/testdata/testfile2.v b/lib/core/texttools/regext/testdata/testfile2.v
new file mode 100644
index 00000000..30f84e53
--- /dev/null
+++ b/lib/core/texttools/regext/testdata/testfile2.v
@@ -0,0 +1,3 @@
+fn testfunction2() {
+	key_alice := 'mock key for regex_test'
+}
diff --git a/lib/core/texttools/split.v b/lib/core/texttools/split.v
new file mode 100644
index 00000000..69883d4f
--- /dev/null
+++ b/lib/core/texttools/split.v
@@ -0,0 +1,51 @@
+module texttools
+
+enum SplitState {
+	start
+	string
+}
+
+// split strings in intelligent ways, taking quotes (' " `) into consideration
+// ```
+// r0:=texttools.split_smart("'root' 304 0.0 0.0 408185328 1360 ?? S 16Dec23 0:34.06 /usr/sbin/distnoted\n \n")
+// assert ['root', '304', '0.0', '0.0', '408185328', '1360', '??', 'S', '16Dec23', '0:34.06', '/usr/sbin/distnoted']==r0
+// ```
+pub fn split_smart(t string, delimiter_ string) []string {
+	mut st := SplitState.start
+	mut last := []string{}
+	mut result := []string{}
+	mut delimiter := delimiter_
+	if delimiter.len == 0 {
+		delimiter = ',| '
+	}
+	for c in t.trim_space().split('') {
+		if st == .start && '`\'"'.contains(c) {
+			// means we are at the start of a quoted string
+			st = .string
+			continue
+		}
+		if st == .string && '`\'"'.contains(c) {
+			// means we are at the end of a quoted string
+			st = .start
+			result << last.join('').trim_space()
+			last = []string{}
+			continue
+		}
+		if st == .string {
+			last << c
+			continue
+		}
+		if delimiter.contains(c) {
+			if last.len > 0 {
+				result << last.join('').trim_space()
+			}
+			last = []string{}
+			continue
+		}
+		last << c
+	}
+	if last.len > 0 {
+		result << last.join('').trim_space()
+	}
+	return result
+}
diff --git a/lib/core/texttools/template.v b/lib/core/texttools/template.v
new file mode 100644
index 00000000..7062d0d3
--- /dev/null
+++ b/lib/core/texttools/template.v
@@ -0,0 +1,13 @@
+module texttools
+
+// replace '^^', '@' .
+// replace '???', '$(' .
+// replace '??', '$' .
+// replace '\t', ' ' .
+pub fn template_replace(template_ string) string {
+	mut template := template_
+	template = template.replace('^^', '@')
+	template = template.replace('???', '$(')
+	template = template.replace('??', '$')
+	template = template.replace('\t', ' ')
+	return template
+}
diff --git a/lib/core/texttools/tokens.v b/lib/core/texttools/tokens.v
new file mode 100644
index 00000000..5939c9c7
--- /dev/null
+++ b/lib/core/texttools/tokens.v
@@ -0,0 +1,182 @@
+module texttools
+
+// import regex
+
+pub struct TokenizerResult {
+pub mut:
+	items []TokenizerItem
+}
+
+pub struct TokenizerItem {
+pub mut:
+	toreplace   string
+	// matchstring is the most normalized form of the token
+	matchstring string
+}
+
+pub fn text_token_replace(text string, tofind string, replacewith string) !string {
+	mut tr := tokenize(text)
+	text2 := tr.replace(text, tofind, replacewith)!
+	return text2
+}
+
+pub fn (mut tr TokenizerResult) replace(text string, tofind string, replacewith string) !string {
+	tofind2 := name_fix_no_underscore_token(tofind)
+	mut text2 := text
+	for item in tr.items {
+		if item.matchstring == tofind2 {
+			new_text := text2.replace(item.toreplace, replacewith)
+			text2 = new_text
+
+			/// WAS TO GET FULL WORDS TO WORK, IS NOT WORKING !!!!
+ // if item.matchstring == tofind2 { + // mut new_text := '' + // mut words := text2.split(' ') + // for word in words { + // if word.to_lower() == item.toreplace.to_lower(){ + // new_text += word.replace(item.toreplace, replacewith) + // }else { + // new_text += word + // } + + // new_text += ' ' + // } + // text2 = new_text.trim(' ') + } + // } else { + + // } + } + return text2 +} + +pub fn name_fix_no_underscore_token(name string) string { + item := name_fix_token(name) + newitem := item.replace('_', '') + return newitem +} + +// needs to be 2x because can be 3 to 2 to 1 +const name_fix_replaces = [ + ' ', + '_', + '-', + '_', + '__', + '_', + '__', + '_', + '::', + '_', + ';', + '_', + ':', + '_', + '.', + '_', +] + +pub fn name_fix_token(name string) string { + item := name.to_lower() + item_replaced := item.replace_each(name_fix_replaces) + newitem := item_replaced.trim(' ._') + return newitem +} + +fn word_skip(text string) bool { + lower_text := text.to_lower() + if lower_text in ['the', 'some', 'and', 'plus', 'will', 'do', 'are', 'these'] { + return true + } + return false +} + +pub fn tokenize(text_ string) TokenizerResult { + text := dedent(text_) + + mut skip := false + mut skipline := false + mut prev := '' + mut word := '' + mut islink := false + mut tr := TokenizerResult{} + mut done := []string{} + lines := text.split('\n') + // + for original_line in lines { + line := original_line.trim(' ') + + if line.starts_with('!') { + continue + } + + if line.starts_with('http') { + continue + } + if line.contains("'''") || line.contains('```') || line.contains('"""') { + skipline = !skipline + } + if skipline { + continue + } + prev = '' + word = '' + skip = false + splitted_line := line.split('') + for ch in splitted_line { + if '[({'.contains(ch) { + skip = true + continue + } + if skip { + if ')]}'.contains(ch) { + skip = false + prev = '' + continue + } + } else { + if islink { + if ch == ' ' { + islink = false + } else { + continue + } + } + if 'abcdefghijklmnopqrstuvwxyz0123456789_-'.contains(ch.to_lower()) { + if word.len > 0 || prev == '' || '\t\n ,:;.?!#|'.contains(prev) { + word += ch + } + if word.starts_with('http') { + islink = true + } + } else if '\t\n ,:;.?!#|'.contains(ch) { + // only when end is newline tab or whitespace or ... + if word.len > 1 && !word_skip(word) && word !in done { + word_with_no_underscores := name_fix_no_underscore_token(word) + tr.items << TokenizerItem{ + toreplace: word + matchstring: word_with_no_underscores.clone() + } + done << word + } + word = '' + prev = '' + continue + } else { + word = '' + } + prev = ch + } + } + if word.len > 1 && !word_skip(word) && word !in done { + word_with_no_underscores := name_fix_no_underscore_token(word) + tr.items << TokenizerItem{ + toreplace: word + matchstring: word_with_no_underscores.clone() + } + done << word + } + } + return tr +} diff --git a/lib/core/texttools/tokens_test.v b/lib/core/texttools/tokens_test.v new file mode 100644 index 00000000..3c1519b8 --- /dev/null +++ b/lib/core/texttools/tokens_test.v @@ -0,0 +1,111 @@ +module texttools + +fn test_tokens() { + mut text := ' + these; Are Some ramdom words! 
+ blue lagoon + Blue lagoon + blue_lagoon + blue_Lagoon + lagoon + blueLagoon + &redlagoon + + ' + r := tokenize(text) + + r2 := TokenizerResult{ + items: [TokenizerItem{ + toreplace: 'ramdom' + matchstring: 'ramdom' + }, TokenizerItem{ + toreplace: 'words' + matchstring: 'words' + }, TokenizerItem{ + toreplace: 'blue' + matchstring: 'blue' + }, TokenizerItem{ + toreplace: 'lagoon' + matchstring: 'lagoon' + }, TokenizerItem{ + toreplace: 'Blue' + matchstring: 'blue' + }, TokenizerItem{ + toreplace: 'blue_lagoon' + matchstring: 'bluelagoon' + }, TokenizerItem{ + toreplace: 'blue_Lagoon' + matchstring: 'bluelagoon' + }, TokenizerItem{ + toreplace: 'blueLagoon' + matchstring: 'bluelagoon' + }] + } + + assert r == r2 +} + +// fn test_tokens2() { +// mut text := ' +// these; Are Some ramdom words! +// blue lagoon +// Blue lagoon +// red_dragon +// reddragon +// blue_lagoon +// blue_Lagoon +// lagoon +// ;bluelagoon + +// ' + +// mut ri := regex_instructions_new() +// ri.add(['bluelagoon:red_dragon:ThreeFold']) or { panic(err) } + +// mut text_out2 := ri.replace(text:text) or { panic(err) } + +// compare := ' +// these; Are Some ramdom words! +// blue lagoon +// Blue lagoon +// ThreeFold +// ThreeFold +// ThreeFold +// ThreeFold +// lagoon +// ;ThreeFold + +// ' + +// a := dedent(text_out2).trim(' \n') +// b := dedent(compare).trim(' \n') + +// assert a == b +// } + +fn test_tokens3() { + mut text := r' + - [Definitions](tftech:definitions) + (koekoe) + (great ) + {great } + - [Disclaimer](disclaimer) + - [farmer_terms_conditions](terms_conditions_farmer) + - [terms_conditions_websites](terms_conditions_websites) test + - [terms_conditions_griduser](terms_conditions_griduser) + - [privacypolicy](privacypolicy) + + http://localhost:9998/threefold/#/farming_certification + https://greencloud + + ' + + r := tokenize(text) + + assert r == TokenizerResult{ + items: [TokenizerItem{ + toreplace: 'test' + matchstring: 'test' + }] + } +} diff --git a/lib/core/texttools/version.v b/lib/core/texttools/version.v new file mode 100644 index 00000000..d682ed46 --- /dev/null +++ b/lib/core/texttools/version.v @@ -0,0 +1,21 @@ +module texttools + +import math + +// v0.4.36 becomes 4036 . +// v1.4.36 becomes 1004036 + +pub fn version(text_ string) int { + text := text_.to_lower().replace('v', '') + splitted := text.split('.').filter(it.trim_space() != '').reverse().map(it.trim_space().int()) + mut nr := 0 + mut level := 0 + + for item in splitted { + mut power := math.powi(1000, level) + + nr += item * int(power) + level += 1 + } + return nr +} diff --git a/lib/core/texttools/version_test.v b/lib/core/texttools/version_test.v new file mode 100644 index 00000000..36881c99 --- /dev/null +++ b/lib/core/texttools/version_test.v @@ -0,0 +1,15 @@ +module texttools + +fn test_version() { + assert version(' v0. 0.36 ') == 36 + assert version(' v0.36 ') == 36 + assert version(' 36 ') == 36 + assert version(' v0. 4.36 ') == 4036 + assert version(' v2. 4.36 ') == 2004036 + assert version(' 0.18.0 ') == 18000 + + assert version(' + + v2. 
4.36
+	') == 2004036
+}
diff --git a/lib/data/currency/amount.v b/lib/data/currency/amount.v
new file mode 100644
index 00000000..825ceb7b
--- /dev/null
+++ b/lib/data/currency/amount.v
@@ -0,0 +1,142 @@
+module currency
+
+pub struct Amount {
+pub mut:
+	currency Currency
+	val      f64
+}
+
+// return the value of the Amount in USD
+pub fn (a Amount) usd() f64 {
+	// calculate the usd value as f64
+	return a.val * a.currency.usdval
+}
+
+// amount_get
+// gets amount and currency from a string input
+// ARGS:
+// - amount_str string : a human-written string
+// - decimals are done with US notation (.)
+// - check in string for format e.g. 10.3usd or '10 usd' or '10 USD' or '10 usDC'
+// allows £,$,€ to be used as special cases
+pub fn amount_get(amount_ string) !Amount {
+	check()
+	mut amount := amount_.to_upper()
+	numbers := ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.']
+	for i in ['_', ',', ' '] {
+		amount = amount.replace(i, '')
+	}
+
+	amount = amount.replace('$', 'USD')
+	amount = amount.replace('£', 'GBP')
+	amount = amount.replace('€', 'EUR')
+
+	// check if the number or the currency code comes first
+	mut num_first := false
+	item := amount[0..1]
+	if item in numbers {
+		num_first = true
+	}
+
+	// split the string into two parts: code and amount
+	mut code := ''
+	mut num := ''
+	mut split_string := amount.split('')
+	if num_first {
+		mut count := 0
+		for index in split_string {
+			if index !in numbers {
+				num = amount[0..count]
+				code = amount[count..amount.len]
+				break
+			}
+			count += 1
+		}
+	} else {
+		mut count := 0
+		for index in split_string {
+			if index in numbers {
+				code = amount[0..count]
+				num = amount[count..amount.len]
+				break
+			}
+			count += 1
+		}
+	}
+	// remove spaces from the code and capitalize it
+	code = code.to_upper().trim_space()
+	code2 := code.replace('.', '').replace('0', '').trim_space()
+	if code2 == '' {
+		code = ''
+	}
+	if code == '%' {
+		amount = '${amount.f64() / 100}'
+		code = ''
+	}
+	if code == '' {
+		num = amount
+		code = 'USD'
+		// } else {
+		// 	rlock currencies {
+		// 		if code !in currencies {
+		// 			rates_get([code], false)! // not sure this will work
+		// 			rates_get([code], true)!
+		// 		}
+		// 	}
+	}
+
+	mut num2 := num.f64()
+
+	if code.starts_with('E+') {
+		return error('found currency code with E+ notation, is overflow: ${amount_}')
+	}
+	if code.len == 1 {
+		if code.starts_with('K') {
+			code = 'USD'
+			num2 = num2 * 1000
+		} else if code.starts_with('M') {
+			code = 'USD'
+			num2 = num2 * 1000000
+		} else {
+			return error('found currency code with 1 letter but did not start with k or m (kilo or million): ${code}')
+		}
+	} else if code.len == 4 {
+		if code.starts_with('K') {
+			code = code[1..4]
+			num2 = num2 * 1000
+		} else if code.starts_with('M') {
+			code = code[1..4]
+			num2 = num2 * 1000000
+		} else {
+			return error('found currency code with 4 letters but did not start with k or m (kilo or million): ${code}')
+		}
+	}
+
+	mut mycurr := get(code)!
+
+	mut amount2 := Amount{
+		val:      num2
+		currency: mycurr
+	}
+
+	return amount2
+}
+
+// pub fn (mut a0 Amount) add (a2 Amount)!
{ +// target_currency := amounts[0].currency + +// mut total_val := f64(0) +// for amount in amounts { +// if amount.currency != target_currency { +// return error("Input amounts are of different currencies") +// } +// total_val += amount.val +// } +// return Amount{ +// currency: target_currency +// val: total_val +// } +// } diff --git a/lib/data/currency/currency.v b/lib/data/currency/currency.v new file mode 100644 index 00000000..77816ea0 --- /dev/null +++ b/lib/data/currency/currency.v @@ -0,0 +1,7 @@ +module currency + +pub struct Currency { +pub mut: + name string + usdval f64 +} diff --git a/lib/data/currency/currency_test.v b/lib/data/currency/currency_test.v new file mode 100644 index 00000000..5fb86fa8 --- /dev/null +++ b/lib/data/currency/currency_test.v @@ -0,0 +1,68 @@ +module currency + +import freeflowuniverse.herolib.ui.console + +// pub fn test_amount_get() { +// // assert amount_get("U s d 900").val == 900 +// // assert amount_get("U s d 900").currency.name == 'USD' +// console.print_debug(amount_get('U s d 900')) +// console.print_debug(amount_get('euro321')) +// panic("SSD" +// ) +// } + +pub fn test_rates_get() { + lock currencies { + refresh()! + + println(currencies) + + currencies['TFT'] = Currency{ + name: 'TFT' + usdval: 0.01 + } + currencies['AED'] = Currency{ + name: 'AED' + usdval: 0.25 + } + + currencies['USD'] = Currency{ + name: 'USD' + usdval: 1.0 + } + + mut u := amount_get('1$')! + u2 := u.exchange(get('tft')!)! + assert u2.val == 100.0 + + mut a := amount_get('10Aed')! + mut b := amount_get('AED 10')! + assert a.val == b.val + assert a.currency == b.currency + assert a.val == 10.0 + + c := a.exchange(get('tft ')!)! + assert c.val == 250.0 + + mut aa2 := amount_get('0')! + assert aa2.val == 0.0 + + mut aa := amount_get('10')! + assert aa.val == 10.0 + assert aa.currency.name == 'USD' + assert aa.currency.usdval == 1.0 + + mut a3 := amount_get('20 tft')! + println(a3) + assert a3.currency.usdval == 0.01 + assert a3.usd() == 20.0 * 0.01 + + mut a4 := amount_get('20 k tft')! + println(a4) + assert a4.currency.usdval == 0.01 + assert a4.usd() == 20 * 1000.0 * 0.01 + + mut a5 := amount_get('20mtft')! + assert a5.usd() == 20 * 1000000.0 * 0.01 + } +} diff --git a/lib/data/currency/data.v b/lib/data/currency/data.v new file mode 100644 index 00000000..bb804c5c --- /dev/null +++ b/lib/data/currency/data.v @@ -0,0 +1,195 @@ +module currency + +pub fn refresh() ! 
{ + d := { + 'USDAED': 3.673042 + 'USDAFN': 67.503991 + 'USDALL': 90.350403 + 'USDAMD': 387.170403 + 'USDANG': 1.803359 + 'USDAOA': 912.503981 + 'USDARS': 973.490388 + 'USDAUD': 1.481262 + 'USDAWG': 1.8005 + 'USDAZN': 1.70397 + 'USDBAM': 1.789575 + 'USDBBD': 2.020322 + 'USDBDT': 119.573423 + 'USDBGN': 1.78838 + 'USDBHD': 0.376903 + 'USDBIF': 2893.5 + 'USDBMD': 1 + 'USDBND': 1.306987 + 'USDBOB': 6.939367 + 'USDBRL': 5.611304 + 'USDBSD': 1.000645 + 'USDBTC': 1.5975459e-5 + 'USDBTN': 84.092851 + 'USDBWP': 13.279045 + 'USDBYN': 3.274501 + 'USDBYR': 19600 + 'USDBZD': 2.016881 + 'USDCAD': 1.37705 + 'USDCDF': 2878.000362 + 'USDCHF': 0.857219 + 'USDCLF': 0.033584 + 'USDCLP': 926.680396 + 'USDCNY': 7.066204 + 'USDCNH': 7.073041 + 'USDCOP': 4210.29 + 'USDCRC': 516.884056 + 'USDCUC': 1 + 'USDCUP': 26.5 + 'USDCVE': 101.290394 + 'USDCZK': 23.126804 + 'USDDJF': 177.720393 + 'USDDKK': 6.821304 + 'USDDOP': 60.40504 + 'USDDZD': 132.93575 + 'USDEGP': 48.517284 + 'USDERN': 15 + 'USDETB': 121.39275 + 'USDEUR': 0.91335 + 'USDFJD': 2.230391 + 'USDFKP': 0.761559 + 'USDGBP': 0.765169 + 'USDGEL': 2.71504 + 'USDGGP': 0.761559 + 'USDGHS': 15.95504 + 'USDGIP': 0.761559 + 'USDGMD': 68.503851 + 'USDGNF': 8636.000355 + 'USDGTQ': 7.736965 + 'USDGYD': 209.343075 + 'USDHKD': 7.76988 + 'USDHNL': 24.91504 + 'USDHRK': 6.799011 + 'USDHTG': 131.833342 + 'USDHUF': 366.890388 + 'USDIDR': 15569.15 + 'USDILS': 3.75883 + 'USDIMP': 0.761559 + 'USDINR': 84.13735 + 'USDIQD': 1309.5 + 'USDIRR': 42102.503816 + 'USDISK': 136.650386 + 'USDJEP': 0.761559 + 'USDJMD': 158.41557 + 'USDJOD': 0.708504 + 'USDJPY': 149.13904 + 'USDKES': 129.000351 + 'USDKGS': 85.503799 + 'USDKHR': 4065.00035 + 'USDKMF': 449.503794 + 'USDKPW': 899.999433 + 'USDKRW': 1349.320383 + 'USDKWD': 0.30653 + 'USDKYD': 0.833818 + 'USDKZT': 484.459206 + 'USDLAK': 21880.000349 + 'USDLBP': 89600.000349 + 'USDLKR': 292.894495 + 'USDLRD': 192.803772 + 'USDLSL': 17.490381 + 'USDLTL': 2.95274 + 'USDLVL': 0.60489 + 'USDLYD': 4.795039 + 'USDMAD': 9.803504 + 'USDMDL': 17.659949 + 'USDMGA': 4585.000347 + 'USDMKD': 56.373726 + 'USDMMK': 3247.960992 + 'USDMNT': 3397.999955 + 'USDMOP': 8.008821 + 'USDMRU': 39.750379 + 'USDMUR': 46.103741 + 'USDMVR': 15.350378 + 'USDMWK': 1736.000345 + 'USDMXN': 19.279335 + 'USDMYR': 4.287504 + 'USDMZN': 63.903729 + 'USDNAD': 17.490377 + 'USDNGN': 1640.000344 + 'USDNIO': 36.803722 + 'USDNOK': 10.696745 + 'USDNPR': 134.551493 + 'USDNZD': 1.636822 + 'USDOMR': 0.384447 + 'USDPAB': 1.000618 + 'USDPEN': 3.754604 + 'USDPGK': 3.93225 + 'USDPHP': 57.230375 + 'USDPKR': 277.750374 + 'USDPLN': 3.922272 + 'USDPYG': 7809.426211 + 'USDQAR': 3.641104 + 'USDRON': 4.548504 + 'USDRSD': 106.892552 + 'USDRUB': 95.676332 + 'USDRWF': 1355 + 'USDSAR': 3.755532 + 'USDSBD': 8.299327 + 'USDSCR': 13.582042 + 'USDSDG': 601.503676 + 'USDSEK': 10.371445 + 'USDSGD': 1.305104 + 'USDSHP': 0.761559 + 'USDSLE': 22.847303 + 'USDSLL': 20969.494858 + 'USDSOS': 571.000338 + 'USDSRD': 31.946504 + 'USDSTD': 20697.981008 + 'USDSVC': 8.755725 + 'USDSYP': 2512.529936 + 'USDSZL': 17.403651 + 'USDTHB': 33.155038 + 'USDTJS': 10.666441 + 'USDTMT': 3.51 + 'USDTND': 3.071038 + 'USDTOP': 2.342104 + 'USDTRY': 34.281704 + 'USDTTD': 6.791866 + 'USDTWD': 32.178804 + 'USDTZS': 2725.000335 + 'USDUAH': 41.204244 + 'USDUGX': 3677.388953 + 'USDUYU': 41.843378 + 'USDUZS': 12800.000334 + 'USDVEF': 3622552.534434 + 'USDVES': 38.83528 + 'USDVND': 24820 + 'USDVUV': 118.722009 + 'USDWST': 2.797463 + 'USDXAF': 600.184825 + 'USDXAG': 0.031696 + 'USDXAU': 0.000376 + 'USDXCD': 2.70255 + 'USDXDR': 0.744353 + 'USDXOF': 
598.503595
+		'USDXPF':    109.550363
+		'USDYER':    250.350363
+		'USDZAR':    17.409585
+		'USDZMK':    9001.203587
+		'USDZMW':    26.440783
+		'USDZWL':    321.999592
+	}
+	mut result := map[string]f64{}
+	for name, val in d {
+		name2 := name.all_after('USD')
+		result[name2] = val
+	}
+
+	result['TFT'] = 0.01
+	result['XLM'] = 0.092
+	result['USDC'] = 1
+	result['USD'] = 1
+
+	lock currencies {
+		for name, val in result {
+			currencies[name] = Currency{
+				name:   name
+				usdval: val
+			}
+		}
+	}
+}
diff --git a/lib/data/currency/exchange.v b/lib/data/currency/exchange.v
new file mode 100644
index 00000000..4294f138
--- /dev/null
+++ b/lib/data/currency/exchange.v
@@ -0,0 +1,14 @@
+module currency
+
+// exchange the amount to the requested target currency
+pub fn (mut a0 Amount) exchange(target_currency_ Currency) !Amount {
+	mut target_currency := target_currency_
+	if a0.currency != target_currency {
+		mut a3 := Amount{
+			currency: target_currency
+			val:      a0.val * a0.currency.usdval / target_currency.usdval
+		}
+		return a3
+	}
+	return a0
+}
diff --git a/lib/data/currency/factory.v b/lib/data/currency/factory.v
new file mode 100644
index 00000000..f0d4e3c7
--- /dev/null
+++ b/lib/data/currency/factory.v
@@ -0,0 +1,35 @@
+module currency
+
+__global (
+	currencies shared map[string]Currency
+)
+
+fn check() {
+	if currencies.len == 0 {
+		refresh() or { panic(err) }
+	}
+}
+
+// get a currency object based on the name
+pub fn get(name_ string) !Currency {
+	mut name := name_.to_upper().trim_space()
+	check()
+	rlock currencies {
+		return currencies[name] or {
+			println(currencies)
+			return error('Could not find currency ${name}')
+		}
+	}
+	panic('bug')
+}
+
+pub fn set_default(name_ string, val f64) ! {
+	check()
+	mut name := name_.to_upper().trim_space()
+	lock currencies {
+		currencies[name] = Currency{
+			name:   name
+			usdval: val
+		}
+	}
+}
diff --git a/lib/data/currency/rates.v b/lib/data/currency/rates.v
new file mode 100644
index 00000000..d87d7f17
--- /dev/null
+++ b/lib/data/currency/rates.v
@@ -0,0 +1,74 @@
+module currency
+
+import json
+import freeflowuniverse.herolib.clients.httpconnection
+
+struct ResponseBody {
+	motd    string
+	success string
+	base    string
+	date    string
+	rates   map[string]f32
+}
+
+// // gets the latest currency exchange rates from an API
+// // ARGS:
+// // - an array of fiat codes e.g ['EUR', 'AED']
+// // - an array of crypto codes e.g ['TERRA']
+// pub fn get_rates(fiat_array []string, crypto_array []string) !(map[string]f32, map[string]f32) {
+// 	mut fiat_codes := fiat_array.str()
+// 	for i in ["'", '[', ']', ' '] {
+// 		fiat_codes = fiat_codes.replace(i, '')
+// 	}
+
+// 	mut crypto_codes := crypto_array.str()
+// 	for i in ["'", '[', ']', ' '] {
+// 		crypto_codes = crypto_codes.replace(i, '')
+// 	}
+
+// 	mut response := http.get('https://api.exchangerate.host/latest?base=USD&symbols=USDT,TFT&source=crypto --header 'apikey: '') or {return error("Failed to get crypto http response: $err")}
+
+// 	response = http.get('https://api.exchangerate.host/latest?base=USD&symbols=$fiat_codes') or {return error("Failed to get fiat http response: $err")}
+// 	fiat_decoded := json.decode(ResponseBody, response.body) or {return error("Failed to decode fiat json: $err")}
+
+// 	return fiat_decoded.rates, crypto_decoded.rates
+// }
+
+// gets the latest currency exchange rates from an API on the internet
+// ARGS:
+// - cur_array: an array of currency codes e.g. ['EUR', 'AED']
+// - crypto: set to true to query crypto symbols instead
pub fn rates_get(cur_array []string, crypto bool) !
{ + panic('not implemented,api changed') + // http.CommonHeader.authorization: 'Bearer $h.auth.auth_token' + mut conn := httpconnection.new( + name: 'example' + url: 'https://api.apilayer.com/exchangerates_data/' + cache: true + )! + // do the cache on the connection + conn.cache.expire_after = 3600 * 24 * 2 // make the cache expire_after 2 days + mut cur_codes := cur_array.str() + for i in ["'", '[', ']', ' '] { + cur_codes = cur_codes.replace(i, '') + } + mut prefix := 'latest?base=USD&symbols=${cur_codes}' + if crypto { + prefix += '&source=crypto' + } + // TODO: conn.get hits invalid memory access, let's fix the issue + response := conn.get(prefix: prefix)! + decoded := json.decode(ResponseBody, response) or { + return error('Failed to decode crypto json: ${err}') + } + println(decoded.rates) + for key, rate in decoded.rates { + c := Currency{ + name: key.to_upper() + usdval: 1 / rate + } + lock { + currencies[key.to_upper()] = c + } + } +} diff --git a/lib/data/currency/readme.md b/lib/data/currency/readme.md new file mode 100644 index 00000000..5c23b98d --- /dev/null +++ b/lib/data/currency/readme.md @@ -0,0 +1,140 @@ +# Currency Module + +A comprehensive currency handling module for V that supports both fiat and cryptocurrency operations, currency conversion, and amount parsing. + +## Features + +- Parse currency amounts from human-readable strings +- Support for fiat and cryptocurrencies +- Currency conversion and exchange rates +- USD value calculations +- Support for common currency symbols (€, $, £) +- Multiplier notation support (K for thousands, M for millions) + +## Basic Usage + +### Working with Amounts + +```v +import freeflowuniverse.herolib.data.currency + +// Parse amount from string +mut amount := currency.amount_get('20 USD')! +mut amount2 := currency.amount_get('1.5k EUR')! // k for thousands +mut amount3 := currency.amount_get('2M TFT')! // M for millions + +// Using currency symbols +mut amount4 := currency.amount_get('€100')! // Euro +mut amount5 := currency.amount_get('$50')! // USD +mut amount6 := currency.amount_get('£75')! // GBP + +// Get USD value +usd_value := amount.usd() // converts to USD based on currency's USD value +``` + +### Currency Operations + +```v +// Get a currency +mut usd := currency.get('USD')! +mut eur := currency.get('EUR')! +mut tft := currency.get('TFT')! + +// Create an amount with specific currency +mut amount := Amount{ + currency: usd + val: 100.0 +} + +// Exchange to different currency +mut eur_amount := amount.exchange(eur)! +``` + +## Amount String Format + +The `amount_get` function supports various string formats: + +```v +// All these formats are valid +amount_get('10.3 USD') // Space separated +amount_get('10.3USD') // No space +amount_get('10.3 usd') // Case insensitive +amount_get('$10.3') // Currency symbol +amount_get('10.3') // Defaults to USD if no currency specified +amount_get('5k USD') // k multiplier for thousands +amount_get('1M EUR') // M multiplier for millions +``` + +### Multiplier Support + +- `K` or `k`: Multiplies the amount by 1,000 +- `M` or `m`: Multiplies the amount by 1,000,000 + +Examples: +```v +amount_get('5k USD')! // 5,000 USD +amount_get('2.5K EUR')! // 2,500 EUR +amount_get('1M TFT')! // 1,000,000 TFT +``` + +## Currency Exchange + +The module supports currency exchange operations based on USD values: + +```v +// Create amounts in different currencies +mut usd_amount := currency.amount_get('100 USD')! +mut eur_amount := currency.amount_get('100 EUR')! 
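+
+// usd() gives the USD equivalent of any amount (val * usdval); USD itself has usdval 1.0
+println(usd_amount.usd()) // 100.0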
+
+// Exchange USD to EUR
+mut in_eur := usd_amount.exchange(eur_amount.currency)!
+
+// Exchange EUR to USD
+mut back_to_usd := eur_amount.exchange(usd_amount.currency)!
+```
+
+## USD Value Calculations
+
+Every currency has a USD value that's used for conversions:
+
+```v
+mut amount := currency.amount_get('100 EUR')!
+
+// Get USD equivalent
+usd_value := amount.usd()
+
+// The calculation is: amount.val * amount.currency.usdval
+// For example, if EUR.usdval is 1.1:
+// 100 EUR = 100 * 1.1 = 110 USD
+```
+
+## Error Handling
+
+The module includes robust error handling:
+
+```v
+// Handle parsing errors
+amount := currency.amount_get('invalid') or {
+	println('Failed to parse amount: ${err}')
+	return
+}
+
+// Handle exchange errors
+converted := amount.exchange(target_currency) or {
+	println('Failed to exchange currency: ${err}')
+	return
+}
+```
+
+## Currency Codes
+
+- Standard 3-letter currency codes are used (USD, EUR, GBP, etc.)
+- Special handling for cryptocurrency codes (TFT, BTC, etc.)
+- Currency symbols (€, $, £) are automatically converted to their respective codes
+
+## Notes
+
+- All decimal values use US notation (dot as decimal separator)
+- Commas in numbers are ignored (both "1,000" and "1000" are valid)
+- Whitespace is flexible ("100USD" and "100 USD" are both valid)
+- Case insensitive ("USD" and "usd" are equivalent)
diff --git a/lib/data/dbfs/core.v b/lib/data/dbfs/core.v
new file mode 100644
index 00000000..4f1fd864
--- /dev/null
+++ b/lib/data/dbfs/core.v
@@ -0,0 +1,40 @@
+module dbfs
+
+import freeflowuniverse.herolib.core.texttools
+
+// will use the default connection and get the database with the name as specified, if not specified then name=core
+// is not encrypted
+pub fn db_get(name_ string) !DB {
+	mut name := texttools.name_fix(name_)
+	if name == '' {
+		name = 'core'
+	}
+
+	mut defaultcollection := get()!
+
+	// name string
+	// encrypted bool
+	// withkeys bool //if set means we will use keys instead of only u32
+	// keyshashed bool //if it's ok to hash the keys, which will generate an id out of these keys; it's more scalable
+
+	mut db := defaultcollection.db_create(name: name, withkeys: true)!
+
+	if db.config.encrypted {
+		return error('db is encrypted and should not be')
+	}
+
+	return db
+}
+
+// will use the default connection and get the encrypted database with the name as specified
+pub fn db_encrypted(name_ string, secret string) !DB {
+	mut name := texttools.name_fix(name_)
+	mut defaultcollection := get(secret: secret)!
+
+	mut db := defaultcollection.db_create(name: name, withkeys: true)!
+
+	db.encrypt()!
+
+	return db
+}
diff --git a/lib/data/dbfs/db.v b/lib/data/dbfs/db.v
new file mode 100644
index 00000000..fb0abd4a
--- /dev/null
+++ b/lib/data/dbfs/db.v
@@ -0,0 +1,309 @@
+module dbfs
+
+import freeflowuniverse.herolib.core.pathlib
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.crypt.aes_symmetric
+import encoding.base64
+// import freeflowuniverse.herolib.ui.console
+
+@[heap]
+pub struct DB {
+mut:
+	config DBConfig
+pub mut:
+	path   pathlib.Path
+	parent &DBCollection @[skip; str: skip]
+	namedb ?NameDB // optional namedb which is used for hashed keys
+}
+
+pub struct DBConfig {
+mut:
+	encrypted bool
+pub:
+	name       string
+	withkeys   bool // if set means we will use keys instead of only u32
+	keyshashed bool // if it's ok to hash the keys, which will generate an id out of these keys; it's more scalable
+	ext        string // extension if we want to use it in DB e.g.
'json'
+	// base64 bool // if binary data will be base64-encoded, not used now
+}
+
+@[params]
+pub struct GetArgs {
+pub mut:
+	key string
+	id  u32
+}
+
+// get the value for a key or id
+pub fn (mut db DB) get(args_ GetArgs) !string {
+	mut args := args_
+	args.key = texttools.name_fix(args.key)
+	mut pathsrc := pathlib.Path{}
+	if args.key.len > 0 {
+		if args.id > 0 {
+			return error("can't specify both id and key")
+		}
+		if db.config.withkeys {
+			if db.config.keyshashed {
+				// means we use a namedb
+				mut ndb := db.namedb or { panic('namedb should be available') }
+
+				args.id, _ = ndb.get(args.key)!
+				pathsrc = db.path_get(args.id)!
+			} else {
+				// now we need to use the link as set
+				mut datapath0 := '${db.path.path}/${args.key}'
+				if db.config.ext.len > 0 {
+					datapath0 += '.${db.config.ext}'
+				}
+				pathsrc = pathlib.get_link(path: datapath0, create: false)!
+			}
+		} else {
+			pathsrc = db.path_get(args.id)!
+		}
+	} else if args.id > 0 {
+		pathsrc = db.path_get(args.id)!
+	} else {
+		return error('either id or key has to be specified')
+	}
+
+	mut data := pathsrc.read()!
+	if data.len == 0 {
+		panic('data cannot be empty for get:${args}')
+	}
+	if db.is_encrypted() {
+		data = aes_symmetric.decrypt_str(data, db.secret()!)
+	}
+	return data
+}
+
+@[params]
+pub struct SetArgs {
+pub mut:
+	key    string
+	id     u32
+	value  string
+	valueb []u8 // as bytes
+}
+
+// set the key/value; data goes to the filesystem, is organized per context and each db has a name
+pub fn (mut db DB) set(args_ SetArgs) !u32 {
+	// console.print_debug(args_)
+	mut args := args_
+	if args.value.len == 0 && args.valueb.len == 0 {
+		return error('specify value or valueb, now both are empty')
+	}
+	if args.key.len > 0 {
+		args.key = texttools.name_fix(args.key)
+	}
+
+	if args.value.len > 0 {
+		args.valueb = args.value.bytes()
+		args.value = ''
+	}
+
+	mut pathsrc := pathlib.Path{}
+
+	// lets deal with the key
+	if args.key.len > 0 {
+		if args.id > 0 {
+			return error("can't have id and key at the same time")
+		}
+		if !db.config.withkeys {
+			return error('db needs to be with keys')
+		}
+		if db.config.keyshashed {
+			// means we use a namedb
+			mut ndb := db.namedb or { panic('namedb should be available') }
+
+			args.id = ndb.set(args.key, '')!
+			pathsrc = db.path_get(args.id)!
+		} else {
+			mut datapath0 := '${db.path.path}/${args.key}'
+			if db.config.ext.len > 0 {
+				datapath0 += '.${db.config.ext}'
+			}
+			pathsrc = pathlib.get_link(path: datapath0, create: false)!
+			if !pathsrc.exists() {
+				args.id = db.parent.incr()!
+				mut destname := '${db.path.path}/${args.key}'
+				if db.config.ext.len > 0 {
+					destname += '.${db.config.ext}'
+				}
+				pathsrc = db.path_get(args.id)!
+				pathsrc.write('')!
+				pathsrc.link(destname, true)! // link the key to the right source info
+			} else {
+				mut p3 := pathsrc.getlink()!
+				p3_name := p3.name()
+				args.id = p3_name.u32()
+			}
+		}
+	} else if args.id > 0 {
+		pathsrc = db.path_get(args.id)!
+	} else {
+		args.id = db.parent.incr()!
+		pathsrc = db.path_get(args.id)!
+	}
+	console.print_debug('keydb ${pathsrc}')
+	if db.config.encrypted {
+		args.valueb = aes_symmetric.encrypt(args.valueb, db.secret()!)
+		pathsrc.write(base64.encode(args.valueb))!
+	} else {
+		pathsrc.writeb(args.valueb)!
+	}
+
+	assert args.id > 0
+	return args.id
+}
+
+// get the path based on the id in the DB
+fn (mut db DB) path_get(myid u32) !pathlib.Path {
+	a, b, c := namedb_dbid(myid)
+	mut destname := c.str()
+	if db.config.ext.len > 0 {
+		destname += '.${db.config.ext}'
+	}
+	mut mydatafile := pathlib.get_file(
+		path:   '${db.path.path}/${a}/${b}/${destname}'
+		create: false
+	)!
+	return mydatafile
+}
+
+// check if an entry exists based on keyname or id
+pub fn (mut db DB) exists(args_ GetArgs) !bool {
+	mut args := args_
+	args.key = texttools.name_fix(args.key)
+	mut pathsrc := pathlib.Path{}
+	if args.key.len > 0 {
+		if args.id > 0 {
+			return error("can't specify both id and key")
+		}
+		if !db.config.withkeys {
+			return error('db needs to be with keys')
+		}
+		if db.config.keyshashed {
+			// means we use a namedb
+			mut ndb := db.namedb or { panic('namedb should be available') }
+
+			return ndb.exists(args.key)!
+		} else {
+			mut datapath0 := '${db.path.path}/${args.key}'
+			if db.config.ext.len > 0 {
+				datapath0 += '.${db.config.ext}'
+			}
+			pathsrc = pathlib.get_link(path: datapath0, create: false)!
+		}
+	} else {
+		pathsrc = db.path_get(args.id)!
+	}
+	return pathsrc.exists()
+}
+
+// delete an entry
+pub fn (mut db DB) delete(args_ GetArgs) ! {
+	mut args := args_
+	if args.key.len > 0 {
+		args.key = texttools.name_fix(args.key)
+	}
+	mut pathsrc := pathlib.Path{}
+	if args.key.len > 0 {
+		if args.id > 0 {
+			return error("can't have id and key at the same time")
+		}
+		if !db.config.withkeys {
+			return error('db needs to be with keys')
+		}
+		if db.config.keyshashed {
+			// means we use a namedb
+			mut ndb := db.namedb or { panic('namedb should be available') }
+
+			args.id, _ = ndb.get(args.key)!
+			pathsrc = db.path_get(args.id)!
+			ndb.delete(args.key)!
+		} else {
+			mut datapath0 := '${db.path.path}/${args.key}'
+			if db.config.ext.len > 0 {
+				datapath0 += '.${db.config.ext}'
+			}
+			pathsrc = pathlib.get_link(path: datapath0, create: false)!
+			if pathsrc.exists() {
+				mut p3 := pathsrc.getlink()!
+				p3.delete()!
+			}
+		}
+	} else {
+		pathsrc = db.path_get(args.id)!
+	}
+	pathsrc.delete()!
+}
+
+// delete the db, will not be able to use it any longer
+pub fn (mut db DB) destroy() ! {
+	db.path.delete()!
+}
+
+// get all keys of the db (e.g. per session), can be filtered with a prefix
+pub fn (mut db DB) keys(prefix_ string) ![]string {
+	// TODO: see get, to fix this one
+	mut files := db.path.list()!
+
+	panic('implement ${files}')
+	prefix := texttools.name_fix(prefix_)
+	mut r := db.path.list(recursive: false)!
+	mut res := []string{}
+	for item in r.paths {
+		name := item.name()
+		if prefix == '' || name.starts_with(prefix) {
+			res << name
+		}
+	}
+	return res
+}
+
+// get all ids of the db
+pub fn (mut db DB) ids() ![]u32 {
+	// TODO: see get, to fix this one
+	mut files := db.path.list(files_only: true)!
+	mut res := []u32{}
+	for file in files.paths {
+		res << u32(file.name().int())
+	}
+	return res
+}
+
+// delete all data
+pub fn (mut db DB) empty() ! {
+	db.path.empty()!
+}
+
+fn (mut db DB) secret() !string {
+	if db.is_encrypted() {
+		return db.parent.secret
+	}
+	return ''
+}
+
+// will mark the db for encryption .
+// will go over all existing keys and encrypt their values
+pub fn (mut db DB) encrypt() ! {
+	// TODO: see get, to fix this one
+	if db.config.encrypted {
+		return
+	}
+	db.secret()! // just to check if ok
+	for key in db.keys('')! {
+		v := db.get(key: key)!
+		encrypted_v := aes_symmetric.encrypt(v.bytes(), db.secret()!)
+		db.set(key: key, valueb: encrypted_v)!
+	}
+
+	db.config.encrypted = true
+	db.path.file_get_new('encrypted')!
+}
+
+pub fn (db DB) is_encrypted() bool {
+	return db.config.encrypted
+}
diff --git a/lib/data/dbfs/dbcollection.v b/lib/data/dbfs/dbcollection.v
new file mode 100644
index 00000000..4e4fde52
--- /dev/null
+++ b/lib/data/dbfs/dbcollection.v
@@ -0,0 +1,168 @@
+module dbfs
+
+import freeflowuniverse.herolib.core.pathlib
+import freeflowuniverse.herolib.core.texttools
+// import freeflowuniverse.herolib.clients.redisclient
+import os
+import json
+// import freeflowuniverse.herolib.ui.console
+
+@[heap]
+pub struct DBCollection {
+pub mut:
+	path           pathlib.Path
+	contextid      u32
+	secret         string
+	memberid       u16 // needed for the autoincrement of the DB; when a user is logged in we need the memberid, memberid is unique per circle
+	member_pubkeys map[int]string
+	// redis redisclient.Redis
+}
+
+pub fn (mut dbcollection DBCollection) incr() !u32 {
+	mut incr_file := dbcollection.path.file_get_new('incr_${dbcollection.memberid}')!
+	c := incr_file.read() or { '' }
+	mut c_int := c.u32()
+	if c == '' {
+		c_int = 256 * 256 * dbcollection.memberid // in future we will have to check that we don't go over the range for 1 member
+	}
+	c_int += 1
+	incr_file.write('${c_int}')!
+	return c_int
+}
+
+@[params]
+pub struct DBCreateArgs {
+pub mut:
+	name       string
+	encrypted  bool
+	withkeys   bool // if set means we will use keys instead of only u32
+	keyshashed bool // if it's ok to hash the keys, which will generate an id out of these keys; it's more scalable
+}
+
+// create the database (init), cannot be used unless this is done
+//```
+// name string
+// encrypted bool
+// withkeys bool //if set means we will use keys instead of only u32
+// keyshashed bool //if it's ok to hash the keys, which will generate an id out of these keys; it's more scalable
+//```
+pub fn (mut dbcollection DBCollection) db_create(args_ DBCreateArgs) !DB {
+	mut args := args_
+	args.name = texttools.name_fix(args.name)
+	mut p := pathlib.get_dir(create: true, path: '${dbcollection.path.path}/${args.name}')!
+	cfg := DBConfig{
+		withkeys:   args.withkeys
+		name:       args.name
+		encrypted:  args.encrypted
+		keyshashed: args.keyshashed
+	}
+	mut path_meta := p.file_get('.meta') or {
+		p2 := pathlib.get_file(path: '${p.path}/.meta', create: true)!
+		p2
+	}
+	metadata := json.encode(cfg)
+	path_meta.write(metadata)!
+
+	return dbcollection.db_get(args.name)
+}
+
+pub fn (mut dbcollection DBCollection) db_get_create(args_ DBCreateArgs) !DB {
+	if dbcollection.exists(args_.name) {
+		return dbcollection.db_get(args_.name)!
+	}
+	return dbcollection.db_create(args_)!
+}
+
+// get a DB from the dbcollection
+pub fn (mut dbcollection DBCollection) db_get(name_ string) !DB {
+	name := texttools.name_fix(name_)
+	dirpath := '${dbcollection.path.path}/${name}'
+	mut p := pathlib.get_dir(create: false, path: dirpath) or {
+		return error("can't open or find database on ${dirpath}")
+	}
+	mut path_meta := p.file_get('.meta') or {
+		p2 := pathlib.get_file(path: '${p.path}/.meta', create: true)!
+		p2
+	}
+	data := path_meta.read()!
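+	// .meta holds the json-encoded DBConfig for this db (written by db_create)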
+ mut cfg := DBConfig{} + if data.len > 0 { + cfg = json.decode(DBConfig, data)! + } + mut db := DB{ + path: p + config: cfg + parent: &dbcollection + } + if cfg.encrypted { + if dbcollection.secret.len < 4 { + return error('secret needs to be specified on dbcollection level, now < 4 chars. \nDB: ${dbcollection}') + } + } + if db.config.withkeys { + if db.config.keyshashed { + // means we use a namedb + db.namedb = namedb_new('${db.path.path}/names')! + } + } + return db +} + +pub fn (mut dbcollection DBCollection) get_encrypted(name_ string) !DB { + mut db := dbcollection.db_get(name_)! + db.encrypt()! + return db +} + +pub fn (mut collection DBCollection) exists(name_ string) bool { + name := texttools.name_fix(name_) + return os.exists('${collection.path.path}/${name}') +} + +pub fn (mut collection DBCollection) delete(name_ string) ! { + name := texttools.name_fix(name_) + mut datafile := collection.path.dir_get(name) or { return } + datafile.delete()! +} + +pub fn (mut collection DBCollection) list() ![]string { + mut r := collection.path.list(recursive: false, dirs_only: true)! + mut res := []string{} + for item in r.paths { + res << item.name() + } + return res +} + +pub fn (mut collection DBCollection) prefix(prefix string) ![]string { + mut res := []string{} + for item in collection.list()! { + // console.print_debug(" ---- $item ($prefix)") + if item.trim_space().starts_with(prefix) { + // console.print_debug("888") + res << item + } + } + return res +} + +// delete all data in the dbcollection (be careful) +pub fn (mut collection DBCollection) destroy() ! { + collection.path.delete()! +} diff --git a/lib/data/dbfs/dbfs_test.v b/lib/data/dbfs/dbfs_test.v new file mode 100644 index 00000000..620e2239 --- /dev/null +++ b/lib/data/dbfs/dbfs_test.v @@ -0,0 +1,80 @@ +module dbfs + +import time +import os +import freeflowuniverse.herolib.ui.console + +fn test_dbfs() { + data_dir := '/tmp/db' + os.rmdir_all(data_dir) or {} + mut dbcollection := get(contextid: 1, dbpath: data_dir, secret: '123456')! + + mut db := dbcollection.db_create(name: 'db_a', encrypted: false, withkeys: true)! + + dotest(mut db, mut dbcollection) or { panic(err) } +} + +fn test_dbfs2() { + data_dir := '/tmp/db' + os.rmdir_all(data_dir) or {} + mut dbcollection := get(contextid: 1, dbpath: data_dir, secret: '123456')! + + mut db := dbcollection.db_create(name: 'db_a', encrypted: true, withkeys: true)! + + dotest(mut db, mut dbcollection) or { panic(err) } +} + +fn test_dbfs3() { + data_dir := '/tmp/db' + os.rmdir_all(data_dir) or {} + mut dbcollection := get(contextid: 1, dbpath: data_dir, secret: '123456')! + + mut db := dbcollection.db_create( + name: 'db_a' + encrypted: false + withkeys: true + keyshashed: true + )! + + panic('need other test') +} + +fn dotest(mut db DB, mut dbcollection DBCollection) ! { + id := db.set(key: 'aaa', value: 'bbbb')! + assert 'bbbb' == db.get(key: 'aaa')! + + id2 := db.set(key: 'aaa', value: 'bbbb2')! + assert 'bbbb2' == db.get(key: 'aaa')! + assert id == id2 + assert id == 1 + + id3 := db.set(key: 'bbb', value: 'bbbb3')! + assert 'bbbb3' == db.get(key: 'bbb')! + assert id3 == id2 + 1 + + assert db.exists(key: 'aaa')! + assert db.exists(key: 'bbb')! + assert db.exists(id: id2)! + assert db.exists(id: id3)! + id3_exsts := db.exists(id: id3 + 1)! + console.print_debug(id3 + 1) + assert id3_exsts == false + + for i in 3 .. 100 { + id4 := db.set(key: 'a${i}', value: 'b${i}')! + console.print_debug('${i} --> ${id4}') + assert i == id4 + } + db.delete(key: 'aaa')! 
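+	// after the delete, neither the key nor the underlying id should resolve anymore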
+	assert db.exists(key: 'aaa')! == false
+	assert db.exists(id: id2)! == false
+
+	db.delete(id: 50)!
+	assert db.exists(key: 'a50')! == false
+	assert db.exists(id: 50)! == false
+
+	dbcollection.destroy()!
+
+	assert dbcollection.exists('a10') == false
+	assert db.exists(key: 'test')! == false
+}
diff --git a/lib/data/dbfs/factory.v b/lib/data/dbfs/factory.v
new file mode 100644
index 00000000..f7c35cc6
--- /dev/null
+++ b/lib/data/dbfs/factory.v
@@ -0,0 +1,35 @@
+module dbfs
+
+import freeflowuniverse.herolib.core.pathlib
+// import freeflowuniverse.herolib.clients.redisclient
+import os
+
+@[params]
+pub struct CollectionGetArgs {
+pub mut:
+	dbpath    string
+	secret    string
+	contextid u32
+}
+
+// will return the dbcollection for a specific context
+pub fn get(args_ CollectionGetArgs) !DBCollection {
+	mut args := args_
+	mut secret := args.secret
+	if args.dbpath == '' {
+		args.dbpath = '${os.home_dir()}/var/dbfs/${args.contextid}'
+	}
+	mut p := pathlib.get_dir(create: true, path: args.dbpath)!
+
+	// mut c:=base.context()!
+	// mut r:=c.redis()!
+	// r.selectdb(args_.contextid)!
+
+	mut dbcollection := DBCollection{
+		path:      p
+		secret:    secret
+		contextid: args.contextid
+		// redis:r
+	}
+	return dbcollection
+}
diff --git a/lib/data/dbfs/namedb.v b/lib/data/dbfs/namedb.v
new file mode 100644
index 00000000..ef404a67
--- /dev/null
+++ b/lib/data/dbfs/namedb.v
@@ -0,0 +1,190 @@
+module dbfs
+
+import json
+import crypto.md5
+import freeflowuniverse.herolib.core.pathlib
+// import freeflowuniverse.herolib.ui.console
+
+@[heap]
+pub struct NameDB {
+pub mut:
+	path   pathlib.Path
+	config NameDBConfig
+}
+
+pub struct NameDBConfig {
+pub mut:
+	levels int = 1
+}
+
+// TODO: need to put levels in, so that we use fewer directories if the nr of items in the DB is small
+
+// the purpose of this file is to create an index (which can optionally have data attached per key)
+// so we can easily map between a key and an id, or the other way around
+
+// if the key is ok to hash, we can generate a unique id out of the hashed key
+
+pub fn namedb_new(path string) !NameDB {
+	mut p := pathlib.get_dir(path: path, create: true)!
+	mut p_meta := p.file_get('.meta') or {
+		p2 := pathlib.get_file(path: '${p.path}/.meta', create: true)!
+		p2
+	}
+	data := p_meta.read()!
+	mut cfg := NameDBConfig{}
+	if data.len > 0 {
+		cfg = json.decode(NameDBConfig, data)!
+	}
+	return NameDB{
+		path:   p
+		config: cfg
+	}
+}
+
+pub fn (mut db NameDB) save() ! {
+	mut p := pathlib.get_file(path: '${db.path.path}/.meta', create: false)!
+	data := json.encode(db.config)
+	p.write(data)!
+}
+
+// will store the key in a place where it can easily be found back, returns its unique u32 id
+pub fn (mut db NameDB) set(key string, data string) !u32 {
+	myid, mut mypath := db.key2path(key)!
+	// check if the key already exists in the file
+	mut line_num := u32(0)
+	content := mypath.read()!
+	mut lines := content.trim_space().split_into_lines()
+	mut idfound := u32(0)
+	for mut line in lines {
+		key_in_file, _ := namedb_process_line(mypath.path, line)
+		if key_in_file == key {
+			line = '${key}:${data}'
+			if idfound > 0 {
+				panic('bug, there is a double key, should not be possible')
+			}
+			idfound = myid + line_num
+		}
+		line_num += 1
+	}
+	if idfound == 0 {
+		// the key was not in the file yet, append it
+		lines << '${key}:${data}'
+		idfound = myid + u32(lines.len) - 1
+	}
+	mypath.write(lines.join('\n'))!
+
+	return idfound
+}
+
+pub fn (mut db NameDB) delete(key string) ! {
+	_, mut mypath := db.key2path(key)!
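+	// read all lines and keep only the ones that do not match the key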
+	content := mypath.read()!
+	mut lines := content.trim_space().split_into_lines()
+	mut lines_out := []string{}
+	mut found := false
+	for line in lines {
+		key_in_file, _ := namedb_process_line(mypath.path, line)
+		if key_in_file == key {
+			found = true
+			continue // skip the line which holds the key
+		}
+		lines_out << line
+	}
+	if found {
+		mypath.write(lines_out.join('\n'))!
+	}
+}
+
+// look up a key, returns its unique u32 id and the data stored with it
+pub fn (mut db NameDB) get(key string) !(u32, string) {
+	myid, mut mypath := db.key2path(key)!
+	mut line_num := u32(0)
+	content := mypath.read()!
+	mut lines := content.trim_space().split_into_lines()
+	for line in lines {
+		key_in_file, data := namedb_process_line(mypath.path, line)
+		if key_in_file == key {
+			return myid + line_num, data
+		}
+		line_num += 1
+	}
+	return error("can't find key:${key} in db:${db.path.path}")
+}
+
+pub fn (mut db NameDB) exists(key string) !bool {
+	_, mut mypath := db.key2path(key)!
+	content := mypath.read()!
+	mut lines := content.trim_space().split_into_lines()
+	for line in lines {
+		key_in_file, _ := namedb_process_line(mypath.path, line)
+		if key_in_file == key {
+			return true
+		}
+	}
+	return false
+}
+
+pub fn (mut db NameDB) get_from_id(myid u32) !(string, string) {
+	// console.print_debug("key get: ${myid}")
+	mut mypath := db.dbpath(myid)!
+	// console.print_debug("path: ${mypath.path}")
+	_, _, c := namedb_dbid(myid)
+	// console.print_debug("ids: ${a} ${b} ${c}")
+	content := mypath.read()!
+	mut lines := content.trim_space().split_into_lines()
+	// console.print_debug(lines)
+	if c < lines.len {
+		myline := lines[c] or {
+			return error('out of bounds for: ${mypath.path}. Nrlines:${lines.len}. Line:${c}')
+		}
+		key_in_file, data := namedb_process_line(mypath.path, myline)
+		return key_in_file, data
+	}
+	return error('Line nr higher than file nr of lines: ${mypath.path}. Nrlines:${lines.len}. Line:${c}')
+}
+
+// calculate the ids needed to create the path
+fn namedb_dbid(myid u32) (u8, u8, u16) {
+	a := u8(myid / u32(256 * 256))
+	a_post := myid - u32(a) * u32(256 * 256)
+	b := u8(a_post / 256)
+	b_post := a_post - u32(b) * u32(256)
+	c := u16(b_post)
+	return a, b, c
+}
+
+fn (mut db NameDB) key2path(key string) !(u32, pathlib.Path) {
+	if key.len < 2 {
+		return error('key needs to be at least 2 chars')
+	}
+	hash_bytes := md5.sum(key.bytes())
+	a := hash_bytes[0] or { panic('bug') }
+	b := hash_bytes[1] or { panic('bug') }
+	myid := u32(int(a) * 256 * 256 + int(b) * 256)
+	mut mypath := db.dbpath(myid)!
+	return myid, mypath
+}
+
+fn namedb_process_line(path string, line string) (string, string) {
+	if line.contains(':') {
+		myline_parts := line.split(':').map(it.trim_space())
+		if myline_parts.len != 2 {
+			panic('syntax error in line ${line} in ${path}, not enough parts.')
+		}
+		return myline_parts[0], myline_parts[1]
+	}
+	return line.trim_space(), ''
+}
+
+fn (mut db NameDB) dbpath(myid u32) !pathlib.Path {
+	a, b, _ := namedb_dbid(myid)
+	// console.print_debug("dbpath ids: ${a} ${b} ${c}")
+	dir_name := a.hex()
+	file_name := b.hex()
+	mut mydatafile := pathlib.get_file(
+		path:   '${db.path.path}/${dir_name}/${file_name}.txt'
+		create: true
+	)!
+	return mydatafile
+}
diff --git a/lib/data/dbfs/namedb_test.v b/lib/data/dbfs/namedb_test.v
new file mode 100644
index 00000000..bea83f95
--- /dev/null
+++ b/lib/data/dbfs/namedb_test.v
@@ -0,0 +1,68 @@
+module dbfs
+
+import os
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.crypt.secp256k1
+import freeflowuniverse.herolib.ui.console
+
+fn test_dbname1() {
+	data_dir := '/tmp/namedbtest'
+
+	mut ndb := namedb_new(data_dir)!
+
+	console.print_debug('delete ${data_dir}')
+	os.rmdir_all(data_dir) or {}
+
+	mut test_cases := []string{}
+	nr := 1
+	for i := 0; i < nr * 1000 + 1; i++ {
+		privkey := secp256k1.new()!
+		pubkey := privkey.public_key_base64()
+		if i % 1000 == 0 {
+			console.print_debug(i)
+		}
+		test_cases << pubkey
+	}
+
+	defer {
+		// os.rmdir_all(data_dir) or {}
+		console.print_debug('rmdir done')
+	}
+
+	// Register public keys and store their unique IDs
+	mut ids := []u32{}
+	mut i := 0
+	for pubkey in test_cases {
+		if i % 1000 == 0 {
+			console.print_debug('b${i}')
+		}
+		myid := ndb.set(pubkey, '${i}')!
+		ids << myid
+		i++
+	}
+
+	// Retrieve public keys using their unique IDs
+	console.print_debug('retrieve starts')
+	for i2, myid in ids {
+		retrieved_pubkey, data := ndb.get_from_id(myid)!
+		myid_found, data_found := ndb.get(retrieved_pubkey)!
+		assert myid_found == myid
+		assert data_found == data
+		tc := test_cases[i2] or {
+			panic("can't find ${i2} in test_cases with len: ${test_cases.len}")
+		}
+		assert retrieved_pubkey == tc, 'Retrieved pubkey doesn\'t match for ID: ${myid}'
+	}
+
+	console.print_debug('All tests passed!')
+}
+
+fn test_dbname2() {
+	assert namedb_dbid(0).str() == '(0, 0, 0)'
+	assert namedb_dbid(1).str() == '(0, 0, 1)'
+	assert namedb_dbid(255).str() == '(0, 0, 255)'
+	assert namedb_dbid(256).str() == '(0, 1, 0)'
+	assert namedb_dbid(256 * 256).str() == '(1, 0, 0)'
+	assert namedb_dbid(256 * 256 + 3).str() == '(1, 0, 3)'
+	assert namedb_dbid(256 * 256 + 3 + 256).str() == '(1, 1, 3)'
+}
diff --git a/lib/data/dbfs/readme.md b/lib/data/dbfs/readme.md
new file mode 100644
index 00000000..1f7ae9b8
--- /dev/null
+++ b/lib/data/dbfs/readme.md
@@ -0,0 +1,72 @@
+# dbfs
+
+A key-value store on the filesystem; it works with ids as well as with keys.
+
+The algorithms used have been optimized for scalability and human readability: the idea is that the data files stay small and efficient, and remain usable on e.g. a git-based storage system.
+
+- dbcollection: can be linked to a context of hero (can be a circle or another area worth remembering things for)
+- db: there can be more than 1 db per dbcollection
+- the secret is specified per dbcollection
+- each subdb inherits the secret from the dbcollection but needs to be configured as encrypted
+
+```v
+// TODO: fix, we refactored
+
+import freeflowuniverse.herolib.data.dbfs
+
+mut dbcollection := get(context: 'test', secret: '123456')!
+
+mut db := dbcollection.get_encrypted("db_a")!
+
+db.set('a', 'bbbb')!
+assert 'bbbb' == db.get('a')!
+```
+
+## namedb
+
+NameDB efficiently stores millions of names: each name gets a unique id, and based on that id the name can be found back easily. See the sketch below.
+
+Some string-based data can be attached to each name, which turns it into a highly efficient key-value store, usable e.g. as a DB of pubkeys or for a nameserver.
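+A minimal sketch of the NameDB API as implemented in namedb.v (the path and values are just illustrative):
+
+```v
+import freeflowuniverse.herolib.data.dbfs
+
+mut ndb := dbfs.namedb_new('/tmp/namedb_example')!
+
+// set returns a unique u32 id, derived from the md5 of the key plus the line position
+myid := ndb.set('mykey', 'mydata')!
+
+// look up by key: returns the id and the attached data
+id2, data := ndb.get('mykey')!
+assert id2 == myid
+assert data == 'mydata'
+
+// look up by id: returns the key and the attached data
+key2, data2 := ndb.get_from_id(myid)!
+assert key2 == 'mykey'
+assert data2 == 'mydata'
+
+assert ndb.exists('mykey')!
+ndb.delete('mykey')!
+```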
+## dbfs examples
+
+Each session has such a DB attached to it; data is stored on the filesystem, which makes it e.g. ideal for config sessions (which are done on context level).
+
+```v
+// TODO: fix, we refactored
+
+import freeflowuniverse.herolib.data.dbfs
+
+mut dbcollection := get(context: 'test', secret: '123456')!
+
+mut db := dbcollection.get_encrypted("db_a")!
+
+// TODO: needs to be updated, is now based on key and id
+
+// get the value
+fn (mut db DB) get(name_ string) !string
+
+// set the key/value on the filesystem; data is organized per context and each db has a name
+fn (mut db DB) set(name_ string, data_ string) !
+
+// check if an entry exists based on keyname
+fn (mut db DB) exists(name_ string) bool
+
+// delete an entry
+fn (mut db DB) delete(name_ string) !
+
+// get all keys of the db (e.g. per session)
+fn (mut db DB) keys(prefix string) ![]string
+
+// delete all data
+fn (mut db DB) destroy() !
+
+```
\ No newline at end of file
diff --git a/lib/data/dbfs/sid.v b/lib/data/dbfs/sid.v
new file mode 100644
index 00000000..6d08fa31
--- /dev/null
+++ b/lib/data/dbfs/sid.v
@@ -0,0 +1,13 @@
+module dbfs
+
+import freeflowuniverse.herolib.core.smartid
+
+// get u32 from sid as string
+pub fn sid2int(sid string) u32 {
+	return smartid.sid_int(sid)
+}
+
+// represent sid as string, from u32
+fn int2sid(sid u32) string {
+	return smartid.sid_str(sid)
+}
diff --git a/lib/data/encoder/auto.v b/lib/data/encoder/auto.v
new file mode 100644
index 00000000..e47f02d9
--- /dev/null
+++ b/lib/data/encoder/auto.v
@@ -0,0 +1,115 @@
+module encoder
+
+import time
+import freeflowuniverse.herolib.ui.console
+
+// example see https://github.com/vlang/v/blob/master/examples/compiletime/reflection.v
+
+pub fn encode[T](obj T) ![]u8 {
+	mut d := new()
+	// compile-time `for` loop
+	// T.fields gives an array of a field metadata type
+	$for field in T.fields {
+		// Primitive types
+		$if field.typ is string {
+			// $(string_expr) produces an identifier
+			d.add_string(obj.$(field.name).str())
+		} $else $if field.typ is int {
+			d.add_int(int(obj.$(field.name)))
+		} $else $if field.typ is u8 {
+			d.add_u8(u8(obj.$(field.name)))
+		} $else $if field.typ is u16 {
+			d.add_u16(u16(obj.$(field.name)))
+		} $else $if field.typ is u32 {
+			d.add_u32(u32(obj.$(field.name)))
+		} $else $if field.typ is u64 {
+			d.add_u64(u64(obj.$(field.name)))
+		} $else $if field.typ is time.Time {
+			d.add_time(time.new(obj.$(field.name)))
+			// Arrays of primitive types
+		} $else $if field.typ is []string {
+			// d.add_list_string(obj.$(field.name)) why error??
+			d.add_list_string(obj.$(field.name)[..])
+		} $else $if field.typ is []int {
+			d.add_list_int(obj.$(field.name)[..])
+		} $else $if field.typ is []u8 {
+			d.add_list_u8(obj.$(field.name)[..])
+		} $else $if field.typ is []u16 {
+			d.add_list_u16(obj.$(field.name)[..])
+		} $else $if field.typ is []u32 {
+			d.add_list_u32(obj.$(field.name)[..])
+		} $else $if field.typ is []u64 {
+			d.add_list_u64(obj.$(field.name)[..])
+			// Maps of primitive types
+		} $else $if field.typ is map[string]string {
+			d.add_map_string(obj.$(field.name).clone())
+		} $else $if field.typ is map[string][]u8 {
+			d.add_map_bytes(obj.$(field.name).clone())
+			// Structs
+		} $else $if field.is_struct {
+			e := encode(obj.$(field.name))!
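+			// the nested struct is encoded recursively; its bytes are stored as a length-prefixed u8 list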
+ d.add_list_u8(e) + } $else { + typ_name := typeof(obj.$(field.name)).name + return error("The type `${typ_name}` of field `${field.name}` can't be encoded") + } + } + return d.data +} + +pub fn decode[T](data []u8) !T { + mut d := decoder_new(data) + mut result := T{} + // compile-time `for` loop + // T.fields gives an array of a field metadata type + $for field in T.fields { + // console.print_debug(field.name) + // console.print_debug(typeof(result.$(field.name)).name) + // console.print_debug(result.$(field.name)) + + // Primitive types + $if field.typ is string { + // $(string_expr) produces an identifier + result.$(field.name) = d.get_string() + } $else $if field.typ is int { + result.$(field.name) = d.get_int() + } $else $if field.typ is u8 { + result.$(field.name) = d.get_u8() + } $else $if field.typ is u16 { + result.$(field.name) = d.get_u16() + } $else $if field.typ is u32 { + result.$(field.name) = d.get_u32() + } $else $if field.typ is u64 { + result.$(field.name) = d.get_u64() + } $else $if field.typ is time.Time { + result.$(field.name) = d.get_time() + // Arrays of primitive types + } $else $if field.typ is []string { + result.$(field.name) = d.get_list_string() + } $else $if field.typ is []int { + result.$(field.name) = d.get_list_int() + } $else $if field.typ is []u8 { + result.$(field.name) = d.get_list_u8() + } $else $if field.typ is []u16 { + result.$(field.name) = d.get_list_u16() + } $else $if field.typ is []u32 { + result.$(field.name) = d.get_list_u32() + } $else $if field.typ is []u64 { + result.$(field.name) = d.get_list_u64() + // Maps of primitive types + } $else $if field.typ is map[string]string { + result.$(field.name) = d.get_map_string() + } $else $if field.typ is map[string][]u8 { + result.$(field.name) = d.get_map_bytes() + // Structs + } $else $if field.is_struct { + // TODO handle recursive behavior + } $else { + typ_name := typeof(result.$(field.name)).name + return error("The type `${typ_name}` of field `${field.name}` can't be decoded") + } + } + return result +} + +// TODO: complete, the recursive behavior will be little tricky diff --git a/lib/data/encoder/encoder_decode.v b/lib/data/encoder/encoder_decode.v new file mode 100644 index 00000000..7287906d --- /dev/null +++ b/lib/data/encoder/encoder_decode.v @@ -0,0 +1,155 @@ +module encoder + +import encoding.binary as bin +import freeflowuniverse.herolib.data.ourtime +import time + +pub struct Decoder { +pub mut: + version u8 = 1 // is important + data []u8 +} + +pub fn decoder_new(data []u8) Decoder { + mut e := Decoder{} + e.data = data + // e.data = data.reverse() + return e +} + +pub fn (mut d Decoder) get_string() string { + n := d.get_u16() + v := d.data[..n] + d.data.delete_many(0, n) + return v.bytestr() +} + +pub fn (mut d Decoder) get_int() int { + return int(d.get_u32()) +} + +pub fn (mut d Decoder) get_bytes() []u8 { + n := int(d.get_u32()) + v := d.data[..n] + d.data.delete_many(0, n) + return v +} + +// adds u16 length of string in bytes + the bytes +pub fn (mut d Decoder) get_u8() u8 { + // remove first byte, this corresponds to u8, so the data bytestring becomes 1 byte shorter + v := d.data.first() + d.data.delete(0) + return v +} + +pub fn (mut d Decoder) get_u16() u16 { + v := d.data[..2] + d.data.delete_many(0, 2) + return bin.little_endian_u16(v) +} + +pub fn (mut d Decoder) get_u32() u32 { + v := d.data[..4] + d.data.delete_many(0, 4) + return bin.little_endian_u32(v) +} + +pub fn (mut d Decoder) get_u64() u64 { + v := d.data[..8] + d.data.delete_many(0, 8) + return 
bin.little_endian_u64(v)
+}
+
+pub fn (mut d Decoder) get_i64() i64 {
+	v := d.data[..8]
+	d.data.delete_many(0, 8)
+	return i64(bin.little_endian_u64(v))
+}
+
+pub fn (mut d Decoder) get_time() time.Time {
+	nano_time := d.get_i64()
+	seconds := nano_time / int(1e9)
+	nano_seconds := int(nano_time % int(1e9))
+	return time.unix_nanosecond(seconds, nano_seconds)
+}
+
+pub fn (mut d Decoder) get_ourtime() ourtime.OurTime {
+	return ourtime.OurTime{
+		unixt: d.get_i64()
+	}
+}
+
+pub fn (mut d Decoder) get_list_string() []string {
+	n := d.get_u16()
+	mut v := []string{len: int(n)}
+	for i in 0 .. n {
+		v[i] = d.get_string()
+	}
+	return v
+}
+
+pub fn (mut d Decoder) get_list_int() []int {
+	n := d.get_u16()
+	mut v := []int{len: int(n)}
+	for i in 0 .. n {
+		v[i] = d.get_int()
+	}
+	return v
+}
+
+pub fn (mut d Decoder) get_list_u8() []u8 {
+	n := d.get_u16()
+	v := d.data[..n]
+	d.data.delete_many(0, n)
+	return v
+}
+
+pub fn (mut d Decoder) get_list_u16() []u16 {
+	n := d.get_u16()
+	mut v := []u16{len: int(n)}
+	for i in 0 .. n {
+		v[i] = d.get_u16()
+	}
+	return v
+}
+
+pub fn (mut d Decoder) get_list_u32() []u32 {
+	n := d.get_u16()
+	mut v := []u32{len: int(n)}
+	for i in 0 .. n {
+		v[i] = d.get_u32()
+	}
+	return v
+}
+
+pub fn (mut d Decoder) get_list_u64() []u64 {
+	n := d.get_u16()
+	mut v := []u64{len: int(n)}
+	for i in 0 .. n {
+		v[i] = d.get_u64()
+	}
+	return v
+}
+
+pub fn (mut d Decoder) get_map_string() map[string]string {
+	n := d.get_u16()
+	mut v := map[string]string{}
+	for _ in 0 .. n {
+		key := d.get_string()
+		val := d.get_string()
+		v[key] = val
+	}
+	return v
+}
+
+pub fn (mut d Decoder) get_map_bytes() map[string][]u8 {
+	n := d.get_u16()
+	mut v := map[string][]u8{}
+	for _ in 0 .. n {
+		key := d.get_string()
+		val := d.get_bytes()
+		v[key] = val
+	}
+	return v
+}
diff --git a/lib/data/encoder/encoder_encode.v b/lib/data/encoder/encoder_encode.v
new file mode 100644
index 00000000..b38e4882
--- /dev/null
+++ b/lib/data/encoder/encoder_encode.v
@@ -0,0 +1,176 @@
+module encoder
+
+import time
+import encoding.binary as bin
+import freeflowuniverse.herolib.data.ourtime
+
+const kb = 1024
+
+pub struct Encoder {
+pub mut:
+	data []u8
+	// datatypes []DataType
+}
+
+// enum DataType{
+// 	string
+// 	int
+// 	bytes
+// 	u8
+// 	u16
+// 	u32
+// 	u64
+// 	time
+// 	list_string
+// 	list_int
+// 	list_u8
+// 	list_u16
+// 	list_u32
+// 	list_u64
+// 	map_string
+// 	map_bytes
+// }
+
+pub fn new() Encoder {
+	mut e := Encoder{}
+	return e
+}
+
+// adds u16 length of the string in bytes + the bytes
+pub fn (mut b Encoder) add_string(data string) {
+	if data.len >= 64 * kb {
+		// the length prefix is a u16, so the maximum is 64KB - 1
+		panic('string cannot be bigger than 64kb')
+	}
+	b.add_u16(u16(data.len))
+	b.data << data.bytes()
+}
+
+// Please note that unlike C and Go, int is always a 32 bit integer.
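+// For example, add_int(1) is stored as the 4 little-endian bytes 01 00 00 00.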
+// We borrow the add_u32() function to handle the encoding of a 32 bit type +pub fn (mut b Encoder) add_int(data int) { + b.add_u32(u32(data)) +} + +// add bytes or bytestring +pub fn (mut b Encoder) add_bytes(data []u8) { + b.add_u32(u32(data.len)) + b.data << data +} + +pub fn (mut b Encoder) add_u8(data u8) { + b.data << data +} + +pub fn (mut b Encoder) add_u16(data u16) { + mut d := []u8{len: 2} + bin.little_endian_put_u16(mut d, data) + b.data << d +} + +pub fn (mut b Encoder) add_u32(data u32) { + mut d := []u8{len: 4} + bin.little_endian_put_u32(mut d, data) + b.data << d +} + +pub fn (mut b Encoder) add_u64(data u64) { + mut d := []u8{len: 8} + bin.little_endian_put_u64(mut d, data) + b.data << d +} + +pub fn (mut b Encoder) add_i64(data i64) { + mut d := []u8{len: 8} + bin.little_endian_put_u64(mut d, u64(data)) + b.data << d +} + +pub fn (mut b Encoder) add_time(data time.Time) { + b.add_u64(u64(data.unix_nano())) // add as epoch time +} + +pub fn (mut b Encoder) add_ourtime(data ourtime.OurTime) { + b.add_i64(data.unixt) +} + +pub fn (mut b Encoder) add_list_string(data []string) { + if data.len > 64 * kb { + panic('list cannot have more than 64kb items.') + } + b.add_u16(u16(data.len)) + for item in data { + b.add_string(item) + } +} + +pub fn (mut b Encoder) add_list_int(data []int) { + if data.len > 64 * kb { + panic('list cannot have more than 64kb items.') + } + b.add_u16(u16(data.len)) // how many items in list + for item in data { + b.add_int(item) + } +} + +pub fn (mut b Encoder) add_list_u8(data []u8) { + if data.len > 64 * kb { + panic('list cannot have more than 64kb items.') + } + b.add_u16(u16(data.len)) // how many items in list + b.data << data +} + +pub fn (mut b Encoder) add_list_u16(data []u16) { + if data.len > 64 * kb { + panic('list cannot have more than 64kb items.') + } + b.add_u16(u16(data.len)) // how many items in list + for item in data { + b.add_u16(item) + } +} + +pub fn (mut b Encoder) add_list_u32(data []u32) { + if data.len > 64 * kb { + panic('list cannot have more than 64kb items.') + } + b.add_u16(u16(data.len)) // how many items in list + for item in data { + b.add_u32(item) + } +} + +pub fn (mut b Encoder) add_list_u64(data []u64) { + if data.len > 64 * kb { + panic('list cannot have more than 64kb items.') + } + b.add_u16(u16(data.len)) // how many items in list + for item in data { + b.add_u64(item) + } +} + +// when complicated hash e.g. map of other object need to serialize each sub object +pub fn (mut b Encoder) add_map_string(data map[string]string) { + if data.len > 64 * kb { + panic('map cannot have more than 64kb items.') + } + b.add_u16(u16(data.len)) // max nr of items in the map + for key, val in data { + b.add_string(key) + b.add_string(val) + } +} + +// when complicated hash e.g. 
map of other object need to serialize each sub object +pub fn (mut b Encoder) add_map_bytes(data map[string][]u8) { + if data.len > 64 * kb { + panic('map cannot have more than 64kb items.') + } + b.add_u16(u16(data.len)) // max nr of items in the map + for key, val in data { + b.add_string(key) + b.add_bytes(val) + } +} diff --git a/lib/data/encoder/encoder_test.v b/lib/data/encoder/encoder_test.v new file mode 100644 index 00000000..a7b58b14 --- /dev/null +++ b/lib/data/encoder/encoder_test.v @@ -0,0 +1,285 @@ +module encoder + +import time +import math +import freeflowuniverse.herolib.ui.console + +fn test_string() { + mut e := new() + e.add_string('a') + e.add_string('bc') + assert e.data == [u8(1), 0, 97, 2, 0, 98, 99] + + mut d := decoder_new(e.data) + assert d.get_string() == 'a' + assert d.get_string() == 'bc' +} + +fn test_int() { + mut e := new() + e.add_int(min_i32) + e.add_int(max_i32) + assert e.data == [u8(0x00), 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x7f] + + mut d := decoder_new(e.data) + assert d.get_int() == min_i32 + assert d.get_int() == max_i32 +} + +fn test_bytes() { + sb := 'abcdef'.bytes() + + mut e := new() + e.add_list_u8(sb) + assert e.data == [u8(6), 0, 97, 98, 99, 100, 101, 102] + + mut d := decoder_new(e.data) + assert d.get_list_u8() == sb +} + +fn test_u8() { + mut e := new() + e.add_u8(min_u8) + e.add_u8(max_u8) + assert e.data == [u8(0x00), 0xff] + + mut d := decoder_new(e.data) + assert d.get_u8() == min_u8 + assert d.get_u8() == max_u8 +} + +fn test_u16() { + mut e := new() + e.add_u16(min_u16) + e.add_u16(max_u16) + assert e.data == [u8(0x00), 0x00, 0xff, 0xff] + + mut d := decoder_new(e.data) + assert d.get_u16() == min_u16 + assert d.get_u16() == max_u16 +} + +fn test_u32() { + mut e := new() + e.add_u32(min_u32) + e.add_u32(max_u32) + assert e.data == [u8(0x00), 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff] + + mut d := decoder_new(e.data) + assert d.get_u32() == min_u32 + assert d.get_u32() == max_u32 +} + +fn test_u64() { + mut e := new() + e.add_u64(min_u64) + e.add_u64(max_u64) + assert e.data == [u8(0x00), 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff] + + mut d := decoder_new(e.data) + assert d.get_u64() == min_u64 + assert d.get_u64() == max_u64 +} + +fn test_time() { + mut e := new() + t := time.now() + e.add_time(t) + + mut d := decoder_new(e.data) + assert d.get_time() == t +} + +fn test_list_string() { + list := ['a', 'bc', 'def'] + + mut e := new() + e.add_list_string(list) + assert e.data == [u8(3), 0, 1, 0, 97, 2, 0, 98, 99, 3, 0, 100, 101, 102] + + mut d := decoder_new(e.data) + assert d.get_list_string() == list +} + +fn test_list_int() { + list := [0x872fea95, 0, 0xfdf2e68f] + + mut e := new() + e.add_list_int(list) + assert e.data == [u8(3), 0, 0x95, 0xea, 0x2f, 0x87, 0, 0, 0, 0, 0x8f, 0xe6, 0xf2, 0xfd] + + mut d := decoder_new(e.data) + assert d.get_list_int() == list +} + +fn test_list_u8() { + list := [u8(153), 0, 22] + + mut e := new() + e.add_list_u8(list) + assert e.data == [u8(3), 0, 153, 0, 22] + + mut d := decoder_new(e.data) + assert d.get_list_u8() == list +} + +fn test_list_u16() { + list := [u16(0x8725), 0, 0xfdff] + + mut e := new() + e.add_list_u16(list) + assert e.data == [u8(3), 0, 0x25, 0x87, 0, 0, 0xff, 0xfd] + + mut d := decoder_new(e.data) + assert d.get_list_u16() == list +} + +fn test_list_u32() { + list := [u32(0x872fea95), 0, 0xfdf2e68f] + + mut e := new() + e.add_list_u32(list) + assert e.data == [u8(3), 0, 0x95, 0xea, 0x2f, 0x87, 0, 0, 0, 0, 0x8f, 0xe6, 0xf2, 0xfd] + 
+ mut d := decoder_new(e.data) + assert d.get_list_u32() == list +} + +fn test_map_string() { + mp := { + '1': 'a' + '2': 'bc' + } + + mut e := new() + e.add_map_string(mp) + assert e.data == [u8(2), 0, 1, 0, 49, 1, 0, 97, 1, 0, 50, 2, 0, 98, 99] + + mut d := decoder_new(e.data) + assert d.get_map_string() == mp +} + +fn test_map_bytes() { + mp := { + '1': 'a'.bytes() + '2': 'bc'.bytes() + } + + mut e := new() + e.add_map_bytes(mp) + assert e.data == [u8(2), 0, 1, 0, 49, 1, 0, 0, 0, 97, 1, 0, 50, 2, 0, 0, 0, 98, 99] + + mut d := decoder_new(e.data) + assert d.get_map_bytes() == mp +} + +struct StructType[T] { +mut: + val T +} + +fn get_empty_struct_input[T]() StructType[T] { + return StructType[T]{} +} + +fn get_struct_input[T](val T) StructType[T] { + return StructType[T]{ + val: val + } +} + +fn encode_decode_struct[T](input StructType[T]) bool { + data := encode(input) or { + console.print_debug('Failed to encode, error: ${err}') + return false + } + output := decode[StructType[T]](data) or { + console.print_debug('Failed to decode, error: ${err}') + return false + } + return input == output +} + +fn test_struct() { + // string + assert encode_decode_struct(get_empty_struct_input[string]()) + assert encode_decode_struct(get_struct_input('')) + assert encode_decode_struct(get_struct_input('a')) + + // int + assert encode_decode_struct(get_empty_struct_input[int]()) + assert encode_decode_struct(get_struct_input(-1)) + + // u8 + assert encode_decode_struct(get_empty_struct_input[u8]()) + assert encode_decode_struct(get_struct_input(u8(2))) + + // u16 + assert encode_decode_struct(get_empty_struct_input[u16]()) + assert encode_decode_struct(get_struct_input(u16(3))) + + // u32 + assert encode_decode_struct(get_empty_struct_input[u32]()) + assert encode_decode_struct(get_struct_input(u32(4))) + + // u64 + assert encode_decode_struct(get_empty_struct_input[u64]()) + assert encode_decode_struct(get_struct_input(u64(5))) + + // time.Time + // assert encode_decode_struct[time.Time](get_empty_struct_input[time.Time]()) // get error here + assert encode_decode_struct[time.Time](get_struct_input[time.Time](time.now())) + + // string array + assert encode_decode_struct(get_empty_struct_input[[]string]()) + assert encode_decode_struct(get_struct_input([]string{})) + assert encode_decode_struct(get_struct_input([''])) + assert encode_decode_struct(get_struct_input(['a'])) + + // int array + assert encode_decode_struct(get_empty_struct_input[[]int]()) + assert encode_decode_struct(get_struct_input([]int{})) + assert encode_decode_struct(get_struct_input([-1])) + + // u8 array + assert encode_decode_struct(get_empty_struct_input[[]u8]()) + assert encode_decode_struct(get_struct_input([]u8{})) + assert encode_decode_struct(get_struct_input([u8(2)])) + + // u16 array + assert encode_decode_struct(get_empty_struct_input[[]u16]()) + assert encode_decode_struct(get_struct_input([]u16{})) + assert encode_decode_struct(get_struct_input([u16(3)])) + + // u32 array + assert encode_decode_struct(get_empty_struct_input[[]u32]()) + assert encode_decode_struct(get_struct_input([]u32{})) + assert encode_decode_struct(get_struct_input([u32(4)])) + + // u64 array + assert encode_decode_struct(get_empty_struct_input[[]u64]()) + assert encode_decode_struct(get_struct_input([]u64{})) + assert encode_decode_struct(get_struct_input([u64(5)])) + + // string map + assert encode_decode_struct(get_empty_struct_input[map[string]string]()) + assert encode_decode_struct(get_struct_input(map[string]string{})) + assert 
encode_decode_struct(get_struct_input({ + '1': 'a' + })) + + // bytes map + assert encode_decode_struct(get_empty_struct_input[map[string][]u8]()) + assert encode_decode_struct(get_struct_input(map[string][]u8{})) + assert encode_decode_struct(get_struct_input({ + '1': 'a'.bytes() + })) + + // struct + assert encode_decode_struct(get_empty_struct_input[StructType[int]]()) + assert encode_decode_struct(get_struct_input(StructType[int]{})) + // assert encode_decode_struct(get_struct_input(StructType[int]{ + // val: int(1) + // })) // decode not implemented +} diff --git a/lib/data/encoder/readme.md b/lib/data/encoder/readme.md new file mode 100644 index 00000000..7366342a --- /dev/null +++ b/lib/data/encoder/readme.md @@ -0,0 +1,247 @@ + +# V Binary Encoder/Decoder + +A high-performance binary encoder/decoder module for V that provides efficient serialization and deserialization of data structures. The encoder supports automatic encoding/decoding of structs using V's compile-time reflection capabilities. + +## Features + +- Automatic struct encoding/decoding using compile-time reflection +- Support for primitive types, arrays, maps, and nested structs +- Compact binary format with length prefixing +- Size limits to prevent memory issues (64KB for strings/lists) +- Comprehensive error handling +- Built-in versioning support + +## Format + +The binary format starts with a version byte (currently v1), followed by the encoded data: + +``` +[version_byte][encoded_data...] +``` + +## Supported Types + +### Primitive Types +- `string` +- `int` (32-bit) +- `u8` +- `u16` +- `u32` +- `u64` +- `time.Time` + +### Arrays +- `[]string` +- `[]int` +- `[]u8` +- `[]u16` +- `[]u32` +- `[]u64` + +### Maps +- `map[string]string` +- `map[string][]u8` + +### Structs +- Nested struct support with automatic encoding/decoding + +## Usage + +### Basic Encoding + +```v +import freeflowuniverse.herolib.data.encoder + +// Create a new encoder +mut e := encoder.new() + +// Add primitive values +e.add_string('hello') +e.add_int(42) +e.add_u8(255) +e.add_u16(65535) +e.add_u32(4294967295) +e.add_u64(18446744073709551615) + +// Add arrays +e.add_list_string(['one', 'two', 'three']) +e.add_list_int([1, 2, 3]) + +// Add maps +e.add_map_string({ + 'key1': 'value1' + 'key2': 'value2' +}) + +// Get encoded bytes +encoded := e.data +``` + +### Basic Decoding + +```v +// Create decoder from bytes +mut d := encoder.decoder_new(encoded) + +// Read values in same order as encoded +str := d.get_string() +num := d.get_int() +byte := d.get_u8() +u16_val := d.get_u16() +u32_val := d.get_u32() +u64_val := d.get_u64() + +// Read arrays +strings := d.get_list_string() +ints := d.get_list_int() + +// Read maps +str_map := d.get_map_string() +``` + +### Automatic Struct Encoding/Decoding + +```v +struct Person { + name string + age int + tags []string + meta map[string]string +} + +// Create struct instance +person := Person{ + name: 'John' + age: 30 + tags: ['developer', 'v'] + meta: { + 'location': 'NYC' + 'role': 'engineer' + } +} + +// Encode struct +encoded := encoder.encode(person)! + +// Decode back to struct +decoded := encoder.decode[Person](encoded)! 
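+
+// the round trip should reproduce the original values
+assert decoded.name == person.name
+assert decoded.meta == person.meta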
+``` + +## Example + +Here's a complete example showing how to encode nested structs: + +```v +import freeflowuniverse.herolib.data.encoder + +// Define some nested structs +struct Address { + street string + number int + country string +} + +struct Person { + name string + age int + addresses []Address // nested array of structs + metadata map[string]string +} + +// Example usage +fn main() { + // Create test data + mut person := Person{ + name: 'John Doe' + age: 30 + addresses: [ + Address{ + street: 'Main St' + number: 123 + country: 'USA' + }, + Address{ + street: 'Side St' + number: 456 + country: 'Canada' + } + ] + metadata: { + 'id': 'abc123' + 'type': 'customer' + } + } + + // Encode the data + mut e := encoder.new() + + // Add version byte (v1) + e.add_u8(1) + + // Encode the Person struct + e.add_string(person.name) + e.add_int(person.age) + + // Encode the addresses array + e.add_u16(u16(person.addresses.len)) // number of addresses + for addr in person.addresses { + e.add_string(addr.street) + e.add_int(addr.number) + e.add_string(addr.country) + } + + // Encode the metadata map + e.add_map_string(person.metadata) + + // The binary data is now in e.data + encoded := e.data + + // Later, when decoding, first byte tells us the version + version := encoded[0] + assert version == 1 +} +``` + +## Binary Format Details + +For the example above, the binary layout would be: + +``` +[1] // version byte (v1) +[len][John Doe] // name (u16 length + bytes) +[30] // age (int/u32) +[2] // number of addresses (u16) + [len][Main St] // address 1 street + [123] // address 1 number + [len][USA] // address 1 country + [len][Side St] // address 2 street + [456] // address 2 number + [len][Canada] // address 2 country +[2] // number of metadata entries (u16) + [len][id] // key 1 + [len][abc123] // value 1 + [len][type] // key 2 + [len][customer] // value 2 +``` + + + +## Implementation Details + +### Binary Format + +The encoded data follows this format: + +1. For strings: + - u16 length prefix + - raw string bytes + +2. For arrays: + - u16 length prefix + - encoded elements + +3. For maps: + - u16 count of entries + - encoded key-value pairs + diff --git a/lib/data/encoderhero/any_base.v b/lib/data/encoderhero/any_base.v new file mode 100644 index 00000000..44b6dcde --- /dev/null +++ b/lib/data/encoderhero/any_base.v @@ -0,0 +1,349 @@ +module encoderhero + +// import time + +// // i8 uses `Any` as a 16-bit integer. +// pub fn (f Any) i8() i8 { +// match f { +// i8 { +// return f +// } +// i16, i32, int, i64, u8, u16, u32, u64, f32, f64, bool { +// return i8(f) +// } +// string { +// return f.i8() +// } +// else { +// return 0 +// } +// } +// } + +// // i16 uses `Any` as a 16-bit integer. +// pub fn (f Any) i16() i16 { +// match f { +// i16 { +// return f +// } +// i8, i32, int, i64, u8, u16, u32, u64, f32, f64, bool { +// return i16(f) +// } +// string { +// return f.i16() +// } +// else { +// return 0 +// } +// } +// } + +// // int uses `Any` as an integer. +// pub fn (f Any) int() int { +// match f { +// int { +// return f +// } +// i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, bool { +// return int(f) +// } +// string { +// return f.int() +// } +// else { +// return 0 +// } +// } +// } + +// // i32 uses `Any` as a 32-bit integer. 
+// pub fn (f Any) i32() i32 { +// match f { +// i32 { +// return f +// } +// i8, i16, int, i64, u8, u16, u32, u64, f32, f64, bool { +// return i32(f) +// } +// string { +// return f.i32() +// } +// else { +// return 0 +// } +// } +// } + +// // i64 uses `Any` as a 64-bit integer. +// pub fn (f Any) i64() i64 { +// match f { +// i64 { +// return f +// } +// i8, i16, i32, int, u8, u16, u32, u64, f32, f64, bool { +// return i64(f) +// } +// string { +// return f.i64() +// } +// else { +// return 0 +// } +// } +// } + +// // u64 uses `Any` as a 64-bit unsigned integer. +// pub fn (f Any) u64() u64 { +// match f { +// u64 { +// return f +// } +// u8, u16, u32, i8, i16, i32, int, i64, f32, f64, bool { +// return u64(f) +// } +// string { +// return f.u64() +// } +// else { +// return 0 +// } +// } +// } + +// // f32 uses `Any` as a 32-bit float. +// pub fn (f Any) f32() f32 { +// match f { +// f32 { +// return f +// } +// bool, i8, i16, i32, int, i64, u8, u16, u32, u64, f64 { +// return f32(f) +// } +// string { +// return f.f32() +// } +// else { +// return 0.0 +// } +// } +// } + +// // f64 uses `Any` as a 64-bit float. +// pub fn (f Any) f64() f64 { +// match f { +// f64 { +// return f +// } +// i8, i16, i32, int, i64, u8, u16, u32, u64, f32 { +// return f64(f) +// } +// string { +// return f.f64() +// } +// else { +// return 0.0 +// } +// } +// } + +// // bool uses `Any` as a bool. +// pub fn (f Any) bool() bool { +// match f { +// bool { +// return f +// } +// string { +// if f == 'false' { +// return false +// } +// if f == 'true' { +// return true +// } +// if f.len > 0 { +// return f != '0' && f != '0.0' +// } else { +// return false +// } +// } +// i8, i16, i32, int, i64 { +// return i64(f) != 0 +// } +// u8, u16, u32, u64 { +// return u64(f) != 0 +// } +// f32, f64 { +// return f64(f) != 0.0 +// } +// else { +// return false +// } +// } +// } + +// // arr uses `Any` as an array. +// pub fn (f Any) arr() []Any { +// if f is []Any { +// return f +// } else if f is map[string]Any { +// mut arr := []Any{} +// for _, v in f { +// arr << v +// } +// return arr +// } +// return [f] +// } + +// // as_map uses `Any` as a map. +// pub fn (f Any) as_map() map[string]Any { +// if f is map[string]Any { +// return f +// } else if f is []Any { +// mut mp := map[string]Any{} +// for i, fi in f { +// mp['${i}'] = fi +// } +// return mp +// } +// return { +// '0': f +// } +// } + +// // to_time uses `Any` as a time.Time. +// pub fn (f Any) to_time() !time.Time { +// match f { +// time.Time { +// return f +// } +// i64 { +// return time.unix(f) +// } +// string { +// is_iso8601 := f[4] == `-` && f[7] == `-` +// if is_iso8601 { +// return time.parse_iso8601(f)! +// } +// is_rfc3339 := f.len == 24 && f[23] == `Z` && f[10] == `T` +// if is_rfc3339 { +// return time.parse_rfc3339(f)! +// } +// mut is_unix_timestamp := true +// for c in f { +// if c == `-` || (c >= `0` && c <= `9`) { +// continue +// } +// is_unix_timestamp = false +// break +// } +// if is_unix_timestamp { +// return time.unix(f.i64()) +// } +// // TODO: parse_iso8601 +// // TODO: parse_rfc2822 +// return time.parse(f)! 
+// } +// else { +// return error('not a time value: ${f} of type: ${f.type_name()}') +// } +// } +// } + +// // map_from convert a struct to map of Any +// pub fn map_from[T](t T) map[string]Any { +// mut m := map[string]Any{} +// $if T is $struct { +// $for field in T.fields { +// value := t.$(field.name) + +// $if field.is_array { +// mut arr := []Any{} +// for variable in value { +// arr << Any(variable) +// } +// m[field.name] = arr +// arr.clear() +// } $else $if field.is_struct { +// m[field.name] = map_from(value) +// } $else $if field.is_map { +// // TODO +// } $else $if field.is_alias { +// // TODO +// } $else $if field.is_option { +// // TODO +// } $else { +// // TODO: improve memory usage when convert +// $if field.typ is string { +// m[field.name] = value.str() +// } $else $if field.typ is bool { +// m[field.name] = t.$(field.name).str().bool() +// } $else $if field.typ is i8 { +// m[field.name] = t.$(field.name).str().i8() +// } $else $if field.typ is i16 { +// m[field.name] = t.$(field.name).str().i16() +// } $else $if field.typ is i32 { +// m[field.name] = t.$(field.name).str().i32() +// } $else $if field.typ is int { +// m[field.name] = t.$(field.name).str().int() +// } $else $if field.typ is i64 { +// m[field.name] = t.$(field.name).str().i64() +// } $else $if field.typ is f32 { +// m[field.name] = t.$(field.name).str().f32() +// } $else $if field.typ is f64 { +// m[field.name] = t.$(field.name).str().f64() +// } $else $if field.typ is u8 { +// m[field.name] = t.$(field.name).str().u8() +// } $else $if field.typ is u16 { +// m[field.name] = t.$(field.name).str().u16() +// } $else $if field.typ is u32 { +// m[field.name] = t.$(field.name).str().u32() +// } $else $if field.typ is u64 { +// m[field.name] = t.$(field.name).str().u64() +// } $else { +// // return error("The type of `${field.name}` can't be decoded. Please open an issue at https://github.com/vlang/v/issues/new/choose") +// } +// } +// } +// } +// return m +// } + +// // str returns the JSON string representation of the `map[string]Any` type. +// pub fn (f map[string]Any) str() string { +// return Any(f).json_str() +// } + +// // str returns the JSON string representation of the `[]Any` type. +// pub fn (f []Any) str() string { +// return Any(f).json_str() +// } + +// // str returns the string representation of the `Any` type. Use the `json_str` method +// // if you want to use the escaped str() version of the `Any` type. +// pub fn (f Any) str() string { +// if f is string { +// return f +// } else { +// return f.json_str() +// } +// } + +// // json_str returns the JSON string representation of the `Any` type. +// pub fn (f Any) json_str() string { +// return encode(f) +// } + +// // prettify_json_str returns the pretty-formatted JSON string representation of the `Any` type. 
+// @[manualfree]
+// pub fn (f Any) prettify_json_str() string {
+// 	mut params := []u8{}
+// 	defer {
+// 		unsafe { params.free() }
+// 	}
+// 	mut enc := Encoder{
+// 		newline: `\n`
+// 		newline_spaces_count: 2
+// 	}
+// 	enc.encode_value(f, mut params) or {}
+// 	return params.bytestr()
+// }
diff --git a/lib/data/encoderhero/decoder.v b/lib/data/encoderhero/decoder.v
new file mode 100644
index 00000000..e14031f6
--- /dev/null
+++ b/lib/data/encoderhero/decoder.v
@@ -0,0 +1,82 @@
+module encoderhero
+
+import time
+import freeflowuniverse.herolib.data.paramsparser
+import freeflowuniverse.herolib.core.texttools
+
+pub struct Decoder[T] {
+pub mut:
+	object T
+	data   string
+}
+
+pub fn decode[T](data string) !T {
+	return decode_struct[T](T{}, data)
+}
+
+// decode_struct is a generic function that decodes a heroscript string into the struct T.
+fn decode_struct[T](_ T, data string) !T {
+	mut typ := T{}
+
+	$if T is $struct {
+		obj_name := texttools.name_fix_pascal_to_snake(T.name.all_after_last('.'))
+		action_name := 'define.${obj_name}'
+		actions_split := data.split('!!')
+		actions := actions_split.filter(it.starts_with(action_name))
+
+		mut action_str := ''
+		if actions.len == 0 {
+			return T{}
+		} else {
+			action_str = actions[0]
+			params_str := action_str.trim_string_left(action_name)
+			params := paramsparser.parse(params_str)!
+			typ = params.decode[T]()!
+		}
+
+		$for field in T.fields {
+			$if field.is_struct {
+				$if field.typ !is time.Time {
+					if !field.name[0].is_capital() {
+						// skip embedded ones
+						mut data_fmt := data.replace(action_str, '')
+						data_fmt = data_fmt.replace('define.${obj_name}', 'define')
+						typ.$(field.name) = decode_struct(typ.$(field.name), data_fmt)!
+					}
+				}
+			} $else $if field.is_array {
+				if is_struct_array(typ.$(field.name))! {
+					mut data_fmt := data.replace(action_str, '')
+					data_fmt = data_fmt.replace('define.${obj_name}', 'define')
+					arr := decode_array(typ.$(field.name), data_fmt)!
+					typ.$(field.name) = arr
+				}
+			}
+		}
+	} $else {
+		return error("The type `${T.name}` can't be decoded.")
+	}
+	return typ
+}
+
+pub fn is_struct_array[U](_ []U) !bool {
+	$if U is $struct {
+		return true
+	}
+	return false
+}
+
+pub fn decode_array[T](_ []T, data string) ![]T {
+	mut arr := []T{}
+	// for i in 0 .. val.len {
+	value := T{}
+	$if T is $struct {
+		arr << decode_struct(value, data)!
+	}
+	// }
+	return arr
+}
diff --git a/lib/data/encoderhero/decoder_test.v b/lib/data/encoderhero/decoder_test.v
new file mode 100644
index 00000000..e9fa6ffd
--- /dev/null
+++ b/lib/data/encoderhero/decoder_test.v
@@ -0,0 +1,141 @@
+module encoderhero
+
+import time
+import freeflowuniverse.herolib.data.paramsparser
+import freeflowuniverse.herolib.core.texttools
+
+struct TestStruct {
+	id   int
+	name string
+}
+
+const blank_script = '!!define.test_struct'
+const full_script = '!!define.test_struct id: 42 name: testobject'
+const invalid_script = '!!define.another_struct'
+
+fn test_decode_simple() ! {
+	mut object := decode[TestStruct](blank_script)!
+	assert object == TestStruct{}
+
+	object = decode[TestStruct](full_script)!
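+	// the full script sets both fields, so the decoded struct must match exactly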
+ assert object == TestStruct{ + id: 42 + name: 'testobject' + } + + object = decode[TestStruct](invalid_script) or { + assert true + TestStruct{} + } +} + +struct ChildStruct { + text string + number int +} + +struct ComplexStruct { + id int + name string + child ChildStruct +} + +const blank_complex = '!!define.complex_struct' +const partial_complex = '!!define.complex_struct id: 42 name: testcomplex' +const full_complex = '!!define.complex_struct id: 42 name: testobject +!!define.complex_struct.child text: child_text number: 24 +' + +fn test_decode_complex() ! { + mut object := decode[ComplexStruct](blank_complex)! + assert object == ComplexStruct{} + + object = decode[ComplexStruct](partial_complex)! + assert object == ComplexStruct{ + id: 42 + name: 'testcomplex' + } + + object = decode[ComplexStruct](full_complex) or { + assert true + ComplexStruct{} + } +} + +pub struct Base { + id int + // remarks []Remark TODO: add support +} + +pub struct Remark { + text string +} + +pub struct Person { + Base +mut: + name string + age ?int + birthday time.Time + deathday time.Time + car Car + profiles []Profile +} + +pub struct Car { + name string + year int + insurance Insurance +} + +pub struct Insurance { + provider string + expiration time.Time +} + +pub struct Profile { + platform string + url string +} + +const person_heroscript = " +!!define.person id:1 name:Bob age:21 birthday:'2012-12-12 00:00:00' +!!define.person.car name:'Bob\\'s car' year:2014 +!!define.person.car.insurance expiration:'0000-00-00 00:00:00' provider:'' + +!!define.person.profile platform:Github url:github.com/example +" + +const person = Person{ + id: 1 + name: 'Bob' + age: 21 + birthday: time.new_time( + day: 12 + month: 12 + year: 2012 + ) + car: Car{ + name: "Bob's car" + year: 2014 + } + profiles: [ + Profile{ + platform: 'Github' + url: 'github.com/example' + }, + ] +} + +fn test_decode() ! { + mut object := decode[Person]('')! + assert object == Person{} + + object = decode[Person](person_heroscript)! + assert object == person + + // object = decode[ComplexStruct](full_complex) or { + // assert true + // ComplexStruct{} + // } +} diff --git a/lib/data/encoderhero/encoder.v b/lib/data/encoderhero/encoder.v new file mode 100644 index 00000000..3a4dbc70 --- /dev/null +++ b/lib/data/encoderhero/encoder.v @@ -0,0 +1,145 @@ +module encoderhero + +import freeflowuniverse.herolib.data.paramsparser +import time +import v.reflection +import freeflowuniverse.herolib.data.ourtime +import freeflowuniverse.herolib.core.texttools +// import freeflowuniverse.herolib.ui.console + +// Encoder encodes the an `Any` type into HEROSCRIPT representation. +// It provides parameters in order to change the end result. +pub struct Encoder { +pub mut: + escape_unicode bool = true + action_name string + action_names []string + params paramsparser.Params + children []Encoder + parent ?&Encoder @[skip; str: skip] +} + +// encode is a generic function that encodes a type into a HEROSCRIPT string. +pub fn encode[T](val T) !string { + mut e := Encoder{ + params: paramsparser.Params{} + } + + $if T is $struct { + e.encode_struct[T](val)! + } $else $if T is $array { + e.add_child_list[T](val, 'TODO') + } $else { + return error('can only add elements for struct or array of structs. \n${val}') + } + return e.export()! 
+} + +// export exports an encoder into encoded heroscript +pub fn (e Encoder) export() !string { + mut script := e.params.export( + pre: '!!define.${e.action_names.join('.')}' + indent: ' ' + skip_empty: true + ) + + script += e.children.map(it.export()!).join('\n') + return script +} + +// needs to be a struct we are adding +// parent is the name of the action e.g define.customer:contact +pub fn (mut e Encoder) add_child[T](val T, parent string) ! { + $if T is $array { + mut counter := 0 + for valitem in val { + mut e2 := e.add_child[T](valitem, '${parent}:${counter}')! + } + return + } + mut e2 := Encoder{ + params: paramsparser.Params{} + parent: &e + action_names: e.action_names.clone() // careful, if not cloned gets mutated later + } + $if T is $struct { + e2.params.set('key', parent) + e2.encode_struct[T](val)! + e.children << e2 + } $else { + return error('can only add elements for struct or array of structs. \n${val}') + } +} + +pub fn (mut e Encoder) add_child_list[U](val []U, parent string) ! { + for i in 0 .. val.len { + mut counter := 0 + $if U is $struct { + e.add_child(val[i], '${parent}:${counter}')! + counter += 1 + } + } +} + +// needs to be a struct we are adding +// parent is the name of the action e.g define.customer:contact +pub fn (mut e Encoder) add[T](val T) ! { + // $if T is []$struct { + // // panic("not implemented") + // for valitem in val{ + // mut e2:=e.add[T](valitem)! + // } + // } + mut e2 := Encoder{ + params: paramsparser.Params{} + parent: &e + action_names: e.action_names.clone() // careful, if not cloned gets mutated later + } + $if T is $struct && T !is time.Time { + e2.params.set('key', '${val}') + e2.encode_struct[T](val)! + e.children << e2 + } $else { + return error('can only add elements for struct or array of structs. \n${val}') + } +} + +pub fn (mut e Encoder) encode_array[U](val []U) ! { + for i in 0 .. val.len { + $if U is $struct { + e.add(val[i])! + } + } +} + +// now encode the struct +pub fn (mut e Encoder) encode_struct[T](t T) ! { + mut mytype := reflection.type_of[T](t) + struct_attrs := attrs_get_reflection(mytype) + + mut action_name := texttools.name_fix_pascal_to_snake(T.name.all_after_last('.')) + if 'alias' in struct_attrs { + action_name = struct_attrs['alias'].to_lower() + } + e.action_names << action_name + + params := paramsparser.encode[T](t, recursive: false)! + e.params = params + + // encode children structs and array of structs + $for field in T.fields { + val := t.$(field.name) + // time is encoded in the above params encoding step so skip and dont treat as recursive struct + $if val is time.Time || val is ourtime.OurTime { + } $else $if val is $struct { + if field.name[0].is_capital() { + embedded_params := paramsparser.encode(val, recursive: false)! + e.params.params << embedded_params.params + } else { + e.add(val)! + } + } $else $if val is $array { + e.encode_array(val)! 
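+			// each struct element of the array becomes its own child !!define action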
+ } + } +} diff --git a/lib/data/encoderhero/encoder_test.v b/lib/data/encoderhero/encoder_test.v new file mode 100644 index 00000000..29e47de5 --- /dev/null +++ b/lib/data/encoderhero/encoder_test.v @@ -0,0 +1,75 @@ +module encoderhero + +import freeflowuniverse.herolib.data.paramsparser +import time +import v.reflection + +struct Base { + id int + remarks []Remark +} + +struct Remark { + text string +} + +struct Person { + Base +mut: + name string + age ?int = 20 + birthday time.Time + deathday ?time.Time + car Car + profiles []Profile +} + +struct Car { + name string + year int + insurance Insurance +} + +struct Insurance { + provider string + expiration time.Time +} + +struct Profile { + platform string + url string +} + +const person_heroscript = " +!!define.person id:1 name:Bob birthday:'2012-12-12 00:00:00' +!!define.person.car name:'Bob\\'s car' year:2014 +!!define.person.car.insurance expiration:'0000-00-00 00:00:00' provider:'' + +!!define.person.profile platform:Github url:github.com/example +" + +const person = Person{ + id: 1 + name: 'Bob' + age: 21 + birthday: time.new_time( + day: 12 + month: 12 + year: 2012 + ) + car: Car{ + name: "Bob's car" + year: 2014 + } + profiles: [ + Profile{ + platform: 'Github' + url: 'github.com/example' + }, + ] +} + +fn test_encode() ! { + person_script := encode[Person](person)! + assert person_script.trim_space() == person_heroscript.trim_space() +} diff --git a/lib/data/encoderhero/readme.md b/lib/data/encoderhero/readme.md new file mode 100644 index 00000000..af10be2c --- /dev/null +++ b/lib/data/encoderhero/readme.md @@ -0,0 +1,69 @@ +# hero Encoder + +> encoder hero is based on json2 from https://github.com/vlang/v/blob/master/vlib/x/json2/README.md + +## Usage + +#### encode[T] + +```v +#!/usr/bin/env -S v -n -cg -w -enable-globals run + +import freeflowuniverse.herolib.data.encoderhero +import time + +struct Person { +mut: + name string + age ?int = 20 + birthday time.Time + deathday ?time.Time +} + +mut person := Person{ + name: 'Bob' + birthday: time.now() +} +heroscript := encoderhero.encode[Person](person)! + +``` + +#### decode[T] + +```v +import freeflowuniverse.herolib.data.encoderhero +import time + +struct Person { +mut: + name string + age ?int = 20 + birthday time.Time + deathday ?time.Time +} + +data := ' + +' + +person := encoderhero.decode[Person](data)! +/* +struct Person { + mut: + name "Bob" + age 20 + birthday "2022-03-11 13:54:25" + } +*/ + +``` + + +## License + +for all original code as used from Alexander: + +// Copyright (c) 2019-2024 Alexander Medvednikov. All rights reserved. +// Use of this source code is governed by an MIT license +// that can be found in the LICENSE file. + diff --git a/lib/data/encoderhero/tools.v b/lib/data/encoderhero/tools.v new file mode 100644 index 00000000..ea869580 --- /dev/null +++ b/lib/data/encoderhero/tools.v @@ -0,0 +1,26 @@ +module encoderhero + +import v.reflection + +// if at top of struct we have: @[name:"teststruct " ; params] . 
+// will return {'name': 'teststruct', 'params': ''} +fn attrs_get_reflection(mytype reflection.Type) map[string]string { + if mytype.sym.info is reflection.Struct { + return attrs_get(mytype.sym.info.attrs) + } + return map[string]string{} +} + +// will return {'name': 'teststruct', 'params': ''} +fn attrs_get(attrs []string) map[string]string { + mut out := map[string]string{} + for i in attrs { + if i.contains('=') { + kv := i.split('=') + out[kv[0].trim_space().to_lower()] = kv[1].trim_space().to_lower() + } else { + out[i.trim_space().to_lower()] = '' + } + } + return out +} diff --git a/lib/data/encoderhero/types.v b/lib/data/encoderhero/types.v new file mode 100644 index 00000000..68108f13 --- /dev/null +++ b/lib/data/encoderhero/types.v @@ -0,0 +1,93 @@ +module encoderhero + +import time + +// byte array versions of the most common tokens/chars to avoid reallocations +const null_in_bytes = 'null' + +const true_in_string = 'true' + +const false_in_string = 'false' + +const empty_array = [u8(`[`), `]`]! + +const comma_rune = `,` + +const colon_rune = `:` + +const quote_rune = `"` + +const back_slash = [u8(`\\`), `\\`]! + +const quote = [u8(`\\`), `"`]! + +const slash = [u8(`\\`), `/`]! + +const null_unicode = [u8(`\\`), `u`, `0`, `0`, `0`, `0`]! + +const ascii_control_characters = ['\\u0000', '\\t', '\\n', '\\r', '\\u0004', '\\u0005', '\\u0006', + '\\u0007', '\\b', '\\t', '\\n', '\\u000b', '\\f', '\\r', '\\u000e', '\\u000f', '\\u0010', + '\\u0011', '\\u0012', '\\u0013', '\\u0014', '\\u0015', '\\u0016', '\\u0017', '\\u0018', '\\u0019', + '\\u001a', '\\u001b', '\\u001c', '\\u001d', '\\u001e', '\\u001f']! + +const curly_open_rune = `{` + +const curly_close_rune = `}` + +const ascii_especial_characters = [u8(`\\`), `"`, `/`]! + +// // `Any` is a sum type that lists the possible types to be decoded and used. +// pub type Any = Null +// | []Any +// | bool +// | f32 +// | f64 +// | i16 +// | i32 +// | i64 +// | i8 +// | int +// | map[string]Any +// | string +// | time.Time +// | u16 +// | u32 +// | u64 +// | u8 + +// // Decodable is an interface, that allows custom implementations for decoding structs from JSON encoded values +// pub interface Decodable { +// from_json(f Any) +// } + +// Decodable is an interface, that allows custom implementations for encoding structs to their string based JSON representations +pub interface Encodable { + heroscript() string +} + +// `Null` struct is a simple representation of the `null` value in JSON. +pub struct Null { + is_null bool = true +} + +pub const null = Null{} + +// ValueKind enumerates the kinds of possible values of the Any sumtype. +pub enum ValueKind { + unknown + array + object + string_ + number +} + +// str returns the string representation of the specific ValueKind +pub fn (k ValueKind) str() string { + return match k { + .unknown { 'unknown' } + .array { 'array' } + .object { 'object' } + .string_ { 'string' } + .number { 'number' } + } +} diff --git a/lib/data/flist/block.v b/lib/data/flist/block.v new file mode 100644 index 00000000..19d67493 --- /dev/null +++ b/lib/data/flist/block.v @@ -0,0 +1,51 @@ +module flist + +@[table: 'block'] +pub struct Block { +pub mut: + ino u64 + id string + key string +} + +fn (mut f Flist) add_block(block Block) ! { + sql f.con { + insert block into Block + }! 
+} + +fn do_blocks_match(b1 []Block, b2 []Block) bool { + if b1.len != b2.len { + return false + } + + for i, b in b1 { + if b.key != b2[i].key || b.id != b2[i].id { + return false + } + } + + return true +} + +fn (mut f Flist) get_inode_blocks(ino u64) ![]Block { + blocks := sql f.con { + select from Block where ino == ino + }! + + return blocks +} + +fn (mut f Flist) delete_block(ino u64) ! { + f.con.exec_param('delete from block where ino = ?;', '${ino}')! +} + +// copy_blocks creates block a copy of the block entries related to src_ino and relates them to dest_ino +fn (mut f Flist) copy_blocks(src_ino u64, dest_ino u64) ! { + mut blocks := f.get_inode_blocks(src_ino)! + + for mut block in blocks { + block.ino = dest_ino + f.add_block(block)! + } +} diff --git a/lib/data/flist/extra.v b/lib/data/flist/extra.v new file mode 100644 index 00000000..7daec7f7 --- /dev/null +++ b/lib/data/flist/extra.v @@ -0,0 +1,38 @@ +module flist + +@[table: 'extra'] +pub struct Extra { +pub mut: + ino u64 + data string +} + +fn (mut f Flist) get_extra(ino u64) ?Extra { + extra := sql f.con { + select from Extra where ino == ino + } or { return none } + + if extra.len == 0 { + return none + } + + return extra[0] +} + +// copy_extra creates a copy of the extra record related to src_ino and relates it to dest_ino +fn (mut f Flist) copy_extra(src_ino u64, dest_ino u64) ! { + if mut extra := f.get_extra(src_ino) { + extra.ino = dest_ino + f.add_extra(extra)! + } +} + +fn (mut f Flist) delete_extra(ino u64) ! { + f.con.exec_param('delete from extra where ino = ?;', '${ino}')! +} + +fn (mut f Flist) add_extra(extra Extra) ! { + sql f.con { + insert extra into Extra + }! +} diff --git a/lib/data/flist/flist.v b/lib/data/flist/flist.v new file mode 100644 index 00000000..f514f02e --- /dev/null +++ b/lib/data/flist/flist.v @@ -0,0 +1,236 @@ +module flist + +import db.sqlite +import os +import time + +pub struct Flist { + path string + con sqlite.DB +} + +pub struct FlistGetArgs { + path string @[required] + create bool +} + +pub fn new(args FlistGetArgs) !Flist { + if args.create { + os.create(args.path)! + } + + con := sqlite.connect(args.path)! + con.journal_mode(sqlite.JournalMode.delete)! + + return Flist{ + path: args.path + con: con + } +} + +// list directories and files in root directory. if recursive is allowed directories are explored. +pub fn (mut f Flist) list(recursive bool) ![]Inode { + inodes := match recursive { + true { + res := sql f.con { + select from Inode + }! + res + } + false { + res := sql f.con { + select from Inode where parent == 1 + }! + res + } + } + + return inodes +} + +// copy copies an flist entry from source path to destination path. +pub fn (mut f Flist) copy(source string, destination string) ! { + dest := destination.trim_right('/') + + src_inode := f.get_inode_from_path(source)! + + if _ := f.get_inode_from_path(dest) { + return error('${dest} exists') + } + + dest_parent := if dest.contains('/') { + dest.all_before_last('/') + } else { + '' + } + + dest_parent_inode := f.get_inode_from_path(dest_parent)! + dest_inode := Inode{ + parent: dest_parent_inode.ino + ctime: time.now().unix() + mtime: time.now().unix() + mode: src_inode.mode + name: dest.all_after_last('/') + rdev: src_inode.rdev + size: src_inode.size + gid: u32(os.getgid()) + uid: u32(os.getuid()) + } + + f.add_inode(dest_inode)! + + dest_ino := u64(f.con.last_id()) + + f.copy_blocks(src_inode.ino, dest_ino)! + f.copy_extra(src_inode.ino, dest_ino)! + + children := f.get_inode_children(src_inode.ino)! 
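+
+	// recurse so the whole subtree under the source is copied as well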
+	for child in children {
+		f.copy(os.join_path(source, child.name), os.join_path(dest, child.name))!
+	}
+}
+
+// delete file or directory from flist. path is relative to flist root directory.
+// empty path will delete all flist entries.
+// (actual data is not deleted, only flist information)
+pub fn (mut f Flist) delete_path(path_ string) ! {
+	/*
+	delete from inode table and all related tables
+	*/
+	inode := f.get_inode_from_path(path_) or {
+		return error('failed to get inode from path: ${err}')
+	}
+
+	f.delete_inode(inode.ino) or { return error('failed to delete inode: ${err}') }
+}
+
+// delete_match deletes any entry that matches pattern. it simply calls find() and deletes matching inodes.
+pub fn (mut f Flist) delete_match(pattern string) ! {
+	inodes := f.find(pattern)!
+	for inode in inodes {
+		f.delete_inode(inode.ino)!
+	}
+}
+
+// merge merges two flists together.
+//
+// - copies all routes
+// - copies all inodes with the following restrictions:
+// - no two entries can have the same inode number: this is avoided by shifting one flist's inode numbers to start after the last inode number of the other flist.
+// - the shifted flist reflects the inode changes on all affected tables (extra and block).
+// - no two files that exist in the same dir can have the same name.
+// - if two files have the same name and the same blocks, they are identical and won't be copied; otherwise, the incoming entry is renamed.
+pub fn merge(source string, destination string) ! {
+	mut f_src := new(path: source)!
+	mut f_dest := new(path: destination)!
+
+	f_dest.con.exec('BEGIN;')!
+	f_dest.merge_(mut f_src) or {
+		f_dest.con.exec('ROLLBACK;')!
+		return error('failed to merge flists: ${err}')
+	}
+	f_dest.con.exec('COMMIT;')!
+}
+
+fn (mut f Flist) merge_(mut f_src Flist) ! {
+	src_routes := f_src.get_routes()!
+	f.add_routes(src_routes)!
+
+	dest_last_inode_num := f.get_last_inode_number()!
+	mut next_inode_num := dest_last_inode_num + 1
+	mut src_inodes := f_src.list(true)!
+
+	for mut inode in src_inodes {
+		if inode.ino == 1 && inode.parent == 0 {
+			// this is root inode, not included in merge
+			continue
+		}
+
+		mut src_blocks := f_src.get_inode_blocks(inode.ino)!
+		src_inode_path := f_src.get_inode_path(inode)!
+		// if the entry has a match with an entry from destination,
+		// skip it or rename the incoming one
+		if matching_dest_inode := f.get_inode_from_path(src_inode_path) {
+			// two entries exist with the same path: there might be a match,
+			// need to check blocks to make sure
+			if inode.mode == matching_dest_inode.mode {
+				if inode.mode == 16384 {
+					// this is a directory, skip it
+					continue
+				}
+
+				dest_blocks := f.get_inode_blocks(matching_dest_inode.ino)!
+				if do_blocks_match(src_blocks, dest_blocks) {
+					continue
+				}
+			}
+
+			// need to assign new name to incoming entry
+			mut new_name := ''
+			for i in 1 .. 100 {
+				new_src_inode_path := '${src_inode_path} (${i})'
+				if _ := f.get_inode_from_path(new_src_inode_path) {
+					continue
+				} else {
+					new_name = '${inode.name} (${i})'
+					break
+				}
+			}
+
+			if new_name == '' {
+				return error('failed to assign new name to entry ${inode.name}')
+			}
+
+			inode.name = new_name
+		}
+
+		// re-point the copied blocks at the new inode number
+		for mut block in src_blocks {
+			block.ino = next_inode_num
+			f.add_block(block)!
+		}
+
+		// re-point the extra record at the new inode number, if any
+		if mut extra := f_src.get_extra(inode.ino) {
+			extra.ino = next_inode_num
+			f.add_extra(extra)!
+		}
+
+		inode.ino = next_inode_num
+		f.add_inode(inode)!
+
+		next_inode_num += 1
+	}
+
+	src_tags := f_src.get_tags()!
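+	// finally, carry the tag table over as well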
+	for tag in src_tags {
+		f.add_tag(tag)!
+	}
+}
+
+// find returns the entries whose names match pattern, using the SQL `LIKE` operator.
+// The percent sign (%) matches zero or more characters and the underscore (_) matches exactly one.
+pub fn (mut f Flist) find(pattern string) ![]Inode {
+	return f.find_inode_with_pattern(pattern)
+}
+
+// update_routes will overwrite the current routes with the new routes
+pub fn (mut f Flist) update_routes(new_routes []Route) ! {
+	f.delete_all_routes()!
+
+	f.add_routes(new_routes)!
+}
+
+// add_routes adds routes to the route table of the flist
+pub fn (mut f Flist) add_routes(new_routes []Route) ! {
+	current_routes := f.get_routes()!
+
+	for route in new_routes {
+		if current_routes.contains(route) {
+			continue
+		}
+
+		f.add_route(route)!
+	}
+}
diff --git a/lib/data/flist/flist_test.v b/lib/data/flist/flist_test.v
new file mode 100644
index 00000000..05cdf703
--- /dev/null
+++ b/lib/data/flist/flist_test.v
@@ -0,0 +1,289 @@
+module flist
+
+import os
+import db.sqlite
+import rand
+import time
+
+fn testsuite_begin() {
+	schema := [
+		'CREATE TABLE inode (
+    ino INTEGER PRIMARY KEY AUTOINCREMENT,
+    parent INTEGER,
+    name VARCHAR(255),
+    size INTEGER,
+    uid INTEGER,
+    gid INTEGER,
+    mode INTEGER,
+    rdev INTEGER,
+    ctime INTEGER,
+    mtime INTEGER
+);',
+		'CREATE INDEX parents ON inode (parent);',
+		'CREATE INDEX names ON inode (name);',
+		'CREATE TABLE extra (
+    ino INTEGER PRIMARY KEY,
+    data VARCHAR(4096)
+);',
+		'CREATE TABLE block (
+    ino INTEGER,
+    id VARCHAR(32),
+    key VARCHAR(32)
+);',
+		'CREATE INDEX block_ino ON block (ino);',
+		'CREATE TABLE tag (
+    key VARCHAR(10) PRIMARY KEY,
+    value VARCHAR(255)
+);',
+		'CREATE TABLE route (
+    start integer, -- one byte hash prefix
+    end integer, -- one byte hash prefix
+    url VARCHAR(2048)
+);',
+	]
+	os.create('/tmp/fl1.fl')!
+	os.create('/tmp/fl2.fl')!
+	mut con1 := sqlite.connect('/tmp/fl1.fl')!
+	mut con2 := sqlite.connect('/tmp/fl2.fl')!
+
+	con1.journal_mode(sqlite.JournalMode.delete)!
+	con2.journal_mode(sqlite.JournalMode.delete)!
+
+	for schematic in schema {
+		con1.exec(schematic)!
+		con2.exec(schematic)!
+	}
+
+	con1.close()!
+	con2.close()!
+}
+
+fn testsuite_end() {
+	os.rm('/tmp/fl1.fl')!
+	os.rm('/tmp/fl2.fl')!
+}
+
+fn test_list() {
+	mut fl := new(path: '/tmp/fl1.fl')!
+	input := insert_random_inodes(mut fl)!
+	list := fl.list(true)!
+	assert input == list
+}
+
+fn insert_random_inodes(mut fl Flist) ![]Inode {
+	mut input := []Inode{}
+	input << Inode{
+		ino: 1
+		parent: 0
+		name: '/'
+		size: rand.u64() % 100
+		uid: rand.u32() % 10000
+		gid: rand.u32() % 10000
+		rdev: 0
+		mode: 16384
+		ctime: time.now().unix()
+		mtime: time.now().unix()
+	}
+
+	for i in 2 .. 10 {
+		input << Inode{
+			ino: i
+			parent: i % 3 + 1
+			name: rand.string(5)
+			size: rand.u64() % 100
+			uid: rand.u32() % 10000
+			gid: rand.u32() % 10000
+			rdev: 0
+			mode: 32768
+			ctime: time.now().unix()
+			mtime: time.now().unix()
+		}
+	}
+
+	for inode in input {
+		fl.con.exec_param_many('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);',
+			['${inode.ino}', '${inode.parent}', '${inode.name}', '${inode.size}', '${inode.uid}',
+			'${inode.gid}', '${inode.mode}', '${inode.rdev}', '${inode.ctime}', '${inode.mtime}'])!
+	}
+
+	return input
+}
+
+fn test_delete_path() {
+	mut fl := new(path: '/tmp/fl1.fl')!
+	fl.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (100, 1, "dir", 0, 0, 0, 0, 0, 0, 0)')!
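+	// a file under "dir", plus block and extra rows that must disappear together with it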
+ fl.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (101, 100, "file", 0, 0, 0, 0, 0, 0, 0)')! + fl.con.exec('insert into block (ino, id, key) values (101, "asdf", "qwer");')! + fl.con.exec('insert into extra (ino, data) values (101, "data");')! + + fl.delete_path('dir/file')! + + assert fl.con.exec('select * from inode where ino = 101;')!.len == 0 + assert fl.con.exec('select * from block where ino = 101;')!.len == 0 + assert fl.con.exec('select * from extra where ino = 101;')!.len == 0 +} + +fn test_find() { + mut fl := new(path: '/tmp/fl1.fl')! + + fl.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (100, 1, "dir1", 0, 0, 0, 0, 0, 0, 0)')! + fl.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (101, 1, "dir2", 0, 0, 0, 0, 0, 0, 0)')! + + inodes := fl.find('dir%')! + assert inodes.len == 2 +} + +fn test_delete_match() { + mut fl := new(path: '/tmp/fl1.fl')! + + fl.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (102, 1, "dir3", 0, 0, 0, 0, 0, 0, 0)')! + fl.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (103, 1, "dir4", 0, 0, 0, 0, 0, 0, 0)')! + + fl.delete_match('dir%')! + + inodes := fl.find('dir%')! + assert inodes.len == 0 +} + +fn test_copy() { + mut fl := new(path: '/tmp/fl1.fl')! + fl.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (1, 0, "/", 0, 0, 0, 0, 0, 0, 0)')! + fl.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (104, 1, "dir1", 0, 0, 0, 0, 0, 0, 0)')! + fl.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (105, 104, "file1", 0, 0, 0, 0, 0, 0, 0)')! + fl.con.exec('insert into block (ino, id, key) values (105, "asdf", "qwer");')! + fl.con.exec('insert into extra (ino, data) values (105, "data");')! + + fl.copy('dir1', 'dir2')! + + dir2 := fl.find('dir2')! + assert dir2.len == 1 + + file1 := fl.find('file1')! + assert file1.len == 2 + + dir2file1 := if file1[0].ino == 105 { + file1[1] + } else { + file1[0] + } + + assert fl.con.exec_param('select * from block where ino = ?;', '${dir2file1.ino}')!.len == 1 + assert fl.con.exec_param('select * from extra where ino = ?;', '${dir2file1.ino}')!.len == 1 +} + +fn test_get_routes() { + mut fl := new(path: '/tmp/fl1.fl')! + + fl.con.exec('insert into route (start, end, url) values (0, 125, "dir:///tmp/store0")')! + fl.con.exec('insert into route (start, end, url) values (126, 255, "dir:///tmp/store1")')! + + want := [Route{ + start: 0 + end: 125 + url: 'dir:///tmp/store0' + }, Route{ + start: 126 + end: 255 + url: 'dir:///tmp/store1' + }] + assert fl.get_routes()! == want +} + +fn test_add_routes() { + mut fl := new(path: '/tmp/fl1.fl')! + + routes_to_add := [Route{ + start: 10 + end: 20 + url: 'dir:///tmp/store2' + }, Route{ + start: 20 + end: 30 + url: 'dir:///tmp/store3' + }] + fl.add_routes(routes_to_add)! + + found_routes := fl.get_routes()! + for route in routes_to_add { + assert found_routes.contains(route) + } +} + +fn test_update_routes() { + mut fl := new(path: '/tmp/fl1.fl')! + + updated_routes := [Route{ + start: 30 + end: 40 + url: 'dir:///tmp/store4' + }, Route{ + start: 50 + end: 60 + url: 'dir:///tmp/store6' + }, Route{ + start: 100 + end: 255 + url: 'dir:///tmp/store7' + }] + fl.update_routes(updated_routes)! 
+ + assert updated_routes == fl.get_routes()! +} + +fn test_merge() { + mut fl1 := new(path: '/tmp/fl1.fl')! + mut fl2 := new(path: '/tmp/fl2.fl')! + + fl1.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (1, 0, "/", 0, 0, 0, 0, 0, 0, 0)')! + fl1.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (104, 1, "file1", 0, 0, 0, 0, 0, 0, 0)')! + fl1.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (105, 1, "file2", 0, 0, 0, 0, 0, 0, 0)')! + fl1.add_block(Block{ ino: 104, id: '1234', key: '1234' })! + fl1.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (106, 1, "dir1", 0, 0, 0, 16384, 0, 0, 0)')! + fl1.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (107, 106, "file3", 10, 0, 0, 0, 0, 0, 0)')! + fl1.add_block(Block{ ino: 107, id: '1234', key: '1234' })! + fl1.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (108, 106, "file4", 10, 0, 0, 0, 0, 0, 0)')! + + fl2.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (1, 0, "/", 0, 0, 0, 0, 0, 0, 0)')! + fl2.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (104, 1, "file1", 0, 0, 0, 0, 0, 0, 0)')! + fl2.add_block(Block{ ino: 104, id: '5678', key: '5678' })! + fl2.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (105, 1, "dir1", 0, 0, 0, 16384, 0, 0, 0)')! + fl2.con.exec('insert into inode (ino, parent, name, size, uid, gid, mode, rdev, ctime, mtime) values (106, 105, "file3", 10, 0, 0, 0, 0, 0, 0)')! + fl2.add_block(Block{ ino: 106, id: '1234', key: '1234' })! + + merge('/tmp/fl2.fl', '/tmp/fl1.fl')! + + list := fl1.list(true)! + + assert list.len == 7 + assert list.contains(Inode{ ino: 1, name: '/' }) + assert list.contains(Inode{ ino: 104, parent: 1, name: 'file1' }) + assert list.contains(Inode{ ino: 105, parent: 1, name: 'file2' }) + assert list.contains(Inode{ ino: 106, parent: 1, name: 'dir1', mode: 16384 }) + assert list.contains(Inode{ ino: 107, parent: 106, name: 'file3', size: 10 }) + assert list.contains(Inode{ ino: 108, parent: 106, name: 'file4', size: 10 }) + assert list.contains(Inode{ ino: 109, parent: 1, name: 'file1 (1)' }) + + mut blocks := fl1.get_inode_blocks(104)! + assert blocks.len == 1 + assert blocks[0] == Block{ + ino: 104 + id: '1234' + key: '1234' + } + + blocks = fl1.get_inode_blocks(107)! + assert blocks.len == 1 + assert blocks[0] == Block{ + ino: 107 + id: '1234' + key: '1234' + } + + blocks = fl1.get_inode_blocks(109)! + assert blocks.len == 1 + assert blocks[0] == Block{ + ino: 109 + id: '5678' + key: '5678' + } +} diff --git a/lib/data/flist/inode.v b/lib/data/flist/inode.v new file mode 100644 index 00000000..939e89e9 --- /dev/null +++ b/lib/data/flist/inode.v @@ -0,0 +1,131 @@ +module flist + +@[table: 'inode'] +pub struct Inode { +pub mut: + ino u64 @[primary; sql: serial] + parent u64 + name string + size u64 + uid u32 + gid u32 + mode u32 + rdev u64 + ctime i64 + mtime i64 +} + +fn (mut f Flist) add_inode(inode Inode) ! { + sql f.con { + insert inode into Inode + }! +} + +fn (mut f Flist) get_inode_children(parent u64) ![]Inode { + children := sql f.con { + select from Inode where parent == parent + }! 
+ + return children +} + +fn (mut f Flist) find_inode_with_pattern(pattern string) ![]Inode { + inodes := sql f.con { + select from Inode where name like pattern + }! + + return inodes +} + +fn (mut f Flist) get_inode_from_path(path_ string) !Inode { + mut path := path_.trim('/') + + items := path.split('/') + root_inodes := sql f.con { + select from Inode where ino == 1 + }! + + if root_inodes.len != 1 { + return error('invalid flist: failed to get root directory inode') + } + + mut inode := root_inodes[0] + if path == '' { + return inode + } + + for item in items { + if item == '' { + return error('invalid path ${path_}') + } + + inodes := sql f.con { + select from Inode where name == item && parent == inode.ino + }! + + // at most only one entry should match + if inodes.len == 0 { + return error('file or directory ${item} does not exist in flist') + } + + inode = inodes[0] + } + + return inode +} + +fn (mut f Flist) get_inode_path(inode Inode) !string { + mut path := '' + mut cur_inode := inode + for cur_inode.ino != 1 { + path = '/${cur_inode.name}${path}' + cur_inode = f.get_inode(cur_inode.parent)! + } + + return path +} + +fn (mut f Flist) get_inode(ino u64) !Inode { + inode := sql f.con { + select from Inode where ino == ino + }! + + if inode.len == 0 { + return error('inode ${ino} was not found') + } + + return inode[0] +} + +// get_last_inode_number returns the biggest inode number in flist +fn (mut f Flist) get_last_inode_number() !u64 { + inodes := f.list(true)! + mut last_inode := u64(0) + for inode in inodes { + if inode.ino > last_inode { + last_inode = inode.ino + } + } + + return last_inode +} + +fn (mut f Flist) delete_inode(ino u64) ! { + // delete from block table + f.delete_block(ino)! + + // delete from extra table + f.delete_extra(ino)! + + // get children if any + children := sql f.con { + select from Inode where parent == ino + }! + + for child in children { + f.delete_inode(child.ino)! + } + + // delete inode + f.con.exec_param('delete from inode where ino = ?;', '${ino}')! +} diff --git a/lib/data/flist/route.v b/lib/data/flist/route.v new file mode 100644 index 00000000..805cbd63 --- /dev/null +++ b/lib/data/flist/route.v @@ -0,0 +1,27 @@ +module flist + +@[table: 'route'] +pub struct Route { +pub mut: + start u8 + end u8 + url string +} + +// get_routes returns all flist routes +pub fn (mut f Flist) get_routes() ![]Route { + routes := sql f.con { + select from Route + }! + return routes +} + +fn (mut f Flist) add_route(route Route) ! { + sql f.con { + insert route into Route + }! +} + +fn (mut f Flist) delete_all_routes() ! { + f.con.exec('delete from route;')! +} diff --git a/lib/data/flist/tag.v b/lib/data/flist/tag.v new file mode 100644 index 00000000..9477ed75 --- /dev/null +++ b/lib/data/flist/tag.v @@ -0,0 +1,22 @@ +module flist + +@[table: 'tag'] +pub struct Tag { +pub mut: + key string @[primary] + value string +} + +fn (mut f Flist) get_tags() ![]Tag { + tags := sql f.con { + select from Tag + }! + + return tags +} + +fn (mut f Flist) add_tag(tag Tag) ! { + sql f.con { + insert tag into Tag + }! 
+}
diff --git a/lib/data/hjson/hjson.v b/lib/data/hjson/hjson.v
index 1a34ac00..f77d7499 100644
--- a/lib/data/hjson/hjson.v
+++ b/lib/data/hjson/hjson.v
@@ -1,4 +1,4 @@
-module crystaljson
+module herojson
 
 import x.json2
 import freeflowuniverse.herolib.core.texttools
diff --git a/lib/data/ipaddress/ipaddress.v b/lib/data/ipaddress/ipaddress.v
new file mode 100644
index 00000000..38f398e7
--- /dev/null
+++ b/lib/data/ipaddress/ipaddress.v
@@ -0,0 +1,255 @@
+module ipaddress
+
+import os
+import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.ui.console
+
+pub struct IPNetwork {
+	IPAddress
+}
+
+// specifies a range out of which e.g. ipaddresses can be chosen.
+// note that checks need to be done to make sure that the IPAddresses are part of the subnet specified by the parent object
+pub struct IPNetworkRange {
+	IPAddress
+pub mut:
+	from IPAddress
+	to   IPAddress
+}
+
+pub struct IPAddress {
+pub mut:
+	addr        string // e.g. 192.168.6.6 or x:x:x:x:x:x:x:x
+	mask        int    // e.g. 24, default not specified
+	cat         IpAddressType = .ipv4
+	description string
+	port        int
+}
+
+pub enum IpAddressType {
+	ipv4
+	ipv6
+	name
+}
+
+// TODO: implementation not correct !!!
+
+// format: localhost:7777
+// format: localhost:7777/24
+// format: 192.168.6.6:7777
+// format: 192.168.6.6
+// format ipv6: [x:x:x:x:x:x:x:x]:p
+// format ipv6: [x:x:x:x:x:x:x:x]:p/96
+// format ipv6: x:x:x:x:x:x:x:x
+// format ipv6: x:x:x:x.../96
+pub fn new(addr_string string) !IPAddress {
+	mut cat := IpAddressType.ipv4
+	mut addr := addr_string
+	mut port := ''
+	mut mask := 0
+
+	if addr_string.starts_with('localhost') {
+		addr = addr_string.replace('localhost', '127.0.0.1')
+	}
+
+	if addr.contains('/') {
+		splitted := addr.split('/')
+		if splitted.len == 2 {
+			mask = splitted[1].int()
+			addr = splitted[0]
+		} else {
+			return error('syntax error in ipaddr: ${addr}, should only have one /')
+		}
+	}
+
+	// parse the ip addr
+	if addr.count(':') > 4 && !addr.contains('[') {
+		cat = IpAddressType.ipv6
+		addr = addr.trim_space()
+		port = '0'
+	} else if addr.contains('[') && addr.count(']') == 1 {
+		post := addr.all_after_last(']').trim_space()
+		addr = addr.all_before_last(']').all_after_first('[').trim_space()
+		cat = IpAddressType.ipv6
+		port = '0'
+		if post.len > 0 {
+			if post.contains(':') {
+				port = post.all_after(':').trim_space()
+			} else {
+				return error("syntax error in ip addr: '${addr}' should have : after ] if port")
+			}
+		}
+	} else if ipv4_check(addr) {
+		cat = IpAddressType.ipv4
+		addr = addr.trim_space()
+		port = '0'
+		if addr.count(':') == 1 {
+			port = addr.all_after_last(':').trim_space()
+			addr = addr.all_before(':').trim_space()
+		} else if addr.count(':') > 1 {
+			return error('Invalid IP address string, port part \'${addr}\'')
+		}
+	} else if name_check(addr) {
+		cat = IpAddressType.name
+		addr = addr.trim_space()
+		if addr.count(':') == 1 {
+			port = addr.all_after_last(':').trim_space()
+			addr = addr.all_before(':').trim_space()
+		} else if addr.count(':') > 1 {
+			return error('Invalid IP address string, port part \'${addr}\'')
+		}
+	} else {
+		return error('Invalid IP address string \'${addr}\'')
+	}
+
+	mut ip := IPAddress{
+		addr: addr.trim_space()
+		port: port.int()
+		cat: cat
+		mask: mask
+	}
+
+	// ip.check()!
+ + return ip +} + +@[params] +pub struct PingArgs { +pub mut: + retry int + timeout int +} + +// PingArgs: retry & timeout +// retry default 1 +// timeout default 1000 (msec) +pub fn (mut ipaddr IPAddress) ping(args_ PingArgs) bool { + mut args := args_ + if args.retry == 0 { + args.retry = 1 + } + if args.timeout == 0 { + args.timeout = 1000 + } + + mut timeout := int(args.timeout / 1000) + if timeout < 1 { + timeout = 1 + } + + mut cmd := '' + if ipaddr.cat == IpAddressType.ipv4 { + cmd = 'ping -c 1 -W ${args.timeout} ${ipaddr.addr}' + } else { + if osal.is_osx() { + cmd = 'ping6 -c 1 -i ${timeout} ${ipaddr.addr}' + } else { + cmd = 'ping -6 -c 1 -W ${args.timeout} ${ipaddr.addr}' + } + } + for _ in 0 .. args.retry { + console.print_debug(cmd) + res := os.execute(cmd) + if res.exit_code > 0 { + continue + } + return true + } + return false +} + +// check if ipv4 address is properly formatted as aaa.bbb.ccc.ddd +pub fn ipv4_check(addr_ string) bool { + mut addr := addr_ + if addr.contains(':') { + addr = addr.all_before(':') + } + if addr.count('.') != 3 { + return false + } + items := addr.split('.') + for item in items { + if !item.is_int() { + return false + } + i := item.int() + if i > 255 || i < 0 { + return false + } + } + if items.first().int() == 0 { + return false + } + if items.last().int() == 0 { + return false + } + return true +} + +pub fn name_check(addr_ string) bool { + mut addr := addr_.to_lower() + if addr.contains(':') { + addr = addr.all_before(':') + } + if addr.ends_with('.') || addr.starts_with('.') { + return false + } + if addr.count('.') < 1 { + return false + } + if addr.count('.') > 8 { + return false + } + for u in addr { + if u == 45 || u == 46 { + continue + } else if u > 47 && u < 58 { // see https://www.charset.org/utf-8 + continue + } else if u > 96 && u < 123 { + continue + } + return false + } + return true +} + +pub fn (mut ipaddr IPAddress) toname() !string { + if ipaddr.cat == IpAddressType.ipv4 { + return ipaddr.addr.replace('.', '_') + } + if ipaddr.cat == IpAddressType.name { + return ipaddr.addr.replace('.', '_') + } + return ipaddr.addr.replace(':', '_') +} + +pub fn (mut ipaddr IPAddress) address() !string { + if ipaddr.cat == IpAddressType.ipv4 { + if ipaddr.port > 0 { + if ipaddr.mask > 0 { + return error('cannot have mask when port specified') + } + return '${ipaddr.addr}:${ipaddr.port}' + } else { + if ipaddr.mask > 0 { + return '${ipaddr.addr}/${ipaddr.mask}' + } else { + return '${ipaddr.addr}' + } + } + } else { + if ipaddr.port > 0 { + if ipaddr.mask > 0 { + return error('cannot have mask when port specified') + } + return '[${ipaddr.addr}]:${ipaddr.port}' + } else { + if ipaddr.mask > 0 { + return '${ipaddr.addr}/${ipaddr.mask}' + } else { + return '${ipaddr.addr}' + } + } + } +} diff --git a/lib/data/ipaddress/ipaddress_test.v b/lib/data/ipaddress/ipaddress_test.v new file mode 100644 index 00000000..dbeeb3e9 --- /dev/null +++ b/lib/data/ipaddress/ipaddress_test.v @@ -0,0 +1,94 @@ +module ipaddress + +fn test_ip4f() { + assert ipv4_check('0.0.0.1') == false + assert ipv4_check('1.0.0.0') == false + assert ipv4_check('1.0.0.0.1') == false + assert ipv4_check('254.254.254.254') + assert ipv4_check('254.0.0.254') + assert ipv4_check('0a.0.0.1') == false + assert ipv4_check('a.0.0.1') == false + assert ipv4_check('.0.0.1') == false + assert ipv4_check('0.0.1') == false +} + +fn test_name() { + assert name_check('something.c00l') + assert name_check('something.c_ol') == false + assert name_check('something.c00l.') == false + assert 
name_check('.something.else') == false
+	assert name_check('something.else.s!s') == false
+	assert name_check('something.else.s?s') == false
+}
+
+fn test_ping() {
+	mut addr := IPAddress{
+		addr: '127.0.0.1'
+	}
+	assert addr.ping(timeout: 3)
+	assert addr.port == 0
+}
+
+fn test_ping_fails() {
+	mut addr := IPAddress{
+		addr: '22.22.22.22'
+	}
+	assert addr.ping(timeout: 3) == false
+	assert addr.port == 0
+	assert addr.addr == '22.22.22.22'
+}
+
+fn test_ipv4a() {
+	mut addr := new('22.22.22.22') or { panic(err) }
+	assert addr.cat == .ipv4
+	assert addr.port == 0
+	assert addr.addr == '22.22.22.22'
+}
+
+fn test_ipv4b() {
+	mut addr := new('22.22.22.22:33') or { panic(err) }
+	assert addr.addr == '22.22.22.22'
+	assert addr.cat == .ipv4
+	assert addr.port == 33
+}
+
+fn test_ipv6() {
+	mut addr := new('202:6a34:cd78:b0d7:5521:8de7:218e:6680') or { panic(err) }
+	assert addr.cat == .ipv6
+	assert addr.port == 0
+	assert addr.ping(timeout: 3) == false
+}
+
+fn test_ipv6b() {
+	mut addr := new('[202:6a34:cd78:b0d7:5521:8de7:218e:6680]') or { panic(err) }
+	assert addr.cat == .ipv6
+	assert addr.port == 0
+}
+
+fn test_ipv6c() {
+	mut addr := new('[202:6a34:cd78:b0d7:5521:8de7:218e:6680]:22 ') or { panic(err) }
+	assert addr.cat == .ipv6
+	assert addr.port == 22
+	assert addr.addr == '202:6a34:cd78:b0d7:5521:8de7:218e:6680'
+}
+
+fn test_name1() {
+	mut addr := new('test.com:33') or { panic(err) }
+	assert addr.addr == 'test.com'
+	assert addr.cat == .name
+	assert addr.port == 33
+}
+
+fn test_name2() {
+	mut addr := new('test.com') or { panic(err) }
+	assert addr.addr == 'test.com'
+	assert addr.cat == .name
+	assert addr.port == 0
+}
+
+fn test_name3() {
+	mut addr := new('www.a.b.c.test.com') or { panic(err) }
+	assert addr.addr == 'www.a.b.c.test.com'
+	assert addr.cat == .name
+	assert addr.port == 0
+}
diff --git a/lib/data/jsonschema/README.md b/lib/data/jsonschema/README.md
new file mode 100644
index 00000000..5d32e165
--- /dev/null
+++ b/lib/data/jsonschema/README.md
@@ -0,0 +1,37 @@
+# JSON Schema
+
+A V library for the JSON Schema model, and a few handy functions.
+
+## JSON Schema Model
+
+Defined [here](https://json-schema.org/), "JSON Schema is a declarative language that allows you to annotate and validate JSON documents." The model in this module provides a struct that can easily be encoded into a JSON Schema.
+
+## Generating a Schema
+
+The generate.v file provides functions that can generate JSONSchema from [codemodels](../codemodel/). This allows for easy generation of JSON Schema from structs, and is useful for generating schemas from parsed code in V.
+
+Example:
+```v
+struct_ := codemodel.Struct {
+	name: "Mystruct"
+	fields: [
+		codemodel.StructField {
+			name: "myfield"
+			typ: "string"
+		}
+	]
+}
+schema := struct_to_schema(struct_)
+```
+
+### Generating Schemas for Anonymous Structs
+
+The properties of a JSON Schema are key-value pairs, where each key is the subschema's name and the value is the schema of the property (or a reference to a schema defined elsewhere). This is analogous to the fields of a struct, each of which is represented by a field name and a type.
+
+It's good practice to define object type schemas separately and reference them in properties, especially if the same schema is used in multiple places. However, object type schemas can also be defined in property definitions. 
This may make sense if the schema is exclusively used as a property of a schema, similar to using an anonymous struct for the type definition of a field of a struct.
+
+As such, schemas generated from structs that declare anonymous structs as field types include a schema definition in the property field.
+
+## Notes
+
+As [this issue](https://github.com/vlang/v/issues/15081) is still not resolved, a JSON schema cannot be decoded directly into the schema structure defined in this module. To decode a JSON schema string into a structure, use the `pub fn decode(data string) !Schema` function defined in `decode.v`.
\ No newline at end of file
diff --git a/lib/data/jsonschema/codegen.v b/lib/data/jsonschema/codegen.v
new file mode 100644
index 00000000..584d3b20
--- /dev/null
+++ b/lib/data/jsonschema/codegen.v
@@ -0,0 +1,186 @@
+module jsonschema
+
+import freeflowuniverse.herolib.core.codemodel { Alias, Attribute, CodeItem, Struct, StructField, Type }
+
+const vtypes = {
+	'integer': 'int'
+	'string':  'string'
+}
+
+pub fn (schema Schema) v_encode() !string {
+	module_name := 'schema.title.'
+	structs := schema.vstructs_encode()!
+	// todo: report bug: return $tmpl(...)
+	encoded := $tmpl('templates/schema.vtemplate')
+	return encoded
+}
+
+// vstructs_encode encodes a schema into V structs.
+// if a schema has nested object type schemas or defines object type schemas,
+// recursively encodes object type schemas and pushes to the array of structs.
+// returns an array of schemas that have been encoded into V structs.
+pub fn (schema Schema) vstructs_encode() ![]string {
+	mut schemas := []string{}
+	mut properties := ''
+
+	// loop over properties
+	for name, property_ in schema.properties {
+		mut property := Schema{}
+		mut typesymbol := ''
+
+		if property_ is Reference {
+			// if reference, set typesymbol as reference name
+			ref := property_ as Reference
+			typesymbol = ref.ref.all_after_last('/')
+		} else {
+			property = property_ as Schema
+			typesymbol = property.vtype_encode()!
+			// recursively encode property if object
+			// todo: handle duplicates
+			if property.typ == 'object' {
+				structs := property.vstructs_encode()!
+				schemas << structs
+			}
+		}
+
+		properties += '\n\t${name} ${typesymbol}'
+		if name in schema.required {
+			properties += ' @[required]'
+		}
+	}
+	schemas << $tmpl('templates/struct.vtemplate')
+	return schemas
+}
+
+// vtype_encode generates a V type symbol for the schema
+pub fn (schema Schema) vtype_encode() !string {
+	mut property_str := ''
+	if schema.typ == 'null' {
+		return ''
+	}
+	if schema.typ == 'object' {
+		if schema.title == '' {
+			return error('Object schemas must define a title.')
+		}
+		// todo: enforce uppercase
+		property_str = schema.title
+	} else if schema.typ == 'array' {
+		// todo: handle multiple item schemas
+		if schema.items is SchemaRef {
+			// items := schema.items as SchemaRef
+			if schema.items is Schema {
+				items_schema := schema.items as Schema
+				property_str = '[]${items_schema.typ}'
+			}
+		}
+	} else if schema.typ in vtypes.keys() {
+		property_str = vtypes[schema.typ]
+	} else if schema.title != '' {
+		property_str = schema.title
+	} else {
+		return error('unknown type `${schema.typ}`')
+	}
+	return property_str
+}
+
+pub fn (schema Schema) to_code() !CodeItem {
+	if schema.typ == 'object' {
+		return CodeItem(schema.to_struct()!)
+ } + if schema.typ in vtypes { + return Alias{ + name: schema.title + typ: Type{ + symbol: vtypes[schema.typ] + } + } + } + if schema.typ == 'array' { + if schema.items is SchemaRef { + if schema.items is Schema { + items_schema := schema.items as Schema + return Alias{ + name: schema.title + typ: Type{ + symbol: '[]${items_schema.typ}' + } + } + } else if schema.items is Reference { + items_ref := schema.items as Reference + return Alias{ + name: schema.title + typ: Type{ + symbol: '[]${items_ref.to_type_symbol()}' + } + } + } + } + } + return error('Schema typ ${schema.typ} not supported for code generation') +} + +pub fn (schema Schema) to_struct() !Struct { + mut fields := []StructField{} + + for key, val in schema.properties { + mut field := val.to_struct_field(key)! + if field.name in schema.required { + field.attrs << Attribute{ + name: 'required' + } + } + fields << field + } + + return Struct{ + name: schema.title + description: schema.description + fields: fields + } +} + +pub fn (schema SchemaRef) to_struct_field(name string) !StructField { + if schema is Reference { + return StructField{ + name: name + typ: Type{ + symbol: schema.to_type_symbol() + } + } + } else if schema is Schema { + mut field := StructField{ + name: name + description: schema.description + } + if schema.typ == 'object' { + // then is anonymous struct + field.anon_struct = schema.to_struct()! + return field + } else if schema.typ in vtypes { + field.typ.symbol = vtypes[schema.typ] + return field + } + return error('Schema typ ${schema.typ} not supported for code generation') + } + return error('Schema typ not supported for code generation') +} + +pub fn (sr SchemaRef) to_code() !Type { + return if sr is Reference { + sr.to_type() + } else { + Type{ + symbol: (sr as Schema).vtype_encode()! + } + } +} + +pub fn (ref Reference) to_type_symbol() string { + return ref.ref.all_after_last('/') +} + +pub fn (ref Reference) to_type() Type { + return Type{ + symbol: ref.to_type_symbol() + } +} diff --git a/lib/data/jsonschema/codegen_test.v b/lib/data/jsonschema/codegen_test.v new file mode 100644 index 00000000..dff91e24 --- /dev/null +++ b/lib/data/jsonschema/codegen_test.v @@ -0,0 +1,101 @@ +module jsonschema + +import freeflowuniverse.herolib.ui.console + +fn test_encode_simple() ! { + struct_str := ' +// person struct used for test schema encoding +struct TestPerson { + name string + age int +}' + + schema := Schema{ + schema: 'test' + title: 'TestPerson' + description: 'person struct used for test schema encoding' + typ: 'object' + properties: { + 'name': Schema{ + typ: 'string' + description: 'name of the test person' + } + 'age': Schema{ + typ: 'integer' + description: 'age of the test person' + } + } + } + encoded := schema.vstructs_encode()! + assert encoded.len == 1 + assert encoded[0].trim_space() == struct_str.trim_space() +} + +fn test_encode_schema_with_reference() ! { + struct_str := ' +// person struct used for test schema encoding +struct TestPerson { + name string + age int + friend Friend +}' + + schema := Schema{ + schema: 'test' + title: 'TestPerson' + description: 'person struct used for test schema encoding' + typ: 'object' + properties: { + 'name': Schema{ + typ: 'string' + description: 'name of the test person' + } + 'age': Schema{ + typ: 'integer' + description: 'age of the test person' + } + 'friend': Reference{ + ref: '#components/schemas/Friend' + } + } + } + encoded := schema.vstructs_encode()! 
+ assert encoded.len == 1 + assert encoded[0].trim_space() == struct_str.trim_space() +} + +fn test_encode_recursive() ! { + schema := Schema{ + schema: 'test' + title: 'TestPerson' + description: 'person struct used for test schema encoding' + typ: 'object' + properties: { + 'name': Schema{ + typ: 'string' + description: 'name of the test person' + } + 'age': Schema{ + typ: 'integer' + description: 'age of the test person' + } + 'friend': Schema{ + title: 'TestFriend' + typ: 'object' + description: 'friend of the test person' + properties: { + 'name': Schema{ + typ: 'string' + description: 'name of the test friend person' + } + 'age': Schema{ + typ: 'integer' + description: 'age of the test friend person' + } + } + } + } + } + encoded := schema.vstructs_encode()! + console.print_debug(encoded) +} diff --git a/lib/data/jsonschema/decode.v b/lib/data/jsonschema/decode.v new file mode 100644 index 00000000..91cdf5ec --- /dev/null +++ b/lib/data/jsonschema/decode.v @@ -0,0 +1,53 @@ +module jsonschema + +import json +import x.json2 { Any } +import os +import freeflowuniverse.herolib.core.pathlib + +pub fn decode(data string) !Schema { + schema_map := json2.raw_decode(data)!.as_map() + mut schema := json.decode(Schema, data)! + for key, value in schema_map { + if key == 'properties' { + schema.properties = decode_schemaref_map(value.as_map())! + } else if key == 'additionalProperties' { + schema.additional_properties = decode_schemaref(value.as_map())! + } else if key == 'items' { + schema.items = decode_items(value)! + } + } + return schema +} + +pub fn decode_items(data Any) !Items { + if data.str().starts_with('{') { + return decode_schemaref(data.as_map())! + } + if !data.str().starts_with('[') { + return error('items field must either be list of schemarefs or a schemaref') + } + + mut items := []SchemaRef{} + for val in data.arr() { + items << decode_schemaref(val.as_map())! + } + return items +} + +pub fn decode_schemaref_map(data_map map[string]Any) !map[string]SchemaRef { + mut schemaref_map := map[string]SchemaRef{} + for key, val in data_map { + schemaref_map[key] = decode_schemaref(val.as_map())! + } + return schemaref_map +} + +pub fn decode_schemaref(data_map map[string]Any) !SchemaRef { + if '\$ref' in data_map { + return Reference{ + ref: data_map['\$ref'].str() + } + } + return decode(data_map.str())! +} diff --git a/lib/data/jsonschema/decode_test.v b/lib/data/jsonschema/decode_test.v new file mode 100644 index 00000000..59f249fe --- /dev/null +++ b/lib/data/jsonschema/decode_test.v @@ -0,0 +1,46 @@ +module jsonschema + +import json +import x.json2 +import os +import freeflowuniverse.herolib.core.pathlib + +const testdata = '${os.dir(@FILE)}/testdata' + +struct Pet { + name string +} + +fn test_decode() ! { + mut pet_schema_file := pathlib.get_file( + path: '${testdata}/pet.json' + )! + pet_schema_str := pet_schema_file.read()! + pet_schema := decode(pet_schema_str)! + assert pet_schema == Schema{ + typ: 'object' + properties: { + 'name': Schema{ + typ: 'string' + } + } + required: ['name'] + } +} + +fn test_decode_schemaref() ! { + mut pet_schema_file := pathlib.get_file( + path: '${testdata}/pet.json' + )! + pet_schema_str := pet_schema_file.read()! + pet_schemaref := decode(pet_schema_str)! 
+	assert pet_schemaref == Schema{
+		typ: 'object'
+		properties: {
+			'name': Schema{
+				typ: 'string'
+			}
+		}
+		required: ['name']
+	}
+}
diff --git a/lib/data/jsonschema/generate.v b/lib/data/jsonschema/generate.v
new file mode 100644
index 00000000..69a1a37a
--- /dev/null
+++ b/lib/data/jsonschema/generate.v
@@ -0,0 +1,268 @@
+module jsonschema
+
+import freeflowuniverse.herolib.core.codemodel { Param, Result, Struct, Type }
+
+// sumtype_to_schema generates a json schema or reference from a sumtype model
+pub fn sumtype_to_schema(sumtype codemodel.Sumtype) SchemaRef {
+	mut one_of := []SchemaRef{}
+	for type_ in sumtype.types {
+		property_schema := typesymbol_to_schema(type_.symbol)
+		one_of << property_schema
+	}
+
+	title := sumtype.name
+
+	return SchemaRef(Schema{
+		title: title
+		description: sumtype.description
+		one_of: one_of
+	})
+}
+
+// struct_to_schema generates a json schema or reference from a struct model
+pub fn struct_to_schema(struct_ Struct) SchemaRef {
+	mut properties := map[string]SchemaRef{}
+	for field in struct_.fields {
+		mut property_schema := SchemaRef(Schema{})
+		if field.typ.symbol.starts_with('_VAnonStruct') {
+			property_schema = struct_to_schema(field.anon_struct)
+		} else {
+			property_schema = type_to_schema(field.typ)
+		}
+		if mut property_schema is Schema {
+			properties[field.name] = SchemaRef(Schema{
+				...property_schema
+				description: field.description
+			})
+		} else {
+			properties[field.name] = property_schema
+		}
+	}
+
+	title := if struct_.name.starts_with('_VAnonStruct') {
+		''
+	} else {
+		struct_.name
+	}
+
+	return SchemaRef(Schema{
+		title: title
+		description: struct_.description
+		properties: properties
+	})
+}
+
+pub fn param_to_schema(param Param) SchemaRef {
+	if param.struct_ != Struct{} {
+		return struct_to_schema(param.struct_)
+	}
+	return typesymbol_to_schema(param.typ.symbol)
+}
+
+pub fn result_to_schema(result Result) SchemaRef {
+	if result.structure != Struct{} {
+		return struct_to_schema(result.structure)
+	}
+	return typesymbol_to_schema(result.typ.symbol)
+}
+
+// typesymbol_to_schema receives a typesymbol; if the typesymbol belongs to a user defined struct,
+// it returns a reference to the schema, else it returns a schema for the typesymbol
+pub fn typesymbol_to_schema(symbol_ string) SchemaRef {
+	mut symbol := symbol_.trim_string_left('!').trim_string_left('?')
+	if symbol == '' {
+		return SchemaRef(Schema{
+			typ: 'null'
+		})
+	} else if symbol.starts_with('[]') {
+		mut array_type := symbol.trim_string_left('[]')
+		return SchemaRef(Schema{
+			typ: 'array'
+			items: typesymbol_to_schema(array_type)
+		})
+	} else if symbol.starts_with('map[string]') {
+		mut map_type := symbol.trim_string_left('map[string]')
+		return SchemaRef(Schema{
+			typ: 'object'
+			additional_properties: typesymbol_to_schema(map_type)
+		})
+	} else if symbol[0].is_capital() {
+		// todo: better imported type handling
+		if symbol == 'Uint128' {
+			return SchemaRef(Schema{
+				typ: 'integer'
+				minimum: Number(0)
+				// todo: implement uint128 number
+				// maximum: Number('340282366920938463463374607431768211455')
+			})
+		}
+		return SchemaRef(Reference{
+			ref: '#/components/schemas/${symbol}'
+		})
+	} else if symbol.starts_with('_VAnonStruct') {
+		return SchemaRef(Reference{
+			ref: '#/components/schemas/${symbol}'
+		})
+	} else {
+		if symbol == 'void' {
+			return SchemaRef(Schema{
+				typ: 'null'
+			})
+		}
+		if symbol == 'bool' {
+			return SchemaRef(Schema{
+				typ: 'boolean'
+			})
+		}
+		if symbol == 'int' {
+			return SchemaRef(Schema{
+				typ: 'integer'
+			})
+		}
+		if symbol == 'u8' 
{ + return SchemaRef(Schema{ + typ: 'integer' + }) + } + if symbol == 'u16' { + return SchemaRef(Schema{ + typ: 'integer' + }) + } + if symbol == 'u32' { + return SchemaRef(Schema{ + typ: 'integer' + }) + } + if symbol == 'u64' { + return SchemaRef(Schema{ + typ: 'string' + }) + } + if symbol == 'f32' { + return SchemaRef(Schema{ + typ: 'string' + }) + } + if symbol == 'f64' { + return SchemaRef(Schema{ + typ: 'string' + }) + } + if symbol == '!' { + return SchemaRef(Schema{ + typ: 'null' + }) + } + if symbol == 'i64' { + return SchemaRef(Schema{ + typ: 'string' + }) + } + if symbol == 'byte' { + return SchemaRef(Schema{ + typ: 'string' + }) + } + return SchemaRef(Schema{ + typ: symbol + }) + } +} + +pub fn type_to_schema(typ Type) SchemaRef { + mut symbol := typ.symbol.trim_string_left('!').trim_string_left('?') + if symbol == '' { + return SchemaRef(Schema{ + typ: 'null' + }) + } else if symbol.starts_with('[]') || typ.is_array { + mut array_type := symbol.trim_string_left('[]') + return SchemaRef(Schema{ + typ: 'array' + items: typesymbol_to_schema(array_type) + }) + } else if symbol.starts_with('map[string]') { + mut map_type := symbol.trim_string_left('map[string]') + return SchemaRef(Schema{ + typ: 'object' + additional_properties: typesymbol_to_schema(map_type) + }) + } else if symbol[0].is_capital() { + // todo: better imported type handling + if symbol == 'Uint128' { + return SchemaRef(Schema{ + typ: 'integer' + minimum: Number(0) + // todo: implement uint128 number + // maximum: Number('340282366920938463463374607431768211455') + }) + } + return SchemaRef(Reference{ + ref: '#/components/schemas/${symbol}' + }) + } else if symbol.starts_with('_VAnonStruct') { + return SchemaRef(Reference{ + ref: '#/components/schemas/${symbol}' + }) + } else { + if symbol == 'void' { + return SchemaRef(Schema{ + typ: 'null' + }) + } + if symbol == 'bool' { + return SchemaRef(Schema{ + typ: 'boolean' + }) + } + if symbol == 'int' { + return SchemaRef(Schema{ + typ: 'integer' + }) + } + if symbol == 'u8' { + return SchemaRef(Schema{ + typ: 'integer' + }) + } + if symbol == 'u16' { + return SchemaRef(Schema{ + typ: 'integer' + }) + } + if symbol == 'u32' { + return SchemaRef(Schema{ + typ: 'integer' + }) + } + if symbol == 'u64' { + return SchemaRef(Schema{ + typ: 'string' + }) + } + if symbol == 'f64' { + return SchemaRef(Schema{ + typ: 'string' + }) + } + if symbol == '!' 
{ + return SchemaRef(Schema{ + typ: 'null' + }) + } + if symbol == 'i64' { + return SchemaRef(Schema{ + typ: 'string' + }) + } + if symbol == 'byte' { + return SchemaRef(Schema{ + typ: 'string' + }) + } + return SchemaRef(Schema{ + typ: symbol + }) + } +} diff --git a/lib/data/jsonschema/generate_test.v b/lib/data/jsonschema/generate_test.v new file mode 100644 index 00000000..82e64f80 --- /dev/null +++ b/lib/data/jsonschema/generate_test.v @@ -0,0 +1,23 @@ +module jsonschema + +import freeflowuniverse.herolib.core.codemodel +import freeflowuniverse.herolib.ui.console + +fn test_struct_to_schema() { + struct_ := codemodel.Struct{ + name: 'test_name' + description: 'a codemodel struct to test struct to schema serialization' + fields: [ + codemodel.StructField{ + name: 'test_field' + description: 'a field of the test struct to test fields serialization into schema' + typ: codemodel.Type{ + symbol: 'string' + } + }, + ] + } + + schema := struct_to_schema(struct_) + console.print_debug(schema) +} diff --git a/lib/data/jsonschema/model.v b/lib/data/jsonschema/model.v new file mode 100644 index 00000000..70651984 --- /dev/null +++ b/lib/data/jsonschema/model.v @@ -0,0 +1,38 @@ +module jsonschema + +type Items = SchemaRef | []SchemaRef + +pub type SchemaRef = Reference | Schema + +pub struct Reference { +pub: + ref string @[json: 'ref'] +} + +type Number = int + +// https://json-schema.org/draft-07/json-schema-release-notes.html +pub struct Schema { +pub mut: + schema string @[json: 'schema'] + id string @[json: 'id'] + title string + description string + typ string @[json: 'type'] + properties map[string]SchemaRef + additional_properties SchemaRef @[json: 'additionalProperties'] + required []string + items Items + defs map[string]SchemaRef + one_of []SchemaRef @[json: 'oneOf'] + format string + // todo: make fields optional upon the fixing of https://github.com/vlang/v/issues/18775 + // from https://git.sr.ht/~emersion/go-jsonschema/tree/master/item/schema.go + // Validation for numbers + multiple_of int @[json: 'multipleOf'; omitempty] + maximum int @[omitempty] + exclusive_maximum int @[json: 'exclusiveMaximum'; omitempty] + minimum int @[omitempty] + exclusive_minimum int @[json: 'exclusiveMinimum'; omitempty] + enum_ []string @[json: 'enum'; omitempty] +} diff --git a/lib/data/jsonschema/reflection.v b/lib/data/jsonschema/reflection.v new file mode 100644 index 00000000..87a06678 --- /dev/null +++ b/lib/data/jsonschema/reflection.v @@ -0,0 +1 @@ +module jsonschema diff --git a/lib/data/jsonschema/templates/schema.vtemplate b/lib/data/jsonschema/templates/schema.vtemplate new file mode 100644 index 00000000..57b45e13 --- /dev/null +++ b/lib/data/jsonschema/templates/schema.vtemplate @@ -0,0 +1,7 @@ +module @module_name +// @schema.title +// @schema.description + +@for name in structs + @name +@end \ No newline at end of file diff --git a/lib/data/jsonschema/templates/struct.vtemplate b/lib/data/jsonschema/templates/struct.vtemplate new file mode 100644 index 00000000..cdca0242 --- /dev/null +++ b/lib/data/jsonschema/templates/struct.vtemplate @@ -0,0 +1,3 @@ +// @schema.description +struct @schema.title {@properties +} \ No newline at end of file diff --git a/lib/data/jsonschema/testdata/pet.json b/lib/data/jsonschema/testdata/pet.json new file mode 100644 index 00000000..97f52970 --- /dev/null +++ b/lib/data/jsonschema/testdata/pet.json @@ -0,0 +1,11 @@ +{ + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + } + } +} \ No newline at end of file 
diff --git a/lib/data/markdownparser/action_test.v b/lib/data/markdownparser/action_test.v new file mode 100644 index 00000000..c66952ee --- /dev/null +++ b/lib/data/markdownparser/action_test.v @@ -0,0 +1,187 @@ +module markdownparser + +import freeflowuniverse.herolib.data.paramsparser { Param, Params } +import freeflowuniverse.herolib.data.markdownparser.elements { Action } +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.ui.console + +fn test_action_empty_params() { + mut docs := new( + content: ' +!!farmerbot_powermanager.poweroff +' + )! + // console.print_debug(docs.children) + assert docs.children.len == 2 + action := docs.children[1] + if action is Action { + assert action.action.actor == 'farmerbot_powermanager' + assert action.action.name == 'poweroff' + assert action.action.params == Params{ + params: [] + args: [] + } + } else { + assert false, 'element ${docs.children[0]} is not an action' + } +} + +fn test_action_some_params_multiline() { + mut docs := new( + content: ' +!!farmerbot_nodemanager.define + id:15 + twinid:20 + has_public_ip:yes + has_public_config:1 +' + )! + + assert docs.children.len == 2 + action := docs.children[1] + // assert action.children.len == 3 + if action is Action { + assert action.action.actor == 'farmerbot_nodemanager' + assert action.action.name == 'define' + assert action.action.params == Params{ + params: [Param{ + key: 'twinid' + value: '20' + }, Param{ + key: 'has_public_ip' + value: 'yes' + }, Param{ + key: 'has_public_config' + value: '1' + }] + args: [] + } + } else { + assert false, 'element ${action} is not an action' + } +} + +fn test_action_some_params_inline() { + mut docs := new( + content: ' +!!farmerbot_nodemanager.define id:15 twinid:20 has_public_ip:yes has_public_config:1 +' + )! + + assert docs.children.len == 2 + action := docs.children[1] + if action is Action { + assert action.action.actor == 'farmerbot_nodemanager' + assert action.action.name == 'define' + assert action.action.params == Params{ + params: [Param{ + key: 'twinid' + value: '20' + }, Param{ + key: 'has_public_ip' + value: 'yes' + }, Param{ + key: 'has_public_config' + value: '1' + }] + args: [] + } + } else { + assert false, 'element ${action} is not an action' + } +} + +fn test_action_some_params_some_arguments_multi_line() { + mut docs := new( + content: ' +!!farmerbot_nodemanager.define + id:15 + has_public_config + has_public_ip:yes + is_dedicated +' + )! + + assert docs.children.len == 2 + action := docs.children[1] + if action is Action { + assert action.action.actor == 'farmerbot_nodemanager' + assert action.action.name == 'define' + assert action.action.params == Params{ + params: [Param{ + key: 'has_public_ip' + value: 'yes' + }] + args: ['has_public_config', 'is_dedicated'] + } + } else { + assert false, 'element ${action} is not an action' + } +} + +fn test_action_some_params_some_arguments_single_line() { + mut docs := new( + content: ' +!!farmerbot_nodemanager.define id:15 has_public_config has_public_ip:yes is_dedicated +' + )! 
+ + assert docs.children.len == 2 + action := docs.children[1] + if action is Action { + assert action.action.actor == 'farmerbot_nodemanager' + assert action.action.name == 'define' + assert action.action.params == Params{ + params: [Param{ + key: 'has_public_ip' + value: 'yes' + }] + args: ['has_public_config', 'is_dedicated'] + } + } else { + assert false, 'element ${action} is not an action' + } +} + +fn test_action() { + mut c := ' + # header + + some text + + !!farmerbot.nodemanager_define + id:15 + twinid:20 + has_public_ip:yes + has_public_config:1 + + a line + + ``` + //in codeblock + !!farmerbot.nodemanager_delete + id:16 + ``` + + another line + + ```js + !!farmerbot.nodemanager_start id:17 + ``` + + + ' + c = texttools.dedent(c) + + mut doc := new(content: c)! + assert doc.actions().len == 3 + actions := doc.actions() + assert actions[0].actor == 'farmerbot' + assert actions[0].name == 'nodemanager_define' + + assert actions[1].actor == 'farmerbot' + assert actions[1].name == 'nodemanager_delete' + + assert actions[2].actor == 'farmerbot' + assert actions[2].name == 'nodemanager_start' +} diff --git a/lib/data/markdownparser/elements/base.v b/lib/data/markdownparser/elements/base.v new file mode 100644 index 00000000..371f2c35 --- /dev/null +++ b/lib/data/markdownparser/elements/base.v @@ -0,0 +1,241 @@ +module elements + +import freeflowuniverse.herolib.core.pathlib +// import freeflowuniverse.herolib.core.smartid +// import freeflowuniverse.herolib.data.paramsparser +import freeflowuniverse.herolib.core.playbook +// import freeflowuniverse.herolib.ui.console +// import freeflowuniverse.herolib.core.base + +@[heap] +pub struct DocBase { +mut: + parent_doc_ ?&Doc @[skip; str: skip] +pub mut: + id int + content string + path ?pathlib.Path + processed bool + // params paramsparser.Params + type_name string + changed bool + children []Element + // trailing_lf bool = true // NO LONGER NEEDED !!!! +} + +fn (mut self DocBase) process_base() ! { +} + +fn (mut self DocBase) parent_doc() &Doc { + mut pd := self.parent_doc_ or { + e := doc_new() or { panic('bug') } + &e + } + + return pd +} + +fn (mut self DocBase) remove_empty_children() { + self.children = self.children.filter(!(it.content == '' && it.children.len == 0 + && it.type_name in ['text', 'empty'])) +} + +pub fn (mut self DocBase) process() !int { + if self.processed { + return 0 + } + self.remove_empty_children() + self.process_base()! + self.process_children()! 
+ self.content = '' // because now the content is in children + self.processed = true + return 1 +} + +@[params] +pub struct ActionsGetArgs { +pub mut: + actor string + name string +} + +// get all actions from the children +pub fn (self DocBase) actions(args ActionsGetArgs) []playbook.Action { + mut out := []playbook.Action{} + for element in self.children { + if element is Action { + mut found := true + if args.actor.len > 0 && args.actor != element.action.actor { + found = false + } + if args.name.len > 0 && args.name != element.action.name { + found = false + } + if found { + out << element.action + } + } else { + out << element.actions(args) + } + } + return out +} + +pub fn (self DocBase) header_name() !string { + for element in self.children { + if element is Header { + return element.content + } + } + return error("couldn't find header") +} + +pub fn (mut self DocBase) actionpointers(args ActionsGetArgs) []&Action { + mut out := []&Action{} + for mut element in self.children { + if mut element is Action { + mut found := true + if args.actor.len > 0 && args.actor != element.action.actor { + found = false + } + if args.name.len > 0 && args.name != element.action.name { + found = false + } + if found { + out << element + } + } + out << element.actionpointers(args) + } + return out +} + +pub fn (mut self DocBase) defpointers() []&Def { + mut out := []&Def{} + for mut element in self.children { + if mut element is Def { + out << element + } + out << element.defpointers() + } + return out +} + +pub fn (self DocBase) treeview() string { + mut out := []string{} + self.treeview_('', mut out) + return out.join_lines() +} + +pub fn (self DocBase) children() []Element { + return self.children +} + +pub fn (mut self DocBase) process_children() !int { + mut changes := 0 + for mut element in self.children { + changes += element.process() or { + return error('Failed to process child ${element.type_name}\n${err}') + } + } + return changes +} + +fn (self DocBase) treeview_(prefix string, mut out []string) { + mut c := self.content + c = c.replace('\n', '\\n') + if c.len > 80 { + c = c[0..80] + } + out << '${prefix}- ${self.id} : ${self.type_name:-30} ${c.len} \'${c}\'' + for mut element in self.children() { + element.treeview_(prefix + ' ', mut out) + } +} + +pub fn (self DocBase) html() !string { + mut out := '' + for mut element in self.children() { + out += element.html()! + } + return out +} + +// example see https://github.com/RelaxedJS/ReLaXed-examples/blob/master/examples/letter/letter.pug +// is to generate pdf's +pub fn (self DocBase) pug() !string { + mut out := '' + for mut element in self.children() { + out += element.pug()! + } + return out +} + +// the markdown which represents how it created the element +pub fn (self DocBase) markdown() !string { + mut out := '' + for mut element in self.children() { + out += element.markdown()! + // console.print_debug("+++++++++++${element.markdown()!}+++++++++++") + // if element.trailing_lf { + // out += '\n' + // } + } + return out +} + +pub fn (self DocBase) first() !Element { + if self.children.len == 0 { + return error('doc has no children') + } + mut l := self.children.first() + return l +} + +pub fn (self DocBase) last() !Element { + if self.children.len == 0 { + return error('doc has no children') + } + mut l := self.children.last() + return l +} + +pub fn (mut self DocBase) delete_last() ! 
{ + if self.children.len == 0 { + return error('doc has no children') + } + + self.children.delete_last() +} + +pub fn (mut self DocBase) content_set(element_id int, c string) { + for mut element in self.children() { + if element.id == element_id { + element.content = c + } + element.content_set(element_id, c) + } +} + +pub fn (self DocBase) children_recursive() []Element { + mut elements := []Element{} + self.children_recursive_(mut elements) + return elements +} + +fn (self DocBase) children_recursive_(mut elements []Element) { + for element in self.children() { + elements << element + element.children_recursive_(mut elements) + } +} + +pub fn (mut self DocBase) id_set(latestid_ int) int { + mut latestid := latestid_ + latestid += 1 + self.id = latestid + for mut element in self.children() { + latestid = element.id_set(latestid) + } + return latestid +} diff --git a/lib/data/markdownparser/elements/base_add_methods.v b/lib/data/markdownparser/elements/base_add_methods.v new file mode 100644 index 00000000..7a9b38d3 --- /dev/null +++ b/lib/data/markdownparser/elements/base_add_methods.v @@ -0,0 +1,152 @@ +module elements + +pub fn (mut base DocBase) paragraph_new(mut docparent ?&Doc, content string) &Paragraph { + mut a := Paragraph{ + content: content + type_name: 'paragraph' + parent_doc_: docparent + } + + base.children << a + return &a +} + +pub fn (mut base DocBase) action_new(mut docparent ?&Doc, content string) &Action { + mut a := Action{ + content: content + type_name: 'action' + parent_doc_: docparent + } + base.children << a + return &a +} + +pub fn (mut base DocBase) table_new(mut docparent ?&Doc, content string) &Table { + mut a := Table{ + content: content + type_name: 'table' + parent_doc_: docparent + } + + base.children << a + return &a +} + +pub fn (mut base DocBase) header_new(mut docparent ?&Doc, content string) &Header { + mut a := Header{ + content: content + type_name: 'header' + parent_doc_: docparent + } + + base.children << a + return &a +} + +pub fn (mut base DocBase) list_new(mut docparent ?&Doc, content string) !&List { + mut a := List{ + type_name: 'list' + parent_doc_: docparent + } + a.add_list_item(content)! 
+ + base.children << a + + return &a +} + +pub fn (mut base DocBase) list_item_new(mut docparent ?&Doc, content string) &ListItem { + mut a := ListItem{ + content: content + type_name: 'listitem' + parent_doc_: docparent + } + a.process() or { panic(err) } + base.children << a + return &a +} + +pub fn (mut base DocBase) text_new(mut docparent ?&Doc, content string) &Text { + mut a := Text{ + content: content + type_name: 'text' + parent_doc_: docparent + } + // a.trailing_lf = false + base.children << a + return &a +} + +pub fn (mut base DocBase) empty_new() &Empty { + mut a := Empty{} + // a.trailing_lf = false + base.children << a + return &a +} + +pub fn (mut base DocBase) comment_new(mut docparent ?&Doc, content string) &Comment { + mut a := Comment{ + content: content + type_name: 'comment' + parent_doc_: docparent + } + + base.children << a + return &a +} + +pub fn (mut base DocBase) codeblock_new(mut docparent ?&Doc, content string) &Codeblock { + mut a := Codeblock{ + content: content + type_name: 'codeblock' + parent_doc_: docparent + } + + base.children << a + return &a +} + +pub fn (mut base DocBase) frontmatter_new(mut docparent ?&Doc, content string) &Frontmatter { + mut fm := Frontmatter{ + content: content + type_name: 'frontmatter' + parent_doc_: docparent + } + + base.children << fm + return &fm +} + +pub fn (mut base DocBase) link_new(mut docparent ?&Doc, content string) &Link { + mut a := Link{ + content: content + type_name: 'link' + // trailing_lf: false + parent_doc_: docparent + } + // a.trailing_lf = false + base.children << a + return &a +} + +pub fn (mut base DocBase) html_new(mut docparent ?&Doc, content string) &Html { + mut a := Html{ + content: content + type_name: 'html' + parent_doc_: docparent + } + + base.children << a + return &a +} + +pub fn (mut base DocBase) def_new(mut docparent ?&Doc, content string) &Def { + mut a := Def{ + content: content + type_name: 'def' + // trailing_lf: false + parent_doc_: docparent + } + base.children << a + return &a +} diff --git a/lib/data/markdownparser/elements/char_parser_test.v b/lib/data/markdownparser/elements/char_parser_test.v new file mode 100644 index 00000000..90713545 --- /dev/null +++ b/lib/data/markdownparser/elements/char_parser_test.v @@ -0,0 +1,233 @@ +module elements + +import freeflowuniverse.herolib.ui.console + +fn test_charparser1() { + mut txt := '' + mut p2 := Paragraph{ + content: txt + } + + p2.paragraph_parse()! + p2.process_base()! + assert p2.content == '' + assert p2.children.len == 0 + assert p2.changed == false +} + +fn test_charparser2() { + mut txt := 'abc' + + mut p := parser_char_new_text(txt) + + p.forward(0) + assert p.char_current() == 'a' + + p.forward(1) + assert p.char_current() == 'b' + + p.forward(1) + assert p.char_current() == 'c' + + p.charnr = 1 + assert p.char_current() == 'b' + + assert p.char_next() == 'c' + assert p.char_prev() == 'a' + + assert p.text_next_is('c', 1) + assert p.text_next_is('c', 0) == false + assert p.text_next_is('b', 0) == true + + assert p.text_next_is('bc', 0) == true + assert p.text_next_is('bcs', 0) == false + assert p.text_next_is('ab', 0) == false + + p.charnr = 0 + assert p.text_next_is('abc', 0) == true + assert p.text_next_is('abc', 1) == false + assert p.text_next_is('bc', 1) == true + assert p.text_next_is('c', 2) == true +} + +fn test_charparser3_error() { + mut txt := '![' + mut p2 := Paragraph{ + content: txt + } + + p2.paragraph_parse()! + p2.process_base()! + p2.process_children()! 
+	// TODO decide what to do in this case
+	assert p2.content == '!['
+	assert p2.children.len == 1
+	ln := p2.children[0]
+	assert ln is Link
+	if ln is Link {
+		assert ln.id == 0
+		assert ln.processed == true
+		assert ln.type_name == 'link'
+		assert ln.cat == .image
+		assert ln.state == .error
+		assert ln.error_msg.contains('any link starting with ! needs to be image')
+	}
+}
+
+fn test_charparser_link() {
+	mut txt := '![a](b.png)'
+	mut p2 := Paragraph{
+		content: txt
+	}
+	p2.paragraph_parse()!
+	p2.process_base()!
+	p2.process_children()!
+
+	assert p2.children.len == 1
+
+	ln := p2.children[0]
+	console.print_debug('${ln}')
+	assert ln is Link
+	if ln is Link {
+		assert ln.id == 0
+		assert ln.type_name == 'link'
+		assert ln.markdown()! == '![a](b.png)'
+		assert ln.content == ''
+		assert ln.cat == .image
+		assert ln.description == 'a'
+		assert ln.url == 'b.png'
+		assert ln.filename == 'b.png'
+		assert ln.state == .init
+	}
+}
+
+fn test_charparser_link_error() {
+	mut txt := '![a](b)'
+	mut p2 := Paragraph{
+		content: txt
+	}
+	p2.process()!
+	assert p2.children.len == 1
+
+	ln := p2.children[0]
+	assert ln.children.len == 0
+
+	console.print_debug('${ln}')
+	assert ln is Link
+	if ln is Link {
+		assert ln.id == 0
+		assert ln.type_name == 'link'
+		assert ln.content == ''
+		assert ln.cat == .image
+		assert ln.description == 'a'
+		assert ln.url == 'b'
+		assert ln.filename == 'b'
+		assert ln.state == .error
+		assert ln.error_msg.contains('any link starting with ! needs to be image')
+	}
+}
+
+fn test_charparser_link_trailing_spaces() {
+	mut txt := '[a](b) '
+	mut p2 := Paragraph{
+		content: txt
+	}
+	p2.process()!
+	console.print_debug('${p2}')
+
+	assert p2.children.len == 2
+	assert p2.children[0].markdown()! == '[a](b.md)'
+	assert p2.children.last().markdown()! == ' '
+	assert p2.children.last().type_name == 'text'
+}
+
+fn test_charparser_link_ignore_trailing_newlines() {
+	mut txt := '[a](b)\n \n'
+	mut p2 := Paragraph{
+		content: txt
+	}
+	p2.process()!
+	console.print_debug('${p2}')
+
+	assert p2.children.len == 2
+	assert p2.children[0].markdown()! == '[a](b.md)'
+	assert p2.children.last().markdown()! == '\n \n'
+	assert p2.children.last().type_name == 'text'
+}
+
+fn test_charparser_link_comment_text() {
+	mut txt := '
+![a](b.jpg) //comment
+sometext
+'
+	mut p2 := Paragraph{
+		content: txt
+	}
+
+	p2.process()!
+	console.print_debug('${p2}')
+
+	assert p2.children.len == 5
+
+	assert p2.children[1] is Link
+	item_1 := p2.children[1]
+	if item_1 is Link {
+		assert item_1.cat == .image
+		assert item_1.filename == 'b.jpg'
+		assert item_1.description == 'a'
+	}
+
+	assert p2.children[3] is Comment
+	item_2 := p2.children[3]
+	if item_2 is Comment {
+		assert item_2.content == 'comment'
+	}
+
+	assert p2.children[4] is Text
+	assert p2.children[4].content == '\nsometext\n'
+}
+
+fn test_charparser_link_multilinecomment_text() {
+	mut txt := '![a](b.jpg)<!--comment1-->
+<!--comment2-->
+sometext'
+	mut p2 := Paragraph{
+		content: txt
+	}
+
+	p2.process()!
+	console.print_debug('${p2}')
+
+	assert p2.children.len == 5
+
+	assert p2.children[0] is Link
+	item_1 := p2.children[0]
+	if item_1 is Link {
+		assert item_1.cat == .image
+		assert item_1.filename == 'b.jpg'
+		assert item_1.description == 'a'
+		assert item_1.markdown()! == '![a](b.jpg)'
+	}
+
+	assert p2.children[1] is Comment
+	item_2 := p2.children[1]
+	if item_2 is Comment {
+		assert item_2.content == 'comment1'
+		assert item_2.singleline == false
+	}
+
+	assert p2.children[3] is Comment
+	item_4 := p2.children[3]
+	if item_4 is Comment {
+		assert item_4.content == 'comment2'
+		assert item_4.singleline == false
+	}
+
+	assert p2.children[4] is Text
+	assert p2.children[4].content == '\nsometext'
+
+	assert txt == p2.markdown()!
+}
diff --git a/lib/data/markdownparser/elements/doc.v b/lib/data/markdownparser/elements/doc.v
new file mode 100644
index 00000000..14629b84
--- /dev/null
+++ b/lib/data/markdownparser/elements/doc.v
@@ -0,0 +1,81 @@
+module elements
+
+import freeflowuniverse.herolib.core.texttools
+
+@[heap]
+pub struct Doc {
+	DocBase
+pub mut:
+	// gid smartid.GID
+	pre             []HtmlSource
+	linked_pages    []string // to know which collection:pages are needed to make this doc complete
+	collection_name string
+}
+
+// add a css or script link to a document
+// url: the source where the data comes from, can be a CDN or a local link
+// path: relative or absolute path to the info
+// bookname: if in memory in a book
+// chaptername: if in memory in a book
+// filename: if in memory in a book
+// cat: either .css or .script
+pub fn (mut self Doc) pre_add(arg HtmlSource) string {
+	return ''
+}
+
+@[params]
+pub struct HtmlSource {
+pub mut:
+	url         string
+	path        string
+	bookname    string
+	chaptername string
+	filename    string
+	cat         HtmlSourceCat
+}
+
+enum HtmlSourceCat {
+	css
+	script
+}
+
+@[params]
+pub struct DocNewArgs {
+pub mut:
+	pre             []HtmlSource
+	content         string
+	collection_name string
+}
+
+pub fn doc_new(args DocNewArgs) !Doc {
+	mut d := Doc{
+		pre:             args.pre
+		collection_name: args.collection_name
+	}
+	return d
+}
+
+pub fn (mut self Doc) process() !int {
+	if self.processed {
+		return 0
+	}
+	self.remove_empty_children()
+	self.process_base()!
+	self.process_children()!
+	self.id_set(0)
+	self.content = '' // because now the content is in the children
+	return 1
+}
+
+// pub fn (self Doc) markdown()! string {
+// 	return ""
+// }
+
+pub fn (self Doc) html() !string {
+	return "${self.DocBase.html()!}"
+}
+
+pub fn (self Doc) pug() !string {
+	return ":markdown-it(linkify langPrefix='highlight-')\n${texttools.indent(self.markdown()!,
+		' ')}"
+}
diff --git a/lib/data/markdownparser/elements/element_action.v b/lib/data/markdownparser/elements/element_action.v
new file mode 100644
index 00000000..34ad3c3d
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_action.v
@@ -0,0 +1,45 @@
+module elements
+
+import freeflowuniverse.herolib.core.playbook
+
+@[heap]
+pub struct Action {
+	DocBase
+pub mut:
+	action           playbook.Action
+	action_processed bool
+}
+
+// we don't allow an action to create child elements
+pub fn (mut self Action) process() !int {
+	if self.processed {
+		return 0
+	}
+	// Should we process actions here?
+	// p := playbook.new(text: self.content)!
+	// if p.actions.len != 1 {
+	// 	return error('a single action is expected, but found ${p.actions.len}')
+	// }
+	// self.action = p.actions[0]
+	self.processed = true
+	self.content = ''
+	return 1
+}
+
+pub fn (self Action) markdown() !string {
+	assert self.processed // needs to be processed before getting the markdown
+	// if content is set then we know the action was processed
+	if self.action_processed || self.content != '' {
+		return self.content
+	}
+	// not processed, so return the original heroscript
+	return self.action.heroscript()
+}
+
+pub fn (self Action) html() !string {
+	return error('cannot return html, because there should be no actions left once we get to html')
+}
+
+pub fn (self Action) pug() !string {
+	return error('cannot return pug, because there should be no actions left once we get to pug')
+}
diff --git a/lib/data/markdownparser/elements/element_codeblock.v b/lib/data/markdownparser/elements/element_codeblock.v
new file mode 100644
index 00000000..7ed8918a
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_codeblock.v
@@ -0,0 +1,60 @@
+module elements
+
+import freeflowuniverse.herolib.core.playbook
+
+@[heap]
+pub struct Codeblock {
+	DocBase
+pub mut:
+	category string
+}
+
+pub fn (mut self Codeblock) process() !int {
+	if self.processed {
+		return 0
+	}
+	// QUESTION: should we process actions here?
+	// mut pb := playbook.new(text: self.content)!
+	// if pb.actions.len > 0 {
+	// 	for action in pb.actions {
+	// 		mut a := self.action_new(mut self.parent_doc(), '')
+	// 		a.action = action
+	// 		a.processed = true
+	// 		a.content = action.heroscript()
+	// 	}
+	// 	// now see if there is something left in the codeblock, if yes add that to the parent_elements
+	// 	if pb.othertext.len > 0 {
+	// 		self.content = pb.othertext
+	// 	}
+	// 	self.content = '' // because it is now in the children
+	// }
+	self.process_children()!
+	self.processed = true
+	return 1
+}
+
+pub fn (self Codeblock) markdown() !string {
+	mut out := ''
+	out += '```${self.category}\n'
+
+	for action in self.actions() {
+		out += action.str() + '\n'
+	}
+	if self.content.len > 0 {
+		out += self.content.trim_space()
+		out += '\n```'
+	} else {
+		out += '```'
+	}
+	return out
+}
+
+pub fn (self Codeblock) html() !string {
+	// TODO: implement html
+	return error('cannot return html, not implemented')
+}
+
+pub fn (self Codeblock) pug() !string {
+	return error('cannot return pug, not implemented')
+}
diff --git a/lib/data/markdownparser/elements/element_comment.v b/lib/data/markdownparser/elements/element_comment.v
new file mode 100644
index 00000000..27c5e111
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_comment.v
@@ -0,0 +1,34 @@
+module elements
+
+@[heap]
+pub struct Comment {
+	DocBase
+pub mut:
+	replaceme  string
+	singleline bool
+}
+
+pub fn (mut self Comment) process() !int {
+	if self.processed {
+		return 0
+	}
+	self.processed = true
+	return 1
+}
+
+pub fn (self Comment) markdown() !string {
+	mut out := '<!--${self.content}-->'
+	return out
+}
+
+pub fn (self Comment) html() !string {
+	mut out := self.content
+	out += self.DocBase.html()!
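+	// the raw comment body is kept in the html output, followed by any rendered children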
+	return out
+}
+
+pub fn (self Comment) pug() !string {
+	return error('cannot return pug, not implemented')
+}
diff --git a/lib/data/markdownparser/elements/element_def.v b/lib/data/markdownparser/elements/element_def.v
new file mode 100644
index 00000000..b99b7504
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_def.v
@@ -0,0 +1,55 @@
+module elements
+
+@[heap]
+pub struct Def {
+	DocBase
+pub mut:
+	pagekey   string
+	pagename  string
+	nameshort string
+}
+
+pub fn (mut self Def) process() !int {
+	if self.processed {
+		return 0
+	}
+	if self.nameshort == '' {
+		if self.content == '' {
+			return error('cannot get name, content should not be empty.')
+		}
+		self.nameshort = self.content.to_lower().replace('_', '').trim('*')
+	}
+	self.processed = false
+	return 1
+}
+
+pub fn (mut self Def) process_link() ! {
+	// self.trailing_lf = false
+	self.link_new(mut self.parent_doc(), '[${self.pagename}](${self.pagekey})')
+	self.process_children()!
+	self.content = ''
+	self.processed = true
+}
+
+pub fn (self Def) markdown() !string {
+	if !self.processed {
+		// return error('cannot do markdown for ${self} as long as not processed')
+		return self.content
+	}
+
+	return self.DocBase.markdown() // for children
+}
+
+pub fn (self Def) html() !string {
+	if !self.processed {
+		return error('cannot do html for ${self} as long as not processed')
+	}
+	return self.DocBase.html() // for children
+}
+
+pub fn (self Def) pug() !string {
+	if !self.processed {
+		return error('cannot do pug for ${self} as long as not processed')
+	}
+	return self.DocBase.pug() // for children
+}
diff --git a/lib/data/markdownparser/elements/element_empty.v b/lib/data/markdownparser/elements/element_empty.v
new file mode 100644
index 00000000..0fae390e
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_empty.v
@@ -0,0 +1,66 @@
+module elements
+
+import freeflowuniverse.herolib.core.playbook
+
+// NOT USED FOR NOW
+
+@[heap]
+pub struct Empty {
+pub mut:
+	id        int
+	content   string
+	processed bool
+	type_name string
+	changed   bool
+	// trailing_lf bool
+	children []Element
+}
+
+pub fn (mut self Empty) process() !int {
+	return 0
+}
+
+pub fn (self Empty) markdown() !string {
+	return ''
+}
+
+pub fn (self Empty) pug() !string {
+	return ''
+}
+
+pub fn (self Empty) html() !string {
+	return ''
+}
+
+pub fn (self Empty) treeview_(prefix string, mut out []string) {
+}
+
+pub fn (self Empty) children_recursive() []Element {
+	return []Element{}
+}
+
+pub fn (self Empty) children_recursive_(mut e []Element) {
+}
+
+pub fn (self Empty) content_set(i int, b string) {
+}
+
+pub fn (self Empty) id_set(i int) int {
+	return 0
+}
+
+pub fn (self Empty) actionpointers(args ActionsGetArgs) []&Action {
+	return []&Action{}
+}
+
+pub fn (self Empty) defpointers() []&Def {
+	return []&Def{}
+}
+
+pub fn (self Empty) header_name() !string {
+	return ''
+}
+
+pub fn (self Empty) actions(args ActionsGetArgs) []playbook.Action {
+	return []playbook.Action{}
+}
diff --git a/lib/data/markdownparser/elements/element_frontmatter.v b/lib/data/markdownparser/elements/element_frontmatter.v
new file mode 100644
index 00000000..92603af5
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_frontmatter.v
@@ -0,0 +1,71 @@
+module elements
+
+import toml
+
+// Frontmatter struct
+@[heap]
+pub struct Frontmatter {
+	DocBase
+pub mut:
+	doc toml.Doc // Stores the parsed TOML document
+}
+
+pub fn (mut self Frontmatter) process() !int {
+	if self.processed {
+		return 0
+	}
+	// Parse the TOML frontmatter content into a toml.Doc
+	self.doc = toml.parse_text(self.content) or {
+		return error('Failed to parse TOML frontmatter: ${err.msg()}')
+	}
+	// Clear content after parsing
+	self.content = ''
+	self.processed = true
+	return 1
+}
+
+pub fn (self Frontmatter) markdown() !string {
+	mut out := '+++\n'
+	// Convert the TOML document back to a string
+	for key, value in self.doc.to_any().as_map() {
+		out += '${key} = ${value.to_toml()}\n'
+	}
+	out += '+++'
+	return out
+}
+
+pub fn (self Frontmatter) html() !string {
+	mut out := '<div class="frontmatter">\n'
+	for key, value in self.doc.to_any().as_map() {
+		out += '<p><strong>${key}: ${value.string()}</strong></p>\n'
+	}
+	out += '</div>'
+	return out
+}
+
+pub fn (self Frontmatter) pug() !string {
+	mut out := ''
+	out += 'div(class="frontmatter")\n'
+	for key, value in self.doc.to_any().as_map() {
+		out += '  p\n'
+		out += '    strong ${key}: ${value.string()}\n'
+	}
+	return out
+}
+
+pub fn (self Frontmatter) get_value(key string) !toml.Any {
+	// Retrieve a value using a query string
+	return self.doc.value_opt(key) or { return error('Key "${key}" not found in frontmatter') }
+}
+
+pub fn (self Frontmatter) get_string(key string) !string {
+	return self.get_value(key)!.string()
+}
+
+pub fn (self Frontmatter) get_bool(key string) !bool {
+	return self.get_value(key)!.bool()
+}
+
+pub fn (self Frontmatter) get_int(key string) !int {
+	return self.get_value(key)!.int()
+}
diff --git a/lib/data/markdownparser/elements/element_header.v b/lib/data/markdownparser/elements/element_header.v
new file mode 100644
index 00000000..6abd8b7e
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_header.v
@@ -0,0 +1,32 @@
+module elements
+
+@[heap]
+pub struct Header {
+	DocBase
+pub mut:
+	depth int
+}
+
+pub fn (mut self Header) process() !int {
+	if self.processed {
+		return 0
+	}
+	self.processed = true
+	return 1
+}
+
+pub fn (self Header) markdown() !string {
+	mut h := ''
+	for _ in 0 .. self.depth {
+		h += '#'
+	}
+	return '${h} ${self.content}'
+}
+
+pub fn (self Header) html() !string {
+	return '<h${self.depth}>${self.content}</h${self.depth}>\n'
+}
+
+pub fn (self Header) pug() !string {
+	return error('cannot return pug, not implemented')
+}
diff --git a/lib/data/markdownparser/elements/element_html.v b/lib/data/markdownparser/elements/element_html.v
new file mode 100644
index 00000000..de470aae
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_html.v
@@ -0,0 +1,34 @@
+module elements
+
+@[heap]
+pub struct Html {
+	DocBase
+pub mut:
+	replaceme string
+}
+
+pub fn (mut self Html) process() !int {
+	if self.processed {
+		return 0
+	}
+	self.processed = true
+	return 1
+}
+
+pub fn (self Html) markdown() !string {
+	mut out := '\n'
+	out += self.content
+	out += '\n'
+	out += self.DocBase.markdown()!
+	return out
+}
+
+pub fn (self Html) pug() !string {
+	return error('cannot return pug, not implemented')
+}
+
+pub fn (self Html) html() !string {
+	mut out := self.content
+	out += self.DocBase.html()!
+	return out
+}
diff --git a/lib/data/markdownparser/elements/element_link.v b/lib/data/markdownparser/elements/element_link.v
new file mode 100644
index 00000000..d4554a0d
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_link.v
@@ -0,0 +1,382 @@
+module elements
+
+import freeflowuniverse.herolib.core.texttools
+import os
+// import freeflowuniverse.herolib.ui.console
+
+@[heap]
+pub struct Link {
+	DocBase
+pub mut:
+	cat         LinkType
+	isexternal  bool // is not linked to a wiki (sites)
+	include     bool // means we will not link to the remote location, content will be shown in context of the local site
+	newtab      bool // means it needs to be opened in a new tab
+	moresites   bool // this means we can look for the content on multiple source sites, site does not have to be specified
+	description string
+	url         string
+	anchor      string
+	// identification of link:
+	filename string // the name of the page/file the link points to
+	path     string // the path in the site
+	site     string // the sitename (collection) the link points to
+	extra    string // e.g. ':size=800x900'
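+	// e.g. for "![logo](img/logo.png ':size=200x100')" parse() roughly yields:
+	// cat=.image, description='logo', path='img', filename='logo.png', extra="':size=200x100'"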
+	// internal
+	state     LinkState
+	error_msg string
+}
+
+pub enum LinkType {
+	file
+	image
+	page
+	unknown
+	html
+	data
+	email
+	anchor
+	code
+}
+
+pub enum LinkState {
+	init // the original state (link not resolved yet)
+	linkprocessed // means we have found the original information
+	ok
+	missing
+	error
+}
+
+pub fn (mut self Link) process() !int {
+	if self.processed {
+		return 0
+	}
+	// self.trailing_lf = false
+	self.parse()
+	for mut child in self.children {
+		child.process()!
+	}
+	self.processed = true
+	self.content = ''
+	return 1
+}
+
+fn (self Link) markdown_include() string {
+	// console.print_debug(" ----- LINK MARKDOWN INCLUDE ${self.url} ${self.cat}")
+	pd := self.parent_doc_ or { panic('bug: there should always be a parent_doc') }
+
+	mut link_filename := self.filename
+
+	if self.site != '' {
+		link_filename = '${self.site}:${link_filename}'
+	} else if pd.collection_name != '' {
+		link_filename = '${pd.collection_name}:${link_filename}'
+	} else {
+		// only add the pathname if there is no site (collection) known
+		if self.path != '' {
+			link_filename = '${self.path}/${link_filename}'
+		}
+	}
+
+	anchor := if self.anchor != '' { '#${self.anchor}' } else { '' }
+	mut out := ''
+	if self.cat == LinkType.page || self.cat == LinkType.file || self.cat == LinkType.image
+		|| self.cat == LinkType.code {
+		mut pre := ''
+		if self.cat == LinkType.image {
+			pre = '!'
+		}
+
+		if self.extra.trim_space() == '' {
+			out = '${pre}[${self.description}](${link_filename}${anchor})'
+		} else {
+			out = '${pre}[${self.description}](${link_filename}${anchor} ${self.extra})'
+		}
+	} else if self.cat == .anchor {
+		out = '[${self.description}](${anchor})'
+	} else if self.cat == LinkType.html || self.cat == LinkType.data || self.cat == LinkType.email {
+		out = '[${self.description}](${self.url}${anchor})'
+	} else {
+		panic('bug')
+	}
+	return out
+}
+
+pub fn (self Link) markdown() !string {
+	if self.state == .init {
+		// we need to emit the link as it was before processing, so it can still be resolved, e.g. in doctree
+		return self.markdown_include()
+	}
+
+	// represent the description as a link if there is a link child, which might be processed
+	description := if self.children.len == 1 && self.children[0] is Link {
+		self.children[0].markdown()!
+	} else {
+		self.description
+	}
+
+	mut link_filename := self.filename
+
+	anchor := if self.anchor != '' { '#${self.anchor}' } else { '' }
+	mut out := ''
+	if self.cat == LinkType.page || self.cat == LinkType.code || self.cat == LinkType.file
+		|| self.cat == LinkType.image {
+		if self.filename.contains(':') {
+			return error("should not have ':' in link for image, page or file.\n${self}")
+		}
+		if self.path != '' {
+			link_filename = '${self.path}/${link_filename}'
+		}
+		mut pre := ''
+		if self.cat == LinkType.image {
+			pre = '!'
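+			// image links get their leading '!' restored when serialized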
+		}
+
+		if self.extra.trim_space() == '' {
+			out = '${pre}[${description}](${link_filename}${anchor})'
+		} else {
+			out = '${pre}[${description}](${link_filename}${anchor} ${self.extra})'
+		}
+	} else if self.cat == .anchor {
+		out = '[${self.description}](${anchor})'
+	} else if self.cat == LinkType.html || self.cat == LinkType.email {
+		out = '[${description}](${self.url}${anchor})'
+	} else {
+		panic('bug, LinkType ${self.cat} to markdown not implemented')
+	}
+
+	return out
+}
+
+pub fn (self Link) html() !string {
+	return match self.cat {
+		.image { '<img src="${self.url}" alt="${self.description}" />' }
+		else { '<a href="${self.url}">${self.description}</a>' }
+	}
+}
+
+pub fn (self Link) pug() !string {
+	return error('cannot return pug, not implemented')
+}
+
+// return the path of the filename in the site
+pub fn (mut link Link) pathfull() string {
+	mut r := '${link.path}/${link.filename}'
+	r = r.trim_left('/')
+	return r
+}
+
+fn (mut link Link) error(msg string) {
+	link.state = LinkState.error
+	link.error_msg = msg
+}
+
+// return the name of the link
+pub fn (mut link Link) name_fix_no_underscore_no_ext() string {
+	return texttools.name_fix_no_underscore_no_ext(link.filename)
+	// return link.filename.all_before_last('.').trim_right('_').to_lower()
+}
+
+fn (mut link Link) parse() {
+	link.content = link.content.trim_space()
+	if link.content.starts_with('!') {
+		link.cat = .image
+	}
+	link.description = link.content.all_after('[').all_before_last(']').trim_space()
+	link.url = link.content.all_after('](').all_before(')').trim_space()
+	if link.url.contains('#') {
+		link.anchor = link.url.all_after('#')
+		link.url = link.url.all_before('#')
+	} else {
+		// TODO: this is a temporary fix for non-anchor links not working
+		// link.url = '${link.url}#'
+	}
+
+	// // parse link description as paragraph
+	// if link.description != '' {
+	// 	link.paragraph_new(mut link.parent_doc(), link.description)
+	// 	console.print_debug('debugzoni ${link.children()}')
+	// }
+
+	if link.url.contains('://') {
+		// linkstate = LinkState.ok
+		link.isexternal = true
+	}
+
+	// if link.url.starts_with('http')
+	// 	|| link.url.starts_with('/')
+	// 	|| link.url.starts_with('..') {
+	// 	link.cat = LinkType.html
+	// 	return
+	// }
+
+	if link.url.starts_with('http') {
+		link.cat = LinkType.html
+		return
+	}
+
+	if link.url == '' && link.anchor != '' {
+		link.cat = LinkType.anchor
+		return
+	}
+
+	// AT THIS POINT LINK IS A PAGE OR A FILE
+	////////////////////////////////////////
+
+	link.url = link.url.trim_left(' ')
+
+	// deal with special cases where the file is not the only thing in ()
+	if link.url.trim(' ').contains(' ') {
+		// to support something like
+		// ![](./img/license_threefoldfzc.png ':size=800x900')
+		splitted := link.url.trim(' ').split(' ')
+		link.filename = splitted[0]
+		link.extra = splitted[1]
+	} else {
+		link.filename = link.url.trim(' ')
+	}
+
+	if link.filename.contains('/') {
+		link.path = link.filename.all_before_last('/').trim_right('/') // just to make sure it wasn't //
+	} else {
+		link.path = ''
+	}
+
+	// // find the prefix
+	// mut prefix_done := false
+	// mut filename := []string{}
+	// for x in link.filename.trim(' ').split('') {
+	// 	if !prefix_done {
+	// 		if x == '!' {
+	// 			link.newtab = true
+	// 			continue
+	// 		}
+	// 		if x == '@' {
+	// 			link.include = true
+	// 			continue
+	// 		}
+	// 		if x == '*' {
+	// 			link.moresites = true
+	// 			continue
+	// 		}
+	// 	} else {
+	// 		prefix_done = true
+	// 	}
+	// 	filename << x
+	// }
+	// link.filename = filename.join('')
+
+	// // trims prefixes from path
+	// link.path = link.path.trim_left('!@*')
+
+	// lets now check if there is site info in there
+	if link.filename.contains(':') {
+		splitted2 := link.filename.split(':')
+		if splitted2.len == 2 {
+			link.site = texttools.name_fix(splitted2[0])
+			// if link.site.starts_with('info_') {
+			// 	link.site = link.site[5..]
+			// }
+			link.filename = splitted2[1]
+		} else if splitted2.len > 2 {
+			link.error('link can only have 1 x ":"\n${link}')
+			return
+		} else {
+			panic('should never be here')
+		}
+	}
+	if link.site.contains('/') {
+		link.site = link.site.all_after_last('/')
+	}
+
+	link.filename = os.base(link.filename).replace('\\', '/')
+
+	if link.path.starts_with('./') {
+		link.path = link.path.after('./')
+	}
+
+	if link.filename != '' {
+		// check which link type
+		ext := os.file_ext(link.filename).trim('.').to_lower()
+
+		if ext == '' {
+			if link.cat == .image {
+				link.error('any link starting with ! needs to be image now ${link.content}')
+				return
+			}
+			link.cat = LinkType.page
+			link.filename += '.md'
+		} else if ext in ['jpg', 'png', 'svg', 'jpeg', 'gif'] {
+			if link.cat != .image {
+				link.error('any image needs to start with ! now ${link.content}')
+				return
+			}
+			link.cat = LinkType.image
+		} else if ext == 'md' {
+			if link.cat == .image {
+				link.error('any link starting with ! needs to be image now md, content is ${link.content}')
+				return
+			}
+			link.cat = LinkType.page
+			if link.filename.contains('@@') {
+				link.filename = '../' + link.filename.all_before('@@') + '/' +
+					link.filename.all_after('@@')
+				link.site = ''
+			}
+		} else if ext in ['html', 'htm'] {
+			if link.cat == .image {
+				link.error('any link starting with ! needs to be image now html, content is ${link.content}')
+				return
+			}
+			link.cat = LinkType.html
+			return
+		} else if ext in ['v', 'py', 'js', 'c', 'sh'] {
+			if link.cat == .image {
+				link.error('any link starting with ! needs to be image now code, content is ${link.content}')
+				return
+			}
+			link.cat = LinkType.code
+			return
+		} else if ext in ['doc', 'docx', 'zip', 'xls', 'pdf', 'xlsx', 'ppt', 'pptx'] {
+			if link.cat == .image {
+				link.error('any link starting with ! needs to be image now doc, content is ${link.content}')
+				return
+			}
+			link.cat = LinkType.file
+			return
+		} else if ext in ['json', 'yaml', 'yml', 'toml'] {
+			if link.cat == .image {
+				link.error('any link starting with ! needs to be image now data, content is ${link.content}')
+				return
+			}
+			link.cat = LinkType.data
+			return
+		} else if link.url.starts_with('mailto:') {
+			if link.cat == .image {
+				link.error('any link starting with ! needs to be image now mailto, content is ${link.content}')
+				return
+			}
+			link.cat = LinkType.email
+			return
+		} else if !link.url.contains_any('./!&;') {
+			// link.cat = LinkType.page
+			link.error('need to figure out what to do with ${link.url}, it has a wrong format')
+			return
+		} else {
+			link.error("${link.url} (no match), ext was:'${ext}'")
+			return
+		}
+		if link.filename.contains(':') {
+			panic("should not have ':' in link for page or file (2).\n${link}")
+		}
+	} else {
+		// filename empty
+		if !link.url.trim(' ').starts_with('#') {
+			link.error('EMPTY LINK.')
+			return
+		}
+	}
+}
diff --git a/lib/data/markdownparser/elements/element_list.v b/lib/data/markdownparser/elements/element_list.v
new file mode 100644
index 00000000..d317386f
--- /dev/null
+++ b/lib/data/markdownparser/elements/element_list.v
@@ -0,0 +1,253 @@
+module elements
+
+@[heap]
+pub struct List {
+	DocBase
+pub mut:
+	cat            ListCat
+	interlinespace int // nr of lines in between
+}
+
+pub enum ListCat {
+	bullet
+	star
+	nr
+}
+
+pub fn (mut self List) process() !int {
+	if self.processed {
+		return 0
+	}
+
+	self.content = ''
+	self.processed = true
+	return 1
+}
+
+pub fn (mut self List) add_list_item(line string) !&ListItem {
+	if !line_is_list(line) {
+		return error('line is not a list item')
+	}
+
+	mut list_item := ListItem{
+		content: line
+		type_name: 'listitem'
+		parent_doc_: self.parent_doc_
+	}
+	list_item.process()!
+
+	self.determine_list_item_indentation(mut list_item)!
+
+	return &list_item
+}
+
+fn (mut self List) determine_list_item_indentation(mut list_item ListItem) ! {
+	if self.children.len == 0 {
+		list_item.indent = 0
+		self.children << list_item
+		return
+	}
+
+	for i := self.children.len - 1; i >= 0; i-- {
+		mut parent_li := self.children[i]
+		if mut parent_li is ListItem {
+			if list_item.depth - parent_li.depth < -1 {
+				continue
+			}
+			if list_item.depth - parent_li.depth >= -1 && list_item.depth - parent_li.depth < 2 {
+				// same indentation
+				list_item.indent = parent_li.indent
+				if parent_order := parent_li.order {
+					list_item.order = parent_order + 1
+				}
+				self.children << list_item
+			} else if list_item.depth - parent_li.depth >= 2
+				&& list_item.depth - parent_li.depth < 6 {
+				// increase indentation
+				list_item.indent = parent_li.indent + 1
+				self.children << list_item
+			} else {
+				// add content to the last list item
+				parent_li.content += ' ${list_item.content}'
+				parent_li.processed = false
+				parent_li.process()!
+			}
+
+			return
+		}
+	}
+
+	return error('current list item ${list_item.content} has less depth/indentation than the first list item')
+}
+
+pub fn (self List) markdown() !string {
+	mut out := ''
+	for child in self.children {
+		if child is ListItem {
+			mut h := ''
+			for _ in 0 .. child.indent * 4 {
+				h += ' '
+			}
+
+			mut pre := '-'
+			if order := child.order {
+				pre = '${order}.'
+			}
+
+			out += '${h}${pre} ${child.markdown()!}\n'
+			continue
+		}
+
+		out += child.markdown()!
+	}
+
+	return out
+}
+
+pub fn (self List) pug() !string {
+	return error('cannot return pug, not implemented')
+}
+
+pub fn (self List) html() !string {
+	mut out := ''
+
+	// Determine the type of list based on `ListCat`
+	match self.cat {
+		.bullet {
+			out += '